Merge "Corrected links associated with release docs. To be updated along with the...
author Jonas Bjurel <jonas.bjurel@ericsson.com>
Thu, 25 Jun 2015 17:06:58 +0000 (17:06 +0000)
committer Gerrit Code Review <gerrit@172.30.200.206>
Thu, 25 Jun 2015 17:06:58 +0000 (17:06 +0000)
210 files changed:
common/puppet-opnfv/manifests/controller_networker.pp
common/puppet-opnfv/manifests/odl_docker.pp
common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile [new file with mode: 0644]
common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/check_feature.sh [moved from fuel/deploy/install-ubuntu-packages.sh with 56% similarity, mode: 0644]
common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh [new file with mode: 0644]
common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/allinone.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/compute.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/controller.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/group_vars/all [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/multinodes.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/network.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/cinder-controller/handlers/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_config.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_install.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/api-paste.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder_init.sh [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/cinder-volume/files/loop.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/cinder-volume/handlers/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/cinder-volume/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/cinder-volume/templates/cinder.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/common/files/sources.list.d/cloudarchive-juno.list [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/common/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/common/templates/hosts [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/common/templates/ntp.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/dashboard/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/dashboard/templates/local_settings.py [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/dashboard/templates/openstack-dashboard.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/database/files/my.cnf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/database/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/database/tasks/mariadb.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/database/tasks/mysql.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/database/templates/data.j2 [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/database/templates/my.cnf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/database/templates/wsrep.cnf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/glance/handlers/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_config.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_install.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/glance/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/glance/tasks/nfs.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-api.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-registry.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/glance/templates/image_upload.sh [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/ha/files/galera_chk [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/ha/files/mysqlchk [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/ha/files/notify.sh [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/ha/handlers/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/ha/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/ha/templates/failover.j2 [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/ha/templates/haproxy.cfg [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/ha/templates/keepalived.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/keystone/handlers/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_config.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_install.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/keystone/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/keystone/templates/admin-openrc.sh [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/keystone/templates/demo-openrc.sh [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone_init [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/monitor/files/check_service.sh [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/monitor/files/root [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/monitor/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/mq/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq_cluster.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/mq/templates/.erlang.cookie [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/mq/templates/rabbitmq-env.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-common/handlers/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-compute/defaults/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-compute/handlers/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-compute/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dhcp_agent.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dnsmasq-neutron.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/etc/xorp/config.boot [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/l3_agent.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/metadata_agent.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/ml2_conf.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron-network.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron_init.sh [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/nova.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/handlers/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_config.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_install.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dhcp_agent.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dnsmasq-neutron.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/etc/xorp/config.boot [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/l3_agent.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/metadata_agent.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/ml2_conf.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron-network.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron_init.sh [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/nova.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/handlers/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/igmp-router.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/odl.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dhcp_agent.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dnsmasq-neutron.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/etc/xorp/config.boot [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/l3_agent.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/metadata_agent.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/ml2_conf.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron-network.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron_init.sh [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/nova.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-compute/handlers/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-compute/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova-compute.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/handlers/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_config.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_install.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dhcp_agent.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dnsmasq-neutron.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/etc/xorp/config.boot [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/l3_agent.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/metadata_agent.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/ml2_conf.ini [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron-network.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron_init.sh [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/nova.conf [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/repo/tasks/main.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/roles/repo/templates/sources.list [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/single-controller.yml [new file with mode: 0644]
compass/deploy/ansible/openstack_juno/storage.yml [new file with mode: 0644]
compass/deploy/conf/base.conf
compass/deploy/conf/cluster.conf [new file with mode: 0644]
compass/deploy/conf/five.conf
compass/deploy/deploy-vm.sh
compass/deploy/remote_excute.exp [new file with mode: 0644]
compass/deploy/status_callback.py [new file with mode: 0644]
foreman/ci/deploy.sh
fuel/build/Makefile
fuel/build/f_lith_odl_docker/Makefile [new file with mode: 0755]
fuel/build/f_lith_odl_docker/dockerfile/Dockerfile [new file with mode: 0755]
fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh [new file with mode: 0755]
fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh [new file with mode: 0755]
fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh [new file with mode: 0755]
fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp [new file with mode: 0644]
fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh [new file with mode: 0644]
fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh [new file with mode: 0644]
fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh [new file with mode: 0644]
fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh [new file with mode: 0644]
fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh [new file with mode: 0755]
fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh [new file with mode: 0755]
fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh [new file with mode: 0755]
fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh [new file with mode: 0644]
fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh [new file with mode: 0644]
fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh [new file with mode: 0755]
fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh [new file with mode: 0644]
fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp
fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh
fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
fuel/ci/deploy.sh
fuel/deploy/README.txt
fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dea.yaml [new file with mode: 0644]
fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dha.yaml [new file with mode: 0644]
fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dea.yaml [moved from fuel/deploy/baremetal/dea.yaml with 96% similarity]
fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dha.yaml [new file with mode: 0644]
fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml [new file with mode: 0644]
fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dha.yaml [new file with mode: 0644]
fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml [new file with mode: 0644]
fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dha.yaml [new file with mode: 0644]
fuel/deploy/baremetal/dha.yaml [deleted file]
fuel/deploy/baremetal/vms/fuel.xml [moved from fuel/deploy/baremetal/vm/vFuel with 99% similarity]
fuel/deploy/baremetal/vms/fuel_lf.xml [new file with mode: 0644]
fuel/deploy/cloud/configure_nodes.py
fuel/deploy/cloud/deploy.py
fuel/deploy/cloud/deployment.py
fuel/deploy/common.py
fuel/deploy/dea.py
fuel/deploy/deploy.py
fuel/deploy/deploy_env.py
fuel/deploy/dha_adapters/hardware_adapter.py
fuel/deploy/dha_adapters/hp_adapter.py
fuel/deploy/dha_adapters/ipmi_adapter.py
fuel/deploy/dha_adapters/libvirt_adapter.py
fuel/deploy/environments/__init__.py [new file with mode: 0644]
fuel/deploy/environments/execution_environment.py [new file with mode: 0644]
fuel/deploy/environments/libvirt_environment.py [new file with mode: 0644]
fuel/deploy/environments/virtual_fuel.py [new file with mode: 0644]
fuel/deploy/install_fuel_master.py
fuel/deploy/libvirt/conf/ha/dea.yaml [new file with mode: 0644]
fuel/deploy/libvirt/conf/ha/dha.yaml [new file with mode: 0644]
fuel/deploy/libvirt/conf/multinode/dea.yaml [moved from fuel/deploy/libvirt/dea.yaml with 97% similarity]
fuel/deploy/libvirt/conf/multinode/dha.yaml [new file with mode: 0644]
fuel/deploy/libvirt/dha.yaml [deleted file]
fuel/deploy/libvirt/networks/fuel1.xml [moved from fuel/deploy/libvirt/networks/fuel1 with 100% similarity]
fuel/deploy/libvirt/networks/fuel2.xml [moved from fuel/deploy/libvirt/networks/fuel2 with 100% similarity]
fuel/deploy/libvirt/networks/fuel3.xml [moved from fuel/deploy/libvirt/networks/fuel3 with 100% similarity]
fuel/deploy/libvirt/networks/fuel4.xml [moved from fuel/deploy/libvirt/networks/fuel4 with 100% similarity]
fuel/deploy/libvirt/vms/compute.xml [moved from fuel/deploy/libvirt/vms/compute with 99% similarity]
fuel/deploy/libvirt/vms/controller.xml [moved from fuel/deploy/libvirt/vms/controller with 99% similarity]
fuel/deploy/libvirt/vms/fuel.xml [moved from fuel/deploy/libvirt/vms/fuel-master with 99% similarity]
fuel/deploy/reap.py [new file with mode: 0644]
fuel/deploy/setup_environment.py [deleted file]
fuel/deploy/setup_execution_environment.py [new file with mode: 0644]
fuel/deploy/setup_vfuel.py [deleted file]
fuel/deploy/ssh_client.py
fuel/prototypes/auto-deploy/configs/lf_pod1/dea_ha.yaml
fuel/prototypes/auto-deploy/configs/lf_pod1/dea_no-ha.yaml

index 157bc8f..b148ec8 100644 (file)
@@ -302,6 +302,7 @@ class opnfv::controller_networker {
     class { "quickstack::pacemaker::neutron":
       agent_type               =>  $this_agent,
       enable_tunneling         =>  'true',
+      external_network_bridge  =>  'br-ex',
       ml2_mechanism_drivers    =>  $ml2_mech_drivers,
       ml2_network_vlan_ranges  =>  ["physnet1:10:50"],
       odl_controller_ip        =>  $odl_control_ip,
@@ -309,6 +310,18 @@ class opnfv::controller_networker {
       ovs_tunnel_iface         =>  $ovs_tunnel_if,
       ovs_tunnel_types         =>  ["vxlan"],
       verbose                  =>  'true',
+      neutron_conf_additional_params => { default_quota => 'default',
+                                      quota_network => '50',
+                                      quota_subnet => '50',
+                                      quota_port => 'default',
+                                      quota_security_group => '50',
+                                      quota_security_group_rule  => 'default',
+                                      quota_vip => 'default',
+                                      quota_pool => 'default',
+                                      quota_router => '50',
+                                      quota_floatingip => '100',
+                                      network_auto_schedule => 'default',
+                                    },
     }
 
     if ($external_network_flag != '') and str2bool($external_network_flag) {
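Note on the quota hash above: entries set to the string 'default' are meant to keep Neutron's built-in defaults, while the numeric entries override them. Assuming the quickstack module skips the 'default' entries when rendering, the effective neutron.conf result is roughly (a sketch of the rendered config, not taken from this change):

    [quotas]
    quota_network = 50
    quota_subnet = 50
    quota_security_group = 50
    quota_router = 50
    quota_floatingip = 100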
index 6e70ba0..fdbbe67 100644 (file)
@@ -39,7 +39,7 @@ class opnfv::odl_docker
                 mode => 750,
              }
 
-        file { "/opt/opnfv/odl/start_odl_conatiner.sh":
+        file { "/opt/opnfv/odl/start_odl_container.sh":
                 ensure => present,
                 source => "/etc/puppet/modules/opnfv/scripts/start_odl_container.sh",
                 mode => 750,
diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile
new file mode 100644 (file)
index 0000000..6d7535d
--- /dev/null
@@ -0,0 +1,82 @@
+####################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+#  DOCKER FILE FOR LITHIUM ODL RC0 Testing 
+#
+#############################################################################
+
+
+#Set the base image - note: the current release of Karaf uses JDK 7 and a lot of Ubuntu 12.04 packages, so we will use 12.04 rather than 14.04 to avoid backporting a ton of stuff
+FROM ubuntu:12.04
+
+# Maintainer Info
+MAINTAINER Daniel Smith
+
+
+#Run apt-get update on start just to check for updates when building
+RUN echo "Updating APT"
+RUN apt-get update
+RUN echo "Adding wget"
+RUN apt-get install -y wget
+RUN apt-get install -y net-tools
+RUN apt-get install -y openjdk-7-jre
+RUN apt-get install -y openjdk-7-jdk
+RUN apt-get install -y openssh-server
+RUN apt-get install -y vim
+RUN apt-get install -y expect
+RUN apt-get install -y daemontools
+RUN mkdir -p /opt/odl_source/lithium
+RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc'
+
+
+
+#Now let's go and fetch the ODL distribution
+RUN echo "Fetching Lithium Rc0"
+RUN wget https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/org/opendaylight/integration/distribution-karaf/0.3.0-SNAPSHOT/distribution-karaf-0.3.0-20150612.144348-2492.tar.gz -O /opt/odl_source/lithium/distribution-karaf-0.3.0-Lithium-RC0.tar.gz
+
+RUN echo "Untarring ODL inplace"
+RUN mkdir -p /opt/odl/lithium
+RUN tar zxvf /opt/odl_source/lithium/distribution-karaf-0.3.0-Lithium-RC0.tar.gz -C /opt/odl/lithium
+
+RUN echo "Installing DLUX and other features into ODL"
+#COPY dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh
+COPY container_scripts/start_odl_docker_container.sh /etc/init.d/
+COPY container_scripts/speak.sh /etc/init.d/
+#COPY dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh
+RUN chmod 777 /etc/init.d/start_odl_docker_container.sh
+RUN chmod 777 /etc/init.d/speak.sh
+
+
+
+# Expose the ports
+
+# PORTS FOR BASE SYSTEM AND DLUX
+EXPOSE 8101
+EXPOSE 6633
+EXPOSE 1099
+EXPOSE 43506
+EXPOSE 8181
+EXPOSE 8185
+EXPOSE 9000
+EXPOSE 39378
+EXPOSE 33714
+EXPOSE 44444
+EXPOSE 6653
+
+# PORTS FOR OVSDB AND ODL CONTROL
+EXPOSE 12001
+EXPOSE 6640
+EXPOSE 8080
+EXPOSE 7800
+EXPOSE 55130
+EXPOSE 52150
+EXPOSE 36826
+
+# set the default command (CMD) - this allows us to run this container as an executable
+CMD ["/etc/init.d/start_odl_docker_container.sh"]
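For local testing, the image can be built and run along these lines (illustrative tag and port selection; any of the EXPOSEd ports can be published the same way):

    cd common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile
    docker build -t opnfv/odl-lithium-rc0 .
    # publish the Karaf SSH console (8101), OpenFlow (6633) and DLUX UI (8181)
    docker run -d -p 8101:8101 -p 6633:6633 -p 8181:8181 opnfv/odl-lithium-rc0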
old mode 100755 (executable)
new mode 100644 (file)
similarity index 56%
rename from fuel/deploy/install-ubuntu-packages.sh
rename to common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/check_feature.sh
index 1ebd7c0..04d7b53
@@ -1,18 +1,18 @@
-#!/bin/bash
 ##############################################################################
 # Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
+# daniel.smith@ericsson.com
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-# Tools for installation on the libvirt server/base host
-#
-apt-get install -y libvirt-bin qemu-kvm tightvncserver virt-manager \
-   sshpass fuseiso genisoimage blackbox xterm python-yaml python-netaddr \
-   python-paramiko python-lxml python-pip
-pip install scp
-restart libvirt-bin
\ No newline at end of file
+#!/usr/bin/expect
+spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client
+expect "root>"
+send "feature:list | grep -i odl-restconf\r"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
+
+
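Since the shebang points at /usr/bin/expect, the check runs through expect rather than a shell, and it expects the Karaf client at the path hard-coded above (a manual invocation, assuming ODL is already up inside the container):

    expect container_scripts/check_feature.sh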
diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh
new file mode 100644 (file)
index 0000000..a7d0e6c
--- /dev/null
@@ -0,0 +1,20 @@
+#!/usr/bin/expect
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
+#  NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
+#################################################################################
+
+spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client
+expect "root>"
+send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs  odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh
new file mode 100644 (file)
index 0000000..96a40ec
--- /dev/null
@@ -0,0 +1,48 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Wrapper script that starts up the ODL server and loads the feature set for DLUX and OVSDB
+#  NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
+#################################################################################
+#  Start up script for calling karaf / ODL inside a docker container.
+#
+#  This script will also call a couple of expect scripts to load the feature set that we want
+
+
+#ENV
+export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
+
+#MAIN
+echo "Starting up the da Sheilds..."
+/opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/karaf server &
+echo "Sleeping 5 bad hack"
+sleep 10
+echo "should see stuff listening now"
+netstat -na
+echo " should see proess running for karaf"
+ps -efa
+echo " Starting the packages we want"
+/etc/init.d/speak.sh
+echo "Printout the status - if its right, you should see 8181 appear now"
+netstat -na
+ps -efa
+
+
+
+## This loop is what currently keeps our container running; it prints the karaf status to the docker logs every minute
+## Cheap - but effective
+while true;
+do
+        echo "Checking status of ODL:"
+        /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/status
+        sleep 60
+done
+
+
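Because the while loop above is the container's foreground process, the periodic status output lands in the container log and can be followed with (illustrative container name):

    docker logs -f <container-id>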
diff --git a/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml b/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml
new file mode 100644 (file)
index 0000000..9c1d7e7
--- /dev/null
@@ -0,0 +1,42 @@
+---
+- hosts: all
+  remote_user: root
+  sudo: true
+  roles:
+    - repo
+    - common
+
+- hosts: ha
+  remote_user: root
+  sudo: True
+  roles:
+    - ha
+
+- hosts: controller
+  remote_user: root
+  sudo: True
+  roles:
+    - database
+    - mq
+    - keystone
+    - nova-controller
+    - neutron-controller
+    - cinder-controller
+    - glance
+    - neutron-common
+    - neutron-network
+    - dashboard
+
+- hosts: compute
+  remote_user: root
+  sudo: True
+  roles:
+    - nova-compute
+    - neutron-compute
+    - cinder-volume
+
+- hosts: all
+  remote_user: root
+  sudo: True
+  roles:
+    - monitor
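A run of this playbook needs an inventory that defines the ha, controller and compute groups used by the plays; invocation is then the usual (sketch, inventory path illustrative):

    ansible-playbook -i inventory/ha_hosts HA-ansible-multinodes.yml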
diff --git a/compass/deploy/ansible/openstack_juno/allinone.yml b/compass/deploy/ansible/openstack_juno/allinone.yml
new file mode 100644 (file)
index 0000000..15220ca
--- /dev/null
@@ -0,0 +1,38 @@
+---
+- hosts: all
+  remote_user: root
+  sudo: true
+  roles:
+    - repo
+
+- hosts: controller
+  sudo: True
+  roles: 
+    - common
+    - database
+    - mq
+    - keystone
+    - nova-controller
+    - neutron-controller
+    - dashboard
+    - cinder-controller
+    - glance
+
+- hosts: network
+  sudo: True
+  roles:
+    - common
+    - neutron-network
+
+- hosts: storage
+  sudo: True
+  roles:
+    - common
+    - cinder-volume
+
+- hosts: compute
+  sudo: True
+  roles:
+    - common
+    - nova-compute
+    - neutron-compute
diff --git a/compass/deploy/ansible/openstack_juno/compute.yml b/compass/deploy/ansible/openstack_juno/compute.yml
new file mode 100644 (file)
index 0000000..b2679c0
--- /dev/null
@@ -0,0 +1,9 @@
+---
+- hosts: all
+  remote_user: vagrant
+  sudo: True
+  roles:
+    - repo
+    - common
+    - nova-compute
+    - neutron-compute
diff --git a/compass/deploy/ansible/openstack_juno/controller.yml b/compass/deploy/ansible/openstack_juno/controller.yml
new file mode 100644 (file)
index 0000000..7f4a10e
--- /dev/null
@@ -0,0 +1,15 @@
+---
+- hosts: controller
+  remote_user: root
+  sudo: True
+  roles:
+      - repo
+      - common
+      - database
+      - mq
+      - keystone
+      - nova-controller
+      - neutron-controller
+      - dashboard
+      - cinder-controller
+      - glance
diff --git a/compass/deploy/ansible/openstack_juno/group_vars/all b/compass/deploy/ansible/openstack_juno/group_vars/all
new file mode 100644 (file)
index 0000000..5643fcd
--- /dev/null
@@ -0,0 +1,54 @@
+controller_host: 10.1.0.11
+network_host: 10.1.0.12
+compute_host: 10.1.0.13
+storage_host: 10.1.0.14
+odl_controller: 10.1.0.15
+
+DEBUG: False
+VERBOSE: False
+NTP_SERVER_LOCAL: controller
+DB_HOST: "{{ controller_host }}"
+MQ_BROKER: rabbitmq
+
+OPENSTACK_REPO: cloudarchive-juno.list
+ADMIN_TOKEN: admin
+CEILOMETER_TOKEN: c095d479023a0fd58a54
+RABBIT_PASS: guest
+KEYSTONE_DBPASS: keystone_db_secret
+DEMO_PASS: demo_secret
+ADMIN_PASS: admin_secret
+GLANCE_DBPASS: glance_db_secret
+GLANCE_PASS: glance_secret
+NOVA_DBPASS: nova_db_secret
+NOVA_PASS: nova_secret
+DASH_DBPASS: dash_db_secret
+CINDER_DBPASS: cinder_db_secret
+CINDER_PASS: cinder_secret
+NEUTRON_DBPASS: neutron_db_secret
+NEUTRON_PASS: neutron_secret
+NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
+NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
+#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
+NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
+NEUTRON_TUNNEL_TYPES: ['vxlan']
+METADATA_SECRET: metadata_secret
+INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21
+INTERFACE_NAME: eth2
+
+EXTERNAL_NETWORK_CIDR: 203.0.113.0/24
+EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
+FLOATING_IP_START: 203.0.113.101
+FLOATING_IP_END: 203.0.113.200
+
+juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
+build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+build_in_image_name: cirros-0.3.3-x86_64-disk.img
+
+physical_device: /dev/sdb
+
+internal_interface: ansible_eth1
+internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
+
+odl_username: admin
+odl_password: admin
+odl_api_port: 8080
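These group variables feed the Jinja2 templates in the roles below; for example, RABBIT_PASS above renders into the cinder.conf template as (illustrative):

    rabbit_password = {{ RABBIT_PASS }}   ->   rabbit_password = guest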
diff --git a/compass/deploy/ansible/openstack_juno/multinodes.yml b/compass/deploy/ansible/openstack_juno/multinodes.yml
new file mode 100644 (file)
index 0000000..ffd29d5
--- /dev/null
@@ -0,0 +1,75 @@
+---
+- hosts: all
+  remote_user: root
+  sudo: true
+  roles:
+    - repo
+
+- hosts: database
+  sudo: True
+  roles:
+    - common
+    - database
+
+- hosts: messaging
+  sudo: True
+  roles:
+    - common
+    - mq
+
+- hosts: identity
+  sudo: True
+  roles:
+    - common
+    - keystone
+
+- hosts: compute-controller
+  sudo: True
+  roles:
+    - common
+    - nova-controller
+
+- hosts: network-server
+  sudo: True
+  roles:
+    - common
+    - neutron-controller
+
+- hosts: storage-controller
+  sudo: True
+  roles:
+    - common
+    - cinder-controller
+
+- hosts: image
+  sudo: True
+  roles:
+    - common
+    - glance
+
+- hosts: dashboard
+  sudo: True
+  roles:
+    - common
+    - dashboard
+
+- hosts: network-worker
+  sudo: True
+  roles:
+    - common
+    - neutron-network
+
+- hosts: storage-volume
+  sudo: True
+  roles:
+    - common
+    - cinder-volume
+
+- hosts: compute-worker
+  sudo: True
+  roles:
+    - common
+    - nova-compute
+    - neutron-compute
+
+
diff --git a/compass/deploy/ansible/openstack_juno/network.yml b/compass/deploy/ansible/openstack_juno/network.yml
new file mode 100644 (file)
index 0000000..558f317
--- /dev/null
@@ -0,0 +1,8 @@
+---
+- hosts: all
+  remote_user: vagrant
+  sudo: True
+  roles:
+    - repo
+    - common
+    - neutron-network
diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/handlers/main.yml
new file mode 100644 (file)
index 0000000..ef671dd
--- /dev/null
@@ -0,0 +1,6 @@
+---
+- name: restart cinder-scheduler
+  service: name=cinder-scheduler state=restarted enabled=yes
+- name: restart cinder-api
+  service: name=cinder-api state=restarted enabled=yes
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_config.yml b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_config.yml
new file mode 100644 (file)
index 0000000..7796cf7
--- /dev/null
@@ -0,0 +1,20 @@
+---
+- name: sync cinder db
+  shell: su -s /bin/sh -c "cinder-manage db sync" cinder
+  register: result
+  until: result.rc == 0
+  retries: 5
+  delay: 3
+  notify:
+    - restart cinder-scheduler
+    - restart cinder-api
+
+- meta: flush_handlers
+
+- name: upload cinder keystone register script
+  template: src=cinder_init.sh dest=/opt/cinder_init.sh mode=0744
+
+- name: run cinder register script
+  shell: for i in {0..5}; do /opt/cinder_init.sh && touch cinder_init_complete; if [ $? != 0 ]; then sleep 5; else break; fi; done
+  args:
+    creates: cinder_init_complete
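The creates: argument makes the register task idempotent: once cinder_init_complete exists, re-runs skip the loop. Unrolled into plain bash, the retry logic above is equivalent to:

    for i in {0..5}; do
        if /opt/cinder_init.sh; then
            touch cinder_init_complete
            break
        fi
        sleep 5
    done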
diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_install.yml b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_install.yml
new file mode 100644 (file)
index 0000000..03ad432
--- /dev/null
@@ -0,0 +1,20 @@
+---
+- name: install cinder packages
+  apt: name={{ item }} state=present force=yes
+  with_items:
+    - cinder-api
+    - cinder-scheduler
+    - python-cinderclient
+
+- name: generate cinder service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+    - cinder-api
+    - cinder-scheduler
+
+- name: upload cinder conf
+  template: src=cinder.conf dest=/etc/cinder/cinder.conf
+  notify:
+    - restart cinder-scheduler
+    - restart cinder-api
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/main.yml
new file mode 100644 (file)
index 0000000..1dbe91f
--- /dev/null
@@ -0,0 +1,13 @@
+---
+- include: cinder_install.yml
+  tags:
+    - install
+    - cinder-install
+    - cinder
+
+- include: cinder_config.yml
+  when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+  tags:
+    - config
+    - cinder-config
+    - cinder
diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/api-paste.ini b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/api-paste.ini
new file mode 100644 (file)
index 0000000..b568a17
--- /dev/null
@@ -0,0 +1,71 @@
+#############
+# OpenStack #
+#############
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+[filter:authtoken]
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
+# auth_host = 127.0.0.1
+# auth_port = 35357
+# auth_protocol = http
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = cinder
+admin_password = {{ CINDER_PASS }}
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder.conf b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder.conf
new file mode 100644 (file)
index 0000000..e34fd2f
--- /dev/null
@@ -0,0 +1,63 @@
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+volume_group = cinder-volumes
+verbose = {{ VERBOSE }}
+debug = {{ DEBUG }}
+auth_strategy = keystone
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+notification_driver=cinder.openstack.common.notifier.rpc_notifier
+volumes_dir = /var/lib/cinder/volumes
+
+log_file=/var/log/cinder/cinder.log
+
+control_exchange = cinder
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+my_ip = {{ storage_controller_host }}
+
+glance_host = {{ HA_VIP }}
+glance_port = 9292
+api_rate_limit = False
+storage_availability_zone = nova
+
+quota_volumes = 10
+quota_gigabytes=1000
+quota_driver=cinder.quota.DbQuotaDriver
+
+osapi_volume_listen = {{ storage_controller_host }}
+osapi_volume_listen_port = 8776
+
+db_backend = sqlalchemy
+volume_name_template = volume-%s
+snapshot_name_template = snapshot-%s
+
+max_gigabytes=10000
+volume_group=cinder-volumes
+
+volume_clear=zero
+volume_clear_size=10
+
+iscsi_ip_address={{ storage_controller_host }}
+iscsi_port=3260
+iscsi_helper=tgtadm
+
+volumes_dir=/var/lib/cinder/volumes
+
+volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = cinder
+admin_password = {{ CINDER_PASS }}
+
+[database]
+connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder_init.sh b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder_init.sh
new file mode 100644 (file)
index 0000000..0ec61b6
--- /dev/null
@@ -0,0 +1,6 @@
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=cinder --pass={{ CINDER_PASS }} --email=cinder@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=cinder --tenant=service --role=admin
+
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=cinder --type=volume --description="OpenStack Block Storage"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s
+
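The registration can be verified afterwards with the same token-scoped client (same v2.0 CLI as above):

    keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list
    keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-list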
diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-volume/files/loop.yml b/compass/deploy/ansible/openstack_juno/roles/cinder-volume/files/loop.yml
new file mode 100644 (file)
index 0000000..e872652
--- /dev/null
@@ -0,0 +1 @@
+physical_device: /dev/loop0
diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-volume/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/cinder-volume/handlers/main.yml
new file mode 100644 (file)
index 0000000..d8e8852
--- /dev/null
@@ -0,0 +1,4 @@
+---
+- name: restart cinder-volume
+  service: name=cinder-volume state=restarted enabled=yes
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-volume/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/cinder-volume/tasks/main.yml
new file mode 100644 (file)
index 0000000..8c0e626
--- /dev/null
@@ -0,0 +1,55 @@
+---
+- name: install cinder-volume and lvm2 packages
+  apt: name={{ item }} state=present force=yes
+  with_items:
+    - cinder-volume
+    - lvm2
+
+- name: generate cinder volume service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+    - cinder-volume
+
+- name: check if physical device exists
+  stat: path={{ physical_device }}
+  register: st
+
+- name: replace physical_device if the device does not exist
+  local_action: copy src=loop.yml dest=/tmp/loop.yml
+  when: st.stat.exists == False
+
+- name: load loop.yml
+  include_vars: /tmp/loop.yml
+  when: st.stat.exists == False
+
+- name: check if cinder-volumes is mounted
+  shell: ls /mnt
+  register: cindervolumes
+
+- name: get available partition size
+  shell: df / | awk '$3 ~ /[0-9]+/ { print $4 }'
+  register: partition_size
+
+- name: if not present, create a sparse backing file for cinder-volumes
+  shell: dd if=/dev/zero of=/mnt/cinder-volumes
+         bs=1 count=0 seek={{ partition_size.stdout }}
+  when: cindervolumes.stdout != 'cinder-volumes'
+
+- name: get first lo device
+  shell: ls /dev/loop* | egrep 'loop[0-9]+'|sed -n 1p
+  register: first_lo
+  when: cindervolumes.stdout != 'cinder-volumes'
+
+- name: do a losetup on /mnt/cinder-volumes
+  shell: losetup {{ first_lo.stdout }} /mnt/cinder-volumes
+  when: cindervolumes.stdout != 'cinder-volumes'
+
+- name: create physical and group volumes
+  lvg: vg=cinder-volumes pvs={{ physical_device }}
+       vg_options=--force
+
+- name: upload cinder-volume configuration
+  template: src=cinder.conf dest=/etc/cinder/cinder.conf
+            backup=yes
+  notify:
+     - restart cinder-volume
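Condensed, the loopback fallback above amounts to (sketch; the free-space figure comes from the df task, and the lvg module performs the pvcreate/vgcreate step):

    dd if=/dev/zero of=/mnt/cinder-volumes bs=1 count=0 seek=<free-space>
    losetup /dev/loop0 /mnt/cinder-volumes
    vgcreate --force cinder-volumes /dev/loop0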
diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-volume/templates/cinder.conf b/compass/deploy/ansible/openstack_juno/roles/cinder-volume/templates/cinder.conf
new file mode 100644 (file)
index 0000000..aa3b8cc
--- /dev/null
@@ -0,0 +1,62 @@
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+volume_group = cinder-volumes
+verbose = True
+auth_strategy = keystone
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+notification_driver=cinder.openstack.common.notifier.rpc_notifier
+volumes_dir = /var/lib/cinder/volumes
+
+log_file=/var/log/cinder/cinder.log
+
+control_exchange = cinder
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+my_ip = {{ storage_controller_host }}
+
+glance_host = {{ HA_VIP }}
+glance_port = 9292
+api_rate_limit = False
+storage_availability_zone = nova
+
+quota_volumes = 10
+quota_gigabytes=1000
+quota_driver=cinder.quota.DbQuotaDriver
+
+osapi_volume_listen = {{ storage_controller_host }}
+osapi_volume_listen_port = 8776
+
+db_backend = sqlalchemy
+volume_name_template = volume-%s
+snapshot_name_template = snapshot-%s
+
+max_gigabytes=10000
+volume_group=cinder-volumes
+
+volume_clear=zero
+volume_clear_size=10
+
+iscsi_ip_address={{ storage_controller_host }}
+iscsi_port=3260
+iscsi_helper=tgtadm
+
+volumes_dir=/var/lib/cinder/volumes
+
+volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = cinder
+admin_password = {{ CINDER_PASS }}
+
+[database]
+connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
diff --git a/compass/deploy/ansible/openstack_juno/roles/common/files/sources.list.d/cloudarchive-juno.list b/compass/deploy/ansible/openstack_juno/roles/common/files/sources.list.d/cloudarchive-juno.list
new file mode 100644 (file)
index 0000000..920f3d2
--- /dev/null
@@ -0,0 +1 @@
+deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
diff --git a/compass/deploy/ansible/openstack_juno/roles/common/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/common/tasks/main.yml
new file mode 100644 (file)
index 0000000..ce595f5
--- /dev/null
@@ -0,0 +1,28 @@
+---
+- name: install ubuntu-cloud-keyring (ubuntu)
+  apt: name={{ item }} state=latest
+  with_items:
+    - ubuntu-cloud-keyring
+
+- name: update hosts file on all hosts
+  template: src=hosts
+            dest=/etc/hosts
+            backup=yes
+
+- name: install common packages
+  apt: name={{ item }} state=latest
+  with_items:
+    - python-pip
+    - python-dev
+    - python-mysqldb
+    - ntp
+
+- name: force an initial ntp time sync
+  command: su -s /bin/sh -c "service ntp stop; ntpd -gq; hwclock --systohc"
+  ignore_errors: True
+
+- name: update ntp conf
+  template: src=ntp.conf dest=/etc/ntp.conf backup=yes
+
+- name: restart ntp
+  service: name=ntp state=restarted enabled=yes
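The force-sync command above expands to three steps, run before the service is (re)enabled:

    service ntp stop
    ntpd -gq            # -g permits a large initial correction, -q exits once the clock is set
    hwclock --systohc   # write the corrected time to the hardware clock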
diff --git a/compass/deploy/ansible/openstack_juno/roles/common/templates/hosts b/compass/deploy/ansible/openstack_juno/roles/common/templates/hosts
new file mode 100644 (file)
index 0000000..9d27c0a
--- /dev/null
@@ -0,0 +1,22 @@
+# compute-controller
+10.145.89.136 host-136
+# database
+10.145.89.136 host-136
+# messaging
+10.145.89.136 host-136
+# storage-controller
+10.145.89.138 host-138
+# image
+10.145.89.138 host-138
+# identity
+10.145.89.136 host-136
+# network-server
+10.145.89.138 host-138
+# dashboard
+10.145.89.136 host-136
+# storage-volume
+10.145.89.139 host-139
+# network-worker
+10.145.89.139 host-139
+# compute-worker
+10.145.89.137 host-137
diff --git a/compass/deploy/ansible/openstack_juno/roles/common/templates/ntp.conf b/compass/deploy/ansible/openstack_juno/roles/common/templates/ntp.conf
new file mode 100644 (file)
index 0000000..c613809
--- /dev/null
@@ -0,0 +1,56 @@
+# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
+
+driftfile /var/lib/ntp/ntp.drift
+
+
+# Enable this if you want statistics to be logged.
+#statsdir /var/log/ntpstats/
+
+statistics loopstats peerstats clockstats
+filegen loopstats file loopstats type day enable
+filegen peerstats file peerstats type day enable
+filegen clockstats file clockstats type day enable
+
+# Specify one or more NTP servers.
+
+# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board
+# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for
+# more information.
+server {{ NTP_SERVER_LOCAL }}
+server 0.ubuntu.pool.ntp.org
+server 1.ubuntu.pool.ntp.org
+server 2.ubuntu.pool.ntp.org
+server 3.ubuntu.pool.ntp.org
+
+# Use Ubuntu's ntp server as a fallback.
+server ntp.ubuntu.com
+
+# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
+# details.  The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
+# might also be helpful.
+#
+# Note that "restrict" applies to both servers and clients, so a configuration
+# that might be intended to block requests from certain clients could also end
+# up blocking replies from your own upstream servers.
+
+# By default, exchange time with everybody, but don't allow configuration.
+restrict -4 default kod notrap nomodify nopeer noquery
+restrict -6 default kod notrap nomodify nopeer noquery
+
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+
+# Clients from this (example!) subnet have unlimited access, but only if
+# cryptographically authenticated.
+#restrict 192.168.123.0 mask 255.255.255.0 notrust
+
+
+# If you want to provide time to your local subnet, change the next line.
+# (Again, the address is an example only.)
+#broadcast 192.168.123.255
+
+# If you want to listen to time broadcasts on your local subnet, de-comment the
+# next lines.  Please do this only if you trust everybody on the network!
+#disable auth
+#broadcastclient
diff --git a/compass/deploy/ansible/openstack_juno/roles/dashboard/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/dashboard/tasks/main.yml
new file mode 100644 (file)
index 0000000..465b996
--- /dev/null
@@ -0,0 +1,30 @@
+---
+- name: install dashboard packages
+  apt: name={{ item }} state=present force=yes
+  with_items:
+    - apache2
+    - memcached
+    - libapache2-mod-wsgi
+    - openstack-dashboard
+
+- name: remove ubuntu theme
+  apt: name=openstack-dashboard-ubuntu-theme
+       state=absent
+
+## horizon configuration is already enabled in apache2/conf-enabled
+## by openstack-dashboard package deploy script.
+#- name: update dashboard conf
+#  template: src=openstack-dashboard.conf
+#            dest=/etc/apache2/sites-available/openstack-dashboard.conf
+#            backup=yes
+
+- name: update horizon settings
+  template: src=local_settings.py
+            dest=/etc/openstack-dashboard/local_settings.py
+            backup=yes
+
+- name: restart apache2
+  service: name=apache2 state=restarted enabled=yes
+
+- name: restart memcached
+  service: name=memcached state=restarted enabled=yes
diff --git a/compass/deploy/ansible/openstack_juno/roles/dashboard/templates/local_settings.py b/compass/deploy/ansible/openstack_juno/roles/dashboard/templates/local_settings.py
new file mode 100644 (file)
index 0000000..87e06e3
--- /dev/null
@@ -0,0 +1,511 @@
+import os
+
+from django.utils.translation import ugettext_lazy as _
+
+from openstack_dashboard import exceptions
+
+DEBUG = True
+TEMPLATE_DEBUG = DEBUG
+
+# Required for Django 1.5.
+# If horizon is running in production (DEBUG is False), set this
+# with the list of host/domain names that the application can serve.
+# For more information see:
+# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
+#ALLOWED_HOSTS = ['horizon.example.com', ]
+
+# Set SSL proxy settings:
+# For Django 1.4+ pass this header from the proxy after terminating the SSL,
+# and don't forget to strip it from the client's request.
+# For more information see:
+# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
+# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
+
+# If Horizon is being served through SSL, then uncomment the following two
+# settings to better secure the cookies from security exploits
+#CSRF_COOKIE_SECURE = True
+#SESSION_COOKIE_SECURE = True
+
+# Overrides for OpenStack API versions. Use this setting to force the
+# OpenStack dashboard to use a specific API version for a given service API.
+# NOTE: The version should be formatted as it appears in the URL for the
+# service API. For example, The identity service APIs have inconsistent
+# use of the decimal point, so valid options would be "2.0" or "3".
+# OPENSTACK_API_VERSIONS = {
+#     "identity": 3,
+#     "volume": 2
+# }
+
+# Set this to True if running on multi-domain model. When this is enabled, it
+# will require user to enter the Domain name in addition to username for login.
+# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
+
+# Overrides the default domain used when running on single-domain model
+# with Keystone V3. All entities will be created in the default domain.
+# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
+
+# Set Console type:
+# valid options would be "AUTO", "VNC", "SPICE" or "RDP"
+# CONSOLE_TYPE = "AUTO"
+
+# Default OpenStack Dashboard configuration.
+HORIZON_CONFIG = {
+    'dashboards': ('project', 'admin', 'settings',),
+    'default_dashboard': 'project',
+    'user_home': 'openstack_dashboard.views.get_user_home',
+    'ajax_queue_limit': 10,
+    'auto_fade_alerts': {
+        'delay': 3000,
+        'fade_duration': 1500,
+        'types': ['alert-success', 'alert-info']
+    },
+    'help_url': "http://docs.openstack.org",
+    'exceptions': {'recoverable': exceptions.RECOVERABLE,
+                   'not_found': exceptions.NOT_FOUND,
+                   'unauthorized': exceptions.UNAUTHORIZED},
+}
+
+# Specify a regular expression to validate user passwords.
+# HORIZON_CONFIG["password_validator"] = {
+#     "regex": '.*',
+#     "help_text": _("Your password does not meet the requirements.")
+# }
+
+# Disable simplified floating IP address management for deployments with
+# multiple floating IP pools or complex network requirements.
+# HORIZON_CONFIG["simple_ip_management"] = False
+
+# Turn off browser autocompletion for the login form if so desired.
+# HORIZON_CONFIG["password_autocomplete"] = "off"
+
+LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
+
+# Set custom secret key:
+# You can either set it to a specific value or you can let horizon generate a
+# default secret key that is unique on this machine, i.e. regardless of the
+# number of Python WSGI workers (if used behind Apache+mod_wsgi). However, there
+# may be situations where you would want to set this explicitly, e.g. when
+# multiple dashboard instances are distributed on different machines (usually
+# behind a load-balancer). Either you have to make sure that a session gets all
+# requests routed to the same dashboard instance or you set the same SECRET_KEY
+# for all of them.
+from horizon.utils import secret_key
+SECRET_KEY = 'AJDSKLAJDKASJDKASJDKSAJDKSJAKDSA'
+# We recommend you use memcached for development; otherwise after every reload
+# of the django development server, you will have to log in again. To use
+# memcached set CACHES to something like the following:
+CACHES = {
+    'default': {
+        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
+        'LOCATION': '127.0.0.1:11211',
+    }
+}
+
+#CACHES = {
+#    'default': {
+#        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
+#    }
+#}
+
+# Enable the Ubuntu theme if it is present.
+try:
+    from ubuntu_theme import *
+except ImportError:
+    pass
+
+# Default Ubuntu apache configuration uses /horizon as the application root.
+# Configure auth redirects here accordingly.
+LOGIN_URL = '/horizon/auth/login/'
+LOGOUT_URL = '/horizon/auth/logout/'
+LOGIN_REDIRECT_URL = '/horizon'
+
+# The Ubuntu package includes pre-compressed JS and compiled CSS to allow
+# offline compression by default.  To enable online compression instead,
+# install the node-less package and set the following option to False.
+COMPRESS_OFFLINE = True
+
+# By default, validation of the HTTP Host header is disabled.  Production
+# installations should have this set accordingly.  For more information
+# see https://docs.djangoproject.com/en/dev/ref/settings/.
+ALLOWED_HOSTS = ['{{ dashboard_host }}']
+
+# Send email to the console by default
+EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
+# Or send them to /dev/null
+#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
+
+# Configure these for your outgoing email host
+# EMAIL_HOST = 'smtp.my-company.com'
+# EMAIL_PORT = 25
+# EMAIL_HOST_USER = 'djangomail'
+# EMAIL_HOST_PASSWORD = 'top-secret!'
+
+# For multiple regions uncomment this configuration, and add (endpoint, title).
+# AVAILABLE_REGIONS = [
+#     ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
+#     ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
+# ]
+
+OPENSTACK_HOST = "{{ HA_VIP }}"
+OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
+OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
+
+# Disable SSL certificate checks (useful for self-signed certificates):
+# OPENSTACK_SSL_NO_VERIFY = True
+
+# The CA certificate to use to verify SSL connections
+# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
+
+# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
+# capabilities of the auth backend for Keystone.
+# If Keystone has been configured to use LDAP as the auth backend then set
+# can_edit_user to False and name to 'ldap'.
+#
+# TODO(tres): Remove these once Keystone has an API to identify auth backend.
+OPENSTACK_KEYSTONE_BACKEND = {
+    'name': 'native',
+    'can_edit_user': True,
+    'can_edit_group': True,
+    'can_edit_project': True,
+    'can_edit_domain': True,
+    'can_edit_role': True
+}
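+# A hypothetical override for an LDAP-backed Keystone, per the note above
+# (the remaining can_edit_* values are illustrative only):
+#OPENSTACK_KEYSTONE_BACKEND = {
+#    'name': 'ldap',
+#    'can_edit_user': False,
+#    'can_edit_group': True,
+#    'can_edit_project': True,
+#    'can_edit_domain': True,
+#    'can_edit_role': True
+#}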
+
+# Setting this to True will add a new "Retrieve Password" action on an
+# instance, allowing admin session password retrieval/decryption.
+#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
+
+# The Xen Hypervisor has the ability to set the mount point for volumes
+# attached to instances (other Hypervisors currently do not). Setting
+# can_set_mount_point to True will add the option to set the mount point
+# from the UI.
+OPENSTACK_HYPERVISOR_FEATURES = {
+    'can_set_mount_point': False,
+    'can_set_password': False,
+}
+
+# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
+# services provided by neutron. Options currently available are load
+# balancer service, security groups, quotas, VPN service.
+OPENSTACK_NEUTRON_NETWORK = {
+    'enable_lb': False,
+    'enable_firewall': False,
+    'enable_quotas': True,
+    'enable_vpn': False,
+    # The profile_support option is used to detect if an external router can be
+    # configured via the dashboard. When using specific plugins the
+    # profile_support can be turned on if needed.
+    'profile_support': None,
+    #'profile_support': 'cisco',
+}
+
+# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
+# in the OpenStack Dashboard related to the Image service, such as the list
+# of supported image formats.
+# OPENSTACK_IMAGE_BACKEND = {
+#     'image_formats': [
+#         ('', ''),
+#         ('aki', _('AKI - Amazon Kernel Image')),
+#         ('ami', _('AMI - Amazon Machine Image')),
+#         ('ari', _('ARI - Amazon Ramdisk Image')),
+#         ('iso', _('ISO - Optical Disk Image')),
+#         ('qcow2', _('QCOW2 - QEMU Emulator')),
+#         ('raw', _('Raw')),
+#         ('vdi', _('VDI')),
+#         ('vhd', _('VHD')),
+#         ('vmdk', _('VMDK'))
+#     ]
+# }
+
+# The IMAGE_CUSTOM_PROPERTY_TITLES setting is used to customize the titles for
+# image custom property attributes that appear on image detail pages.
+IMAGE_CUSTOM_PROPERTY_TITLES = {
+    "architecture": _("Architecture"),
+    "kernel_id": _("Kernel ID"),
+    "ramdisk_id": _("Ramdisk ID"),
+    "image_state": _("Euca2ools state"),
+    "project_id": _("Project ID"),
+    "image_type": _("Image Type")
+}
+
+# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is 'publicURL'.
+#OPENSTACK_ENDPOINT_TYPE = "publicURL"
+
+# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
+# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is None.  This
+# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
+#SECONDARY_ENDPOINT_TYPE = "publicURL"
+
+# The number of objects (Swift containers/objects or images) to display
+# on a single page before providing a paging element (a "more" link)
+# to paginate results.
+API_RESULT_LIMIT = 1000
+API_RESULT_PAGE_SIZE = 20
+
+# The timezone of the server. This should correspond with the timezone
+# of your entire OpenStack installation, and hopefully be in UTC.
+TIME_ZONE = "UTC"
+
+# When launching an instance, the menu of available flavors is
+# sorted by RAM usage, ascending. If you would like a different sort order,
+# you can provide another flavor attribute as sorting key. Alternatively, you
+# can provide a custom callback method to use for sorting. You can also provide
+# a flag for reverse sort. For more info, see
+# http://docs.python.org/2/library/functions.html#sorted
+# CREATE_INSTANCE_FLAVOR_SORT = {
+#     'key': 'name',
+#      # or
+#     'key': my_awesome_callback_method,
+#     'reverse': False,
+# }
+
+# The Horizon Policy Enforcement engine uses these values to load per service
+# policy rule files. The content of these files should match the files the
+# OpenStack services are using to determine role based access control in the
+# target installation.
+
+# Path to directory containing policy.json files
+#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
+# Map of local copy of service policy files
+#POLICY_FILES = {
+#    'identity': 'keystone_policy.json',
+#    'compute': 'nova_policy.json',
+#    'volume': 'cinder_policy.json',
+#    'image': 'glance_policy.json',
+#}
+
+# Trove user and database extension support. By default support for
+# creating users and databases on database instances is turned on.
+# To disable these extensions set the permission here to something
+# unusable such as ["!"].
+# TROVE_ADD_USER_PERMS = []
+# TROVE_ADD_DATABASE_PERMS = []
+
+LOGGING = {
+    'version': 1,
+    # When set to True this will disable all logging except
+    # for loggers specified in this configuration dictionary. Note that
+    # if nothing is specified here and disable_existing_loggers is True,
+    # django.db.backends will still log unless it is disabled explicitly.
+    'disable_existing_loggers': False,
+    'handlers': {
+        'null': {
+            'level': 'DEBUG',
+            'class': 'django.utils.log.NullHandler',
+        },
+        'console': {
+            # Set the level to "DEBUG" for verbose output logging.
+            'level': 'INFO',
+            'class': 'logging.StreamHandler',
+        },
+    },
+    'loggers': {
+        # Logging from django.db.backends is VERY verbose, send to null
+        # by default.
+        'django.db.backends': {
+            'handlers': ['null'],
+            'propagate': False,
+        },
+        'requests': {
+            'handlers': ['null'],
+            'propagate': False,
+        },
+        'horizon': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'openstack_dashboard': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'novaclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'cinderclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'keystoneclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'glanceclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'neutronclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'heatclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'ceilometerclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'troveclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'swiftclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'openstack_auth': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'nose.plugins.manager': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'django': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'iso8601': {
+            'handlers': ['null'],
+            'propagate': False,
+        },
+    }
+}
+
+# 'direction' should not be specified for all_tcp/udp/icmp.
+# It is specified in the form.
+SECURITY_GROUP_RULES = {
+    'all_tcp': {
+        'name': 'ALL TCP',
+        'ip_protocol': 'tcp',
+        'from_port': '1',
+        'to_port': '65535',
+    },
+    'all_udp': {
+        'name': 'ALL UDP',
+        'ip_protocol': 'udp',
+        'from_port': '1',
+        'to_port': '65535',
+    },
+    'all_icmp': {
+        'name': 'ALL ICMP',
+        'ip_protocol': 'icmp',
+        'from_port': '-1',
+        'to_port': '-1',
+    },
+    'ssh': {
+        'name': 'SSH',
+        'ip_protocol': 'tcp',
+        'from_port': '22',
+        'to_port': '22',
+    },
+    'smtp': {
+        'name': 'SMTP',
+        'ip_protocol': 'tcp',
+        'from_port': '25',
+        'to_port': '25',
+    },
+    'dns': {
+        'name': 'DNS',
+        'ip_protocol': 'tcp',
+        'from_port': '53',
+        'to_port': '53',
+    },
+    'http': {
+        'name': 'HTTP',
+        'ip_protocol': 'tcp',
+        'from_port': '80',
+        'to_port': '80',
+    },
+    'pop3': {
+        'name': 'POP3',
+        'ip_protocol': 'tcp',
+        'from_port': '110',
+        'to_port': '110',
+    },
+    'imap': {
+        'name': 'IMAP',
+        'ip_protocol': 'tcp',
+        'from_port': '143',
+        'to_port': '143',
+    },
+    'ldap': {
+        'name': 'LDAP',
+        'ip_protocol': 'tcp',
+        'from_port': '389',
+        'to_port': '389',
+    },
+    'https': {
+        'name': 'HTTPS',
+        'ip_protocol': 'tcp',
+        'from_port': '443',
+        'to_port': '443',
+    },
+    'smtps': {
+        'name': 'SMTPS',
+        'ip_protocol': 'tcp',
+        'from_port': '465',
+        'to_port': '465',
+    },
+    'imaps': {
+        'name': 'IMAPS',
+        'ip_protocol': 'tcp',
+        'from_port': '993',
+        'to_port': '993',
+    },
+    'pop3s': {
+        'name': 'POP3S',
+        'ip_protocol': 'tcp',
+        'from_port': '995',
+        'to_port': '995',
+    },
+    'ms_sql': {
+        'name': 'MS SQL',
+        'ip_protocol': 'tcp',
+        'from_port': '1433',
+        'to_port': '1433',
+    },
+    'mysql': {
+        'name': 'MYSQL',
+        'ip_protocol': 'tcp',
+        'from_port': '3306',
+        'to_port': '3306',
+    },
+    'rdp': {
+        'name': 'RDP',
+        'ip_protocol': 'tcp',
+        'from_port': '3389',
+        'to_port': '3389',
+    },
+}
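+# Additional services follow the same pattern; a hypothetical extra entry
+# (placed inside the dict above) for PostgreSQL would be:
+#    'postgresql': {
+#        'name': 'POSTGRESQL',
+#        'ip_protocol': 'tcp',
+#        'from_port': '5432',
+#        'to_port': '5432',
+#    },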
+
+FLAVOR_EXTRA_KEYS = {
+    'flavor_keys': [
+        ('quota:read_bytes_sec', _('Quota: Read bytes')),
+        ('quota:write_bytes_sec', _('Quota: Write bytes')),
+        ('quota:cpu_quota', _('Quota: CPU')),
+        ('quota:cpu_period', _('Quota: CPU period')),
+        ('quota:inbound_average', _('Quota: Inbound average')),
+        ('quota:outbound_average', _('Quota: Outbound average')),
+    ]
+}
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/dashboard/templates/openstack-dashboard.conf b/compass/deploy/ansible/openstack_juno/roles/dashboard/templates/openstack-dashboard.conf
new file mode 100644 (file)
index 0000000..a5a791a
--- /dev/null
@@ -0,0 +1,14 @@
+<VirtualHost *:80>
+
+WSGIScriptAlias / /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
+WSGIDaemonProcess horizon user=www-data group=www-data processes=3 threads=10
+Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/
+
+<Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi>
+Order allow,deny
+Allow from all
+</Directory>
+
+
+</VirtualHost>
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/database/files/my.cnf b/compass/deploy/ansible/openstack_juno/roles/database/files/my.cnf
new file mode 100644 (file)
index 0000000..d61f947
--- /dev/null
@@ -0,0 +1,131 @@
+#
+# The MySQL database server configuration file.
+#
+# You can copy this to one of:
+# - "/etc/mysql/my.cnf" to set global options,
+# - "~/.my.cnf" to set user-specific options.
+#
+# One can use all long options that the program supports.
+# Run program with --help to get a list of available options and with
+# --print-defaults to see which it would actually understand and use.
+#
+# For explanations see
+# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
+
+# This will be passed to all mysql clients
+# It has been reported that passwords should be enclosed with ticks/quotes
+# especially if they contain "#" chars...
+# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
+[client]
+port           = 3306
+socket         = /var/run/mysqld/mysqld.sock
+
+# Here are entries for some specific programs
+# The following values assume you have at least 32M ram
+
+# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
+[mysqld_safe]
+socket         = /var/run/mysqld/mysqld.sock
+nice           = 0
+
+[mysqld]
+#
+# * Basic Settings
+#
+user           = mysql
+pid-file       = /var/run/mysqld/mysqld.pid
+socket         = /var/run/mysqld/mysqld.sock
+port           = 3306
+basedir                = /usr
+datadir                = /var/lib/mysql
+tmpdir         = /tmp
+lc-messages-dir        = /usr/share/mysql
+skip-external-locking
+#
+# Instead of skip-networking the default is now to listen only on
+# localhost which is more compatible and is not less secure.
+bind-address           = 0.0.0.0
+#
+# * Fine Tuning
+#
+key_buffer             = 16M
+max_allowed_packet     = 16M
+thread_stack           = 192K
+thread_cache_size       = 8
+# This replaces the startup script and checks MyISAM tables if needed
+# the first time they are touched
+myisam-recover         = BACKUP
+#max_connections        = 100
+#table_cache            = 64
+#thread_concurrency     = 10
+#
+# * Query Cache Configuration
+#
+query_cache_limit      = 1M
+query_cache_size        = 16M
+#
+# * Logging and Replication
+#
+# Both locations get rotated by the cronjob.
+# Be aware that this log type is a performance killer.
+# As of 5.1 you can enable the log at runtime!
+#general_log_file        = /var/log/mysql/mysql.log
+#general_log             = 1
+#
+# Error log - should be very few entries.
+#
+log_error = /var/log/mysql/error.log
+#
+# Here you can see queries with especially long duration
+#log_slow_queries      = /var/log/mysql/mysql-slow.log
+#long_query_time = 2
+#log-queries-not-using-indexes
+#
+# The following can be used as easy to replay backup logs or for replication.
+# note: if you are setting up a replication slave, see README.Debian about
+#       other settings you may need to change.
+#server-id             = 1
+#log_bin                       = /var/log/mysql/mysql-bin.log
+expire_logs_days       = 10
+max_binlog_size         = 100M
+#binlog_do_db          = include_database_name
+#binlog_ignore_db      = include_database_name
+#
+# * InnoDB
+#
+# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
+# Read the manual for more InnoDB related options. There are many!
+#
+# * Security Features
+#
+# Read the manual, too, if you want chroot!
+# chroot = /var/lib/mysql/
+#
+# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
+#
+# ssl-ca=/etc/mysql/cacert.pem
+# ssl-cert=/etc/mysql/server-cert.pem
+# ssl-key=/etc/mysql/server-key.pem
+default-storage-engine = innodb
+innodb_file_per_table
+collation-server = utf8_general_ci
+init-connect = 'SET NAMES utf8'
+character-set-server = utf8
+
+[mysqldump]
+quick
+quote-names
+max_allowed_packet     = 16M
+
+[mysql]
+#no-auto-rehash        # faster start of mysql but no tab completion
+
+[isamchk]
+key_buffer             = 16M
+
+#
+# * IMPORTANT: Additional settings that can override those from this file!
+#   The files must end with '.cnf', otherwise they'll be ignored.
+#
+!includedir /etc/mysql/conf.d/
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/database/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/database/tasks/main.yml
new file mode 100644 (file)
index 0000000..e66f0cd
--- /dev/null
@@ -0,0 +1,12 @@
+---
+- name: copy data.sh
+  template: src=data.j2 dest=/opt/data.sh mode=0777
+  tags:
+    - mysql_user
+
+- include: mysql.yml
+  when: HA_CLUSTER is not defined
+
+- include: mariadb.yml
+  when: HA_CLUSTER is defined
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/database/tasks/mariadb.yml b/compass/deploy/ansible/openstack_juno/roles/database/tasks/mariadb.yml
new file mode 100644 (file)
index 0000000..f87ea2f
--- /dev/null
@@ -0,0 +1,61 @@
+---
+- name: install mariadb prerequisite packages
+  apt: name={{ item }} state=present force=yes
+  with_items:
+      - libaio1
+      - libssl0.9.8
+      #- mariadb-client-5.5
+      - mysql-client-5.5
+      - python-mysqldb
+
+- name: download mariadb and galera deb packages
+  get_url: url={{ item.url }} dest=/opt/{{ item.filename }}
+  register: result
+  until: result|success
+  retries: 5
+  delay: 3
+  with_items:
+    - { url:  "{{ MARIADB_URL }}", filename: "{{ MARIADB }}" }
+    - { url:  "{{ GALERA_URL }}", filename: "{{ GALERA }}" }
+
+- name: install mariadb and galera packages
+  command: dpkg -i /opt/{{ item }}
+  with_items:
+    - "{{ MARIADB }}"
+    - "{{ GALERA }}"
+
+- name: create mysql log directory
+  file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
+
+- name: update mariadb my.cnf
+  template: src=my.cnf dest=/etc/mysql/my.cnf backup=yes
+
+- name: update galera wsrep.cnf
+  template: src=wsrep.cnf dest=/etc/mysql/conf.d/wsrep.cnf backup=yes
+
+- name: update wsrep_sst_rsync uid 
+  lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*uid = \$MYUID$"  backup=yes
+
+- name: update wsrep_sst_rsync gid 
+  lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*gid = \$MYGID$"  backup=yes
+
+- name: manually restart mysql server
+  service: name=mysql state=restarted enabled=yes
+  register: result
+  until: result|success
+  retries: 5
+  delay: 5
+  tags:
+    - mysql_restart
+
+- name: generate mysql service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+   - mysql
+
+
+- name: create database/user
+  shell: /opt/data.sh
+  when: HA_CLUSTER[inventory_hostname] == ''
+  tags:
+    - mysql_user
diff --git a/compass/deploy/ansible/openstack_juno/roles/database/tasks/mysql.yml b/compass/deploy/ansible/openstack_juno/roles/database/tasks/mysql.yml
new file mode 100644 (file)
index 0000000..327b656
--- /dev/null
@@ -0,0 +1,22 @@
+---
+- name: install mysql client and server packages
+  apt: name={{ item }} state=present
+  with_items:
+    - python-mysqldb
+    - mysql-server
+
+- name: create mysql log directory
+  file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
+
+- name: update mysql my.cnf
+  copy: src=my.cnf
+        dest=/etc/mysql/my.cnf
+        backup=yes
+
+- name: manually restart mysql server
+  shell: service mysql restart
+
+- name: create database/user
+  shell: /opt/data.sh
+  tags:
+    - mysql_user
diff --git a/compass/deploy/ansible/openstack_juno/roles/database/templates/data.j2 b/compass/deploy/ansible/openstack_juno/roles/database/templates/data.j2
new file mode 100644 (file)
index 0000000..c894b32
--- /dev/null
@@ -0,0 +1,39 @@
+#!/bin/sh
+mysql -uroot -Dmysql <<EOF
+drop database if exists keystone;
+drop database if exists glance;
+drop database if exists neutron;
+drop database if exists nova;
+drop database if exists cinder;
+
+CREATE DATABASE keystone;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON keystone.* TO 'keystone'@'{{ host }}' IDENTIFIED BY '{{ KEYSTONE_DBPASS }}';
+{% endfor %}
+
+CREATE DATABASE glance;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON glance.* TO 'glance'@'{{ host }}' IDENTIFIED BY '{{ GLANCE_DBPASS }}';
+{% endfor %}
+
+CREATE DATABASE neutron;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON neutron.* TO 'neutron'@'{{ host }}' IDENTIFIED BY '{{ NEUTRON_DBPASS }}';
+{% endfor %}
+
+CREATE DATABASE nova;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON nova.* TO 'nova'@'{{ host }}' IDENTIFIED BY '{{ NOVA_DBPASS }}';
+{% endfor %}
+
+CREATE DATABASE cinder;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON cinder.* TO 'cinder'@'{{ host }}' IDENTIFIED BY '{{ CINDER_DBPASS }}';
+{% endfor %}
+
+{% if WSREP_SST_USER is defined %}
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON *.* TO '{{ WSREP_SST_USER }}'@'{{ host }}' IDENTIFIED BY '{{ WSREP_SST_PASS }}';
+{% endfor %}
+{% endif %}
+EOF
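A minimal sketch for previewing the SQL this template renders, assuming
jinja2 is available locally; the passwords below are placeholders for the
values Ansible would supply:

    # preview_data_sql.py -- render data.j2 the way the Ansible template
    # module would, to inspect the generated SQL before a deploy.
    from jinja2 import Template

    with open('data.j2') as f:
        template = Template(f.read())

    print(template.render(
        inventory_hostname='controller1',   # stand-in for the Ansible fact
        KEYSTONE_DBPASS='keystone-secret',  # placeholder passwords
        GLANCE_DBPASS='glance-secret',
        NEUTRON_DBPASS='neutron-secret',
        NOVA_DBPASS='nova-secret',
        CINDER_DBPASS='cinder-secret',
    ))
    # WSREP_SST_USER is left undefined here, so the Galera grant block at
    # the end of the template is skipped, as in a non-HA run.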
diff --git a/compass/deploy/ansible/openstack_juno/roles/database/templates/my.cnf b/compass/deploy/ansible/openstack_juno/roles/database/templates/my.cnf
new file mode 100644 (file)
index 0000000..165d619
--- /dev/null
@@ -0,0 +1,134 @@
+#
+# The MySQL database server configuration file.
+#
+# You can copy this to one of:
+# - "/etc/mysql/my.cnf" to set global options,
+# - "~/.my.cnf" to set user-specific options.
+#
+# One can use all long options that the program supports.
+# Run program with --help to get a list of available options and with
+# --print-defaults to see which it would actually understand and use.
+#
+# For explanations see
+# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
+
+# This will be passed to all mysql clients
+# It has been reported that passwords should be enclosed with ticks/quotes
+# especially if they contain "#" chars...
+# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
+[client]
+port           = 3306
+socket         = /var/run/mysqld/mysqld.sock
+
+# Here are entries for some specific programs
+# The following values assume you have at least 32M ram
+
+# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
+[mysqld_safe]
+socket         = /var/run/mysqld/mysqld.sock
+nice           = 0
+
+[mysqld]
+#
+# * Basic Settings
+#
+user           = mysql
+pid-file       = /var/run/mysqld/mysqld.pid
+socket         = /var/run/mysqld/mysqld.sock
+port           = 3306
+basedir                = /usr
+datadir                = /var/lib/mysql
+tmpdir         = /tmp
+lc-messages-dir        = /usr/share/mysql
+skip-external-locking
+skip-name-resolve
+#
+# Instead of skip-networking the default is now to listen only on
+# localhost which is more compatible and is not less secure.
+#bind-address          = {{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}
+bind-address           = {{ HA_VIP }} 
+#
+# * Fine Tuning
+#
+key_buffer             = 16M
+max_allowed_packet     = 16M
+thread_stack           = 192K
+thread_cache_size       = 8
+# This replaces the startup script and checks MyISAM tables if needed
+# the first time they are touched
+myisam-recover         = BACKUP
+max_connections        = 2000
+max_connect_errors     = 8000
+#table_cache            = 64
+#thread_concurrency     = 10
+#
+# * Query Cache Configuration
+#
+query_cache_limit      = 1M
+query_cache_size        = 16M
+#
+# * Logging and Replication
+#
+# Both locations get rotated by the cronjob.
+# Be aware that this log type is a performance killer.
+# As of 5.1 you can enable the log at runtime!
+general_log_file        = /var/log/mysql/mysql.log
+#general_log             = 1
+#
+# Error log - should be very few entries.
+#
+log_error = /var/log/mysql/error.log
+#
+# Here you can see queries with especially long duration
+#log_slow_queries      = /var/log/mysql/mysql-slow.log
+#long_query_time = 2
+#log-queries-not-using-indexes
+#
+# The following can be used as easy to replay backup logs or for replication.
+# note: if you are setting up a replication slave, see README.Debian about
+#       other settings you may need to change.
+#server-id             = 1
+#log_bin                       = /var/log/mysql/mysql-bin.log
+expire_logs_days       = 10
+max_binlog_size         = 100M
+#binlog_do_db          = include_database_name
+#binlog_ignore_db      = include_database_name
+#
+# * InnoDB
+#
+# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
+# Read the manual for more InnoDB related options. There are many!
+#
+# * Security Features
+#
+# Read the manual, too, if you want chroot!
+# chroot = /var/lib/mysql/
+#
+# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
+#
+# ssl-ca=/etc/mysql/cacert.pem
+# ssl-cert=/etc/mysql/server-cert.pem
+# ssl-key=/etc/mysql/server-key.pem
+default-storage-engine = innodb
+innodb_file_per_table
+collation-server = utf8_general_ci
+init-connect = 'SET NAMES utf8'
+character-set-server = utf8
+
+[mysqldump]
+quick
+quote-names
+max_allowed_packet     = 16M
+
+[mysql]
+#no-auto-rehash        # faster start of mysql but no tab completion
+
+[isamchk]
+key_buffer             = 16M
+
+#
+# * IMPORTANT: Additional settings that can override those from this file!
+#   The files must end with '.cnf', otherwise they'll be ignored.
+#
+!includedir /etc/mysql/conf.d/
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/database/templates/wsrep.cnf b/compass/deploy/ansible/openstack_juno/roles/database/templates/wsrep.cnf
new file mode 100644 (file)
index 0000000..b9e9424
--- /dev/null
@@ -0,0 +1,126 @@
+# This file contains wsrep-related mysqld options. It should be included
+# in the main MySQL configuration file.
+#
+# Options that need to be customized:
+#  - wsrep_provider
+#  - wsrep_cluster_address
+#  - wsrep_sst_auth
+# The rest of defaults should work out of the box.
+
+##
+## mysqld options _MANDATORY_ for correct operation of the cluster
+##
+[mysqld]
+
+# (This must be substituted by wsrep_format)
+binlog_format=ROW
+
+# Currently only InnoDB storage engine is supported
+default-storage-engine=innodb
+
+# to avoid issues with 'bulk mode inserts' using autoinc
+innodb_autoinc_lock_mode=2
+
+# This is a must for parallel applying
+innodb_locks_unsafe_for_binlog=1
+
+# Query Cache is not supported with wsrep
+query_cache_size=0
+query_cache_type=0
+
+# Override bind-address
+# In some systems bind-address defaults to 127.0.0.1, and with mysqldump SST
+# it will have (most likely) disastrous consequences on the donor node
+#bind-address={{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}
+bind-address={{ HA_VIP }}
+
+##
+## WSREP options
+##
+
+# Full path to wsrep provider library or 'none'
+wsrep_provider=/usr/lib/galera/libgalera_smm.so
+
+# Provider specific configuration options
+#wsrep_provider_options=
+
+# Logical cluster name. Should be the same for all nodes.
+wsrep_cluster_name="my_wsrep_cluster"
+
+# Group communication system handle
+wsrep_cluster_address=gcomm://{{ HA_CLUSTER[inventory_hostname] }}
+
+# Human-readable node name (non-unique). Hostname by default.
+#wsrep_node_name=
+
+# Base replication <address|hostname>[:port] of the node.
+# The values supplied will be used as defaults for state transfer receiving,
+# listening ports and so on. Default: address of the first network interface.
+wsrep_node_address={{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}
+
+# Address for incoming client connections. Autodetect by default.
+#wsrep_node_incoming_address=
+
+# How many threads will process writesets from other nodes
+wsrep_slave_threads=1
+
+# DBUG options for wsrep provider
+#wsrep_dbug_option
+
+# Generate fake primary keys for non-PK tables (required for multi-master
+# and parallel applying operation)
+wsrep_certify_nonPK=1
+
+# Maximum number of rows in write set
+wsrep_max_ws_rows=131072
+
+# Maximum size of write set
+wsrep_max_ws_size=1073741824
+
+# to enable debug level logging, set this to 1
+wsrep_debug=1
+
+# convert locking sessions into transactions
+wsrep_convert_LOCK_to_trx=0
+
+# how many times to retry deadlocked autocommits
+wsrep_retry_autocommit=1
+
+# change auto_increment_increment and auto_increment_offset automatically
+wsrep_auto_increment_control=1
+
+# retry autoinc insert, which failed for duplicate key error
+wsrep_drupal_282555_workaround=0
+
+# enable "strictly synchronous" semantics for read operations
+wsrep_causal_reads=0
+
+# Command to call when node status or cluster membership changes.
+# Will be passed all or some of the following options:
+# --status  - new status of this node
+# --uuid    - UUID of the cluster
+# --primary - whether the component is primary or not ("yes"/"no")
+# --members - comma-separated list of members
+# --index   - index of this node in the list
+wsrep_notify_cmd=
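The notify command can be any executable; a minimal sketch in Python
(hypothetical path, logging each change Galera reports to syslog):

    #!/usr/bin/env python
    # /usr/local/bin/wsrep_notify.py -- log status/membership changes.
    import sys
    import syslog

    syslog.openlog('wsrep')
    # argv carries option pairs such as: --status Synced --primary yes
    syslog.syslog(syslog.LOG_INFO, ' '.join(sys.argv[1:]))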
+
+##
+## WSREP State Transfer options
+##
+
+# State Snapshot Transfer method
+wsrep_sst_method=rsync
+
+# Address on THIS node to receive SST at. DON'T SET IT TO DONOR ADDRESS!!!
+# (SST method dependent. Defaults to the first IP of the first interface)
+#wsrep_sst_receive_address=
+
+# SST authentication string. This will be used to send SST to joining nodes.
+# Depends on SST method. For mysqldump method it is root:<root password>
+wsrep_sst_auth={{ WSREP_SST_USER }}:{{ WSREP_SST_PASS }}
+
+# Desired SST donor name.
+#wsrep_sst_donor=
+
+# Protocol version to use
+# wsrep_protocol_version=
diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/glance/handlers/main.yml
new file mode 100644 (file)
index 0000000..d8eaa44
--- /dev/null
@@ -0,0 +1,6 @@
+---
+- name: restart glance-api
+  service: name=glance-api state=restarted enabled=yes
+
+- name: restart glance-registry
+  service: name=glance-registry state=restarted enabled=yes
diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_config.yml b/compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_config.yml
new file mode 100644 (file)
index 0000000..28392a3
--- /dev/null
@@ -0,0 +1,29 @@
+---
+- name: init glance db version
+  shell: glance-manage db_version_control 0
+
+- name: sync glance db
+  shell: sleep 15; su -s /bin/sh -c "glance-manage db_sync" glance
+  register: result
+  until: result.rc == 0
+  retries: 5
+  delay: 3
+  notify:
+    - restart glance-registry
+    - restart glance-api
+
+- meta: flush_handlers
+
+- name: place image upload script
+  template: src=image_upload.sh dest=/opt/image_upload.sh mode=0744
+
+- name: download cirros image file
+  get_url: url={{ build_in_image }} dest=/opt/{{ build_in_image_name }}
+
+- name: wait for port 9292 to become available
+  wait_for: host={{ image_host }} port=9292 delay=5
+
+- name: run image upload
+  shell: for i in {0..5}; do /opt/image_upload.sh && touch image_upload_completed; if [ $? != 0 ]; then sleep 5; else break; fi; done
+  args:
+    creates: image_upload_completed
diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_install.yml b/compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_install.yml
new file mode 100644 (file)
index 0000000..505b3b0
--- /dev/null
@@ -0,0 +1,26 @@
+---
+- name: install glance packages
+  apt: name={{ item }} state=latest force=yes
+  with_items:
+    - glance
+    - python-glanceclient
+
+- name: generate glance service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+    - glance-registry
+    - glance-api
+
+- name: update glance conf
+  template: src={{ item }} dest=/etc/glance/{{ item }}
+            backup=yes
+  with_items:
+    - glance-api.conf
+    - glance-registry.conf
+  notify:
+    - restart glance-registry
+    - restart glance-api
+
+- name: remove default sqlite db
+  shell: rm /var/lib/glance/glance.sqlite || touch glance.sqlite.db.removed
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/glance/tasks/main.yml
new file mode 100644 (file)
index 0000000..296f0dc
--- /dev/null
@@ -0,0 +1,18 @@
+---
+- include: glance_install.yml
+  tags:
+    - install
+    - glance_install
+    - glance
+
+- include: nfs.yml
+  tags:
+    - nfs
+
+- include: glance_config.yml
+  when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+  tags:
+    - config
+    - glance_config
+    - glance
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/tasks/nfs.yml b/compass/deploy/ansible/openstack_juno/roles/glance/tasks/nfs.yml
new file mode 100644 (file)
index 0000000..c03ab4d
--- /dev/null
@@ -0,0 +1,41 @@
+---
+- name: get nfs server
+  local_action: shell  /sbin/ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6| grep "10" -m 1 |awk '{print $2}'|tr -d "addr:"
+  register: ip_info
+  run_once: True
+
+- name: install nfs
+  local_action: yum  name=nfs-utils state=present
+  run_once: True
+
+- name: create image directory
+  local_action: file path=/opt/images state=directory mode=0777
+  run_once: True
+
+- name: update nfs config
+  local_action: lineinfile dest=/etc/exports state=present
+              regexp="/opt/images *(rw,insecure,sync,all_squash)"
+              line="/opt/images *(rw,insecure,sync,all_squash)"
+  run_once: True
+
+- name: restart nfs service
+  local_action: service name=nfs state=restarted enabled=yes
+  run_once: True
+
+- name: install nfs-common
+  apt: name=nfs-common state=present
+
+- name: get mount info
+  command: mount
+  register: mount_info
+
+- name: mount image directory
+  shell: |
+    mount -t nfs  -onfsvers=3 {{ item }}:/opt/images /var/lib/glance/images
+    sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
+    echo {{ item }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
+  when: mount_info.stdout.find('images') == -1
+  with_items:
+      ip_info.stdout_lines
+  retries: 5
+  delay: 3
diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-api.conf b/compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-api.conf
new file mode 100644 (file)
index 0000000..763539e
--- /dev/null
@@ -0,0 +1,677 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Which backend scheme should Glance use by default if one is not specified
+# in a request to add a new image to Glance? Known schemes are determined
+# by the known_stores option below.
+# Default: 'file'
+# "default_store" option has been moved to [glance_store] section in 
+# Juno release
+
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+# Existing but disabled stores:
+#      glance.store.rbd.Store,
+#      glance.store.s3.Store,
+#      glance.store.swift.Store,
+#      glance.store.sheepdog.Store,
+#      glance.store.cinder.Store,
+#      glance.store.gridfs.Store,
+#      glance.store.vmware_datastore.Store,
+#known_stores = glance.store.filesystem.Store,
+#               glance.store.http.Store
+
+
+# Maximum image size (in bytes) that may be uploaded through the
+# Glance API server. Defaults to 1 TB.
+# WARNING: this value should only be increased after careful consideration
+# and must be set to a value under 8 EB (9223372036854775808).
+#image_size_cap = 1099511627776
+
+# Address to bind the API server
+bind_host = {{ image_host }}
+
+# Port to bind the API server to
+bind_port = 9292
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/glance/api.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package, it is also possible to use: glance.db.registry.api
+# data_api = glance.db.sqlalchemy.api
+
+# Number of Glance API worker processes to start.
+# On machines with more than one CPU increasing this value
+# may improve performance (especially if using SSL with
+# compression turned on). It is typically recommended to set
+# this value to the number of CPUs present on your machine.
+workers = 1
+
+# Maximum line size of message headers to be accepted.
+# max_header_line may need to be increased when using large tokens
+# (typically those generated by the Keystone v3 API with big service
+# catalogs)
+# max_header_line = 16384
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware.
+#allow_anonymous_access = False
+
+# Allow access to version 1 of glance api
+#enable_v1_api = True
+
+# Allow access to version 2 of glance api
+#enable_v2_api = True
+
+# Return the URL that references where the data is stored on
+# the backend storage system.  For example, if using the
+# file system store a URL of 'file:///path/to/image' will
+# be returned to the user in the 'direct_url' meta-data field.
+# The default value is false.
+#show_image_direct_url = False
+
+# Send headers containing user and tenant information when making requests to
+# the v1 glance registry. This allows the registry to function as if a user is
+# authenticated without the need to authenticate a user itself using the
+# auth_token middleware.
+# The default value is false.
+#send_identity_headers = False
+
+# Supported values for the 'container_format' image attribute
+#container_formats=ami,ari,aki,bare,ovf,ova
+
+# Supported values for the 'disk_format' image attribute
+#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso
+
+# Directory to use for lock files. Default to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+
+# Property Protections config file
+# This file contains the rules for property protections and the roles/policies
+# associated with it.
+# If this config value is not specified, by default, property protections
+# won't be enforced.
+# If a value is specified and the file is not found, then the glance-api
+# service will not start.
+#property_protection_file =
+
+# Specify whether 'roles' or 'policies' are used in the
+# property_protection_file.
+# The default value for property_protection_rule_format is 'roles'.
+#property_protection_rule_format = roles
+
+# Specifies how long (in hours) a task is supposed to live in the tasks DB
+# after succeeding or failing before getting soft-deleted.
+# The default value for task_time_to_live is 48 hours.
+# task_time_to_live = 48
+
+# This value sets what strategy will be used to determine the image location
+# order. Currently two strategies are packaged with Glance 'location_order'
+# and 'store_type'.
+#location_strategy = location_order
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL0
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting API server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+
+# ============ Registry Options ===============================
+
+# Address to find the registry server
+registry_host = {{ internal_ip }}
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# What protocol to use when connecting to the registry server?
+# Set to https for secure HTTP communication
+registry_client_protocol = http
+
+# The path to the key file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
+#registry_client_key_file = /path/to/key/file
+
+# The path to the cert file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
+#registry_client_cert_file = /path/to/cert/file
+
+# The path to the certifying authority cert file to use in SSL connections
+# to the registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
+#registry_client_ca_file = /path/to/ca/file
+
+# When using SSL in connections to the registry server, do not require
+# validation via a certifying authority. This is the registry's equivalent of
+# specifying --insecure on the command line using glanceclient for the API
+# Default: False
+#registry_client_insecure = False
+
+# The period of time, in seconds, that the API server will wait for a registry
+# request to complete. A value of '0' implies no timeout.
+# Default: 600
+#registry_client_timeout = 600
+
+# Whether to automatically create the database tables.
+# Default: False
+#db_auto_create = False
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
+# Pass the user's token through for API requests to the registry.
+# Default: True
+#use_user_token = True
+
+# If 'use_user_token' is not in effect then admin credentials
+# can be specified. Requests to the registry on behalf of
+# the API will use these credentials.
+# Admin user name
+#admin_user = None
+# Admin password
+#admin_password = None
+# Admin tenant name
+#admin_tenant_name = None
+# Keystone endpoint
+#auth_url = None
+# Keystone region
+#auth_region = None
+# Auth strategy
+#auth_strategy = keystone
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when images are created, updated or deleted.
+# There are four methods of sending notifications: logging (via the
+# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid
+# message queue), or noop (no notifications sent, the default)
+# NOTE: THIS CONFIGURATION OPTION HAS BEEN DEPRECATED IN FAVOR OF `notification_driver`
+# notifier_strategy = default
+
+# Driver or drivers to handle sending notifications
+# notification_driver = noop
+
+# Default publisher_id for outgoing notifications.
+# default_publisher_id = image.localhost
+
+# Configuration options if sending notifications via rabbitmq (these are
+# the defaults)
+rabbit_host = localhost
+rabbit_port = 5672
+rabbit_use_ssl = false
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_virtual_host = /
+rabbit_notification_exchange = glance
+rabbit_notification_topic = notifications
+rabbit_durable_queues = False
+
+# Configuration options if sending notifications via Qpid (these are
+# the defaults)
+qpid_notification_exchange = glance
+qpid_notification_topic = notifications
+qpid_hostname = localhost
+qpid_port = 5672
+qpid_username =
+qpid_password =
+qpid_sasl_mechanisms =
+qpid_reconnect_timeout = 0
+qpid_reconnect_limit = 0
+qpid_reconnect_interval_min = 0
+qpid_reconnect_interval_max = 0
+qpid_reconnect_interval = 0
+qpid_heartbeat = 5
+# Set to 'ssl' to enable SSL
+qpid_protocol = tcp
+qpid_tcp_nodelay = True
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+# this option has been moved to [glance_store] for Juno release
+# filesystem_store_datadir = /var/lib/glance/images/
+
+# A list of directories where image data can be stored.
+# This option may be specified multiple times for specifying multiple store
+# directories. Either one of filesystem_store_datadirs or
+# filesystem_store_datadir option is required. A priority number may be given
+# after each directory entry, separated by a ":".
+# When adding an image, the highest priority directory will be selected, unless
+# there is not enough space available in cases where the image size is already
+# known. If no priority is given, it is assumed to be zero and the directory
+# will be considered for selection last. If multiple directories have the same
+# priority, then the one with the most free space available is selected.
+# If same store is specified multiple times then BadStoreConfiguration
+# exception will be raised.
+#filesystem_store_datadirs = /var/lib/glance/images/:1
+
+# A path to a JSON file that contains metadata describing the storage
+# system.  When show_multiple_locations is True the information in this
+# file will be returned with any location that is contained in this
+# store.
+#filesystem_store_metadata_file = None
+
+# ============ Swift Store Options =============================
+
+# Version of the authentication service to use
+# Valid versions are '2' for keystone and '1' for swauth and rackspace
+swift_store_auth_version = 2
+
+# Address where the Swift authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified,  default to 'https://'
+# For swauth, use something like '127.0.0.1:8080/v1.0/'
+swift_store_auth_address = 127.0.0.1:5000/v2.0/
+
+# User to authenticate against the Swift authentication service
+# If you use Swift authentication service, set it to 'account':'user'
+# where 'account' is a Swift storage account and 'user'
+# is a user in that account
+swift_store_user = jdoe:jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that the account should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# What size, in MB, should Glance start chunking image files
+# and do a large object manifest in Swift? By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# When doing a large object manifest, what size, in MB, should
+# Glance write chunks to Swift? This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file, and the default is 200MB
+swift_store_large_object_chunk_size = 200
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# If set to True enables multi-tenant storage mode which causes Glance images
+# to be stored in tenant specific Swift accounts.
+#swift_store_multi_tenant = False
+
+# A list of swift ACL strings that will be applied as both read and
+# write ACLs to the containers created by Glance in multi-tenant
+# mode. This grants the specified tenants/users read and write access
+# to all newly created image objects. The standard swift ACL string
+# formats are allowed, including:
+# <tenant_id>:<username>
+# <tenant_name>:<username>
+# *:<username>
+# Multiple ACLs can be combined using a comma separated list, for
+# example: swift_store_admin_tenants = service:glance,*:admin
+#swift_store_admin_tenants =
+
+# The region of the swift endpoint to be used for single tenant. This setting
+# is only necessary if the tenant has multiple swift endpoints.
+#swift_store_region =
+
+# If set to False, disables SSL layer compression of https swift requests.
+# Setting to 'False' may improve performance for images which are already
+# in a compressed format, eg qcow2. If set to True, enables SSL layer
+# compression (provided it is supported by the target swift proxy).
+#swift_store_ssl_compression = True
+
+# The number of times a Swift download will be retried before the
+# request fails
+#swift_store_retry_get_count = 0
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified,  default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Container within the account that the account should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+#s3_store_object_buffer_dir = /path/to/dir
+
+# When forming a bucket url, boto will either set the bucket name as the
+# subdomain or as the first token of the path. Amazon's S3 service will
+# accept it as the subdomain, but Swift's S3 middleware requires it be
+# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
+#s3_store_bucket_url_format = subdomain
+
+# ============ RBD Store Options =============================
+
+# Ceph configuration file path
+# If using cephx authentication, this file should
+# include a reference to the right keyring
+# in a client.<USER> section
+#rbd_store_ceph_conf = /etc/ceph/ceph.conf
+
+# RADOS user to authenticate as (only applicable if using cephx)
+# If <None>, a default will be chosen based on the client. section
+# in rbd_store_ceph_conf
+#rbd_store_user = <None>
+
+# RADOS pool in which images are stored
+#rbd_store_pool = images
+
+# RADOS images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+#rbd_store_chunk_size = 8
+
+# ============ Sheepdog Store Options =============================
+
+sheepdog_store_address = localhost
+
+sheepdog_store_port = 7000
+
+# Images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+sheepdog_store_chunk_size = 64
+
+# ============ Cinder Store Options ===============================
+
+# Info to match when looking for cinder in the service catalog
+# Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of ca certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ============ VMware Datastore Store Options =====================
+
+# ESX/ESXi or vCenter Server target system.
+# The server value can be an IP address or a DNS name
+# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
+#vmware_server_host = <None>
+
+# Server username (string value)
+#vmware_server_username = <None>
+
+# Server password (string value)
+#vmware_server_password = <None>
+
+# Inventory path to a datacenter (string value)
+# Value optional when vmware_server_ip is an ESX/ESXi host: if specified
+# should be `ha-datacenter`.
+#vmware_datacenter_path = <None>
+
+# Datastore associated with the datacenter (string value)
+#vmware_datastore_name = <None>
+
+# The number of times we retry on failures
+# e.g., socket error, etc (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks
+# invoked on VMware ESX/VC server in seconds (integer value)
+#vmware_task_poll_interval = 5
+
+# Absolute path of the folder containing the images in the datastore
+# (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# Allow to perform insecure SSL requests to the target system (boolean value)
+#vmware_api_insecure = False
+
+# ============ Delayed Delete Options =============================
+
+# Turn on/off delayed delete
+delayed_delete = False
+
+# Delayed delete time in seconds
+scrub_time = 43200
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-scrubber.conf
+scrubber_datadir = /var/lib/glance/scrubber
+
+# =============== Quota Options ==================================
+
+# The maximum number of image members allowed per image
+#image_member_quota = 128
+
+# The maximum number of image properties allowed per image
+#image_property_quota = 128
+
+# The maximum number of tags allowed per image
+#image_tag_quota = 128
+
+# The maximum number of locations allowed per image
+#image_location_quota = 10
+
+# Set a system wide quota for every user.  This value is the total number
+# of bytes that a user can use across all storage systems.  A value of
+# 0 means unlimited.
+#user_storage_quota = 0
+
+# =============== Image Cache Options =============================
+
+# Base directory that the Image Cache uses
+image_cache_dir = /var/lib/glance/image-cache/
+
+# =============== Manager Options =================================
+
+# DEPRECATED. TO BE REMOVED IN THE JUNO RELEASE.
+# Whether or not to enforce that all DB tables have charset utf8.
+# If your database tables do not have charset utf8 you will
+# need to convert before this option is removed. This option is
+# only relevant if your database engine is MySQL.
+#db_enforce_mysql_charset = True
+
+# =============== Glance Store ====================================
+[glance_store]
+# Moved from [DEFAULT], for Juno release
+default_store = file
+filesystem_store_datadir = /var/lib/glance/images/
+
+# =============== Database Options =================================
+
+[database]
+# The file name to use with SQLite (string value)
+sqlite_db = /var/lib/glance/glance.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous = True
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+backend = sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect = False
+
+# seconds between db connection retries (integer value)
+#db_retry_interval = 1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval = True
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval = 10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries = 20
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = glance
+admin_password = {{ GLANCE_PASS }}
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-api-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-api-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor = keystone
+
+[store_type_location_strategy]
+# The scheme list to use to get store preference order. The scheme must be
+# registered by one of the stores defined by the 'known_stores' config option.
+# This option will be applied when you using 'store_type' option as image
+# location strategy defined by the 'location_strategy' config option.
+#store_type_preference =
diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-registry.conf b/compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-registry.conf
new file mode 100644 (file)
index 0000000..8d731a2
--- /dev/null
@@ -0,0 +1,190 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Address to bind the registry server
+bind_host = {{ internal_ip }}
+
+# Port to bind the registry server to
+bind_port = 9191
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/glance/registry.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package.
+#data_api = glance.db.sqlalchemy.api
+
+# Enable Registry API versions individually or simultaneously
+#enable_v1_registry = True
+#enable_v2_registry = True
+
+# Limit the api to return `param_limit_max` items in a call to a container. If
+# a larger `limit` query param is provided, it will be reduced to this value.
+api_limit_max = 1000
+
+# If a `limit` query param is not provided in an api request, it will
+# default to `limit_param_default`
+limit_param_default = 25
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Whether to automatically create the database tables.
+# Default: False
+#db_auto_create = False
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL1
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting registry server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting registry server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Database Options ==========================
+
+[database]
+# The file name to use with SQLite (string value)
+sqlite_db = /var/lib/glance/glance.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous = True
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+backend = sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect = False
+
+# seconds between db connection retries (integer value)
+#db_retry_interval = 1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval = True
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval = 10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries = 20
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = glance
+admin_password = {{ GLANCE_PASS }}
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-registry-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-registry-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor = keystone
diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/templates/image_upload.sh b/compass/deploy/ansible/openstack_juno/roles/glance/templates/image_upload.sh
new file mode 100644 (file)
index 0000000..9dd1fa8
--- /dev/null
@@ -0,0 +1,2 @@
+sleep 10
+glance --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ HA_VIP }}:35357/v2.0 image-create --name="cirros" --disk-format=qcow2 --container-format=bare --is-public=true < /opt/{{ build_in_image_name }} && touch glance.import.completed
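The upload script above waits 10 seconds for the glance API to settle, creates the cirros image, and drops a marker file only when image-create succeeds. A quick post-run check is sketched below; the exported values are placeholders for the ADMIN_PASS/HA_VIP template variables, not part of this patch, and the marker file lives in the play's working directory.

    # placeholders for the template variables above
    export OS_USERNAME=admin
    export OS_PASSWORD=secret
    export OS_TENANT_NAME=admin
    export OS_AUTH_URL=http://10.1.0.50:35357/v2.0

    glance image-list | grep cirros             # expect the image in ACTIVE state
    test -f glance.import.completed && echo ok  # marker written only on success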
diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/files/galera_chk b/compass/deploy/ansible/openstack_juno/roles/ha/files/galera_chk
new file mode 100644 (file)
index 0000000..9fd165c
--- /dev/null
@@ -0,0 +1,10 @@
+#! /bin/sh
+
+code=`mysql -uroot -e "show status" | awk '/Threads_running/{print $2}'`
+
+if [ "$code"=="1" ]
+then
+    echo "HTTP/1.1 200 OK\r\n"
+else
+    echo "HTTP/1.1 503 Service Unavailable\r\n"
+fi
diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/files/mysqlchk b/compass/deploy/ansible/openstack_juno/roles/ha/files/mysqlchk
new file mode 100644 (file)
index 0000000..2c03f19
--- /dev/null
@@ -0,0 +1,15 @@
+# default: off
+# description: An xinetd service that reports MySQL/Galera cluster health
+# back to clients via /usr/local/bin/galera_chk.
+# This is the tcp version.
+service mysqlchk
+{
+    disable     = no
+    flags       = REUSE
+    socket_type = stream
+    protocol    = tcp
+    user        = root
+    wait        = no
+    server      = /usr/local/bin/galera_chk
+    port        = 9200
+}    
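Together, galera_chk and this xinetd stanza expose each node's Galera health as a one-line HTTP-style response on TCP 9200. A hand test from any node is sketched below (the node IP is a placeholder); the server sends only a status line and closes, which is all a simple HTTP probe needs.

    # expect "HTTP/1.1 200 OK" on a healthy node, a 503 line otherwise
    printf 'GET / HTTP/1.0\r\n\r\n' | nc 10.1.0.51 9200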
diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/files/notify.sh b/compass/deploy/ansible/openstack_juno/roles/ha/files/notify.sh
new file mode 100644 (file)
index 0000000..5edffe8
--- /dev/null
@@ -0,0 +1,4 @@
+#!/bin/bash
+python /usr/local/bin/failover.py $1
+mysql -uroot -e"flush hosts"
+service mysql restart
diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/ha/handlers/main.yml
new file mode 100644 (file)
index 0000000..a02c686
--- /dev/null
@@ -0,0 +1,9 @@
+---
+- name: restart haproxy
+  service: name=haproxy state=restarted enabled=yes
+
+- name: restart xinetd
+  service: name=xinetd state=restarted enabled=yes
+
+- name: restart keepalived
+  service: name=keepalived state=restarted enabled=yes
diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/ha/tasks/main.yml
new file mode 100644 (file)
index 0000000..a00c21a
--- /dev/null
@@ -0,0 +1,94 @@
+---
+- name: install keepalived xinetd haproxy
+  apt: name={{ item }} state=present
+  with_items:
+    - keepalived
+    - xinetd
+    - haproxy
+   
+- name: generate ha service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+    - keepalived
+    - xinetd
+    - haproxy
+
+- name: install pexpect
+  pip: name=pexpect state=present
+
+- name: activate ip_nonlocal_bind
+  sysctl: name=net.ipv4.ip_nonlocal_bind value=1
+          state=present reload=yes
+
+- name: set net.ipv4.tcp_keepalive_intvl 
+  sysctl: name=net.ipv4.tcp_keepalive_intvl value=1
+          state=present reload=yes
+
+- name: set net.ipv4.tcp_keepalive_probes
+  sysctl: name=net.ipv4.tcp_keepalive_probes value=5
+          state=present reload=yes
+
+- name: set net.ipv4.tcp_keepalive_time
+  sysctl: name=net.ipv4.tcp_keepalive_time value=5
+          state=present reload=yes
+
+- name: update haproxy cfg
+  template: src=haproxy.cfg dest=/etc/haproxy/haproxy.cfg
+  notify: restart haproxy
+
+- name: set haproxy enable flag
+  lineinfile: dest=/etc/default/haproxy state=present
+              regexp="ENABLED=*"
+              line="ENABLED=1"
+  notify: restart haproxy
+
+- name: set haproxy log
+  lineinfile: dest=/etc/rsyslog.conf state=present
+              regexp="local0.* /var/log/haproxy.log"
+              line="local0.* /var/log/haproxy.log"
+
+- name: set rsyslog udp module
+  lineinfile: dest=/etc/rsyslog.conf state=present
+              regexp="^#$ModLoad imudp"
+              line="$ModLoad imudp"
+
+- name: set rsyslog udp port
+  lineinfile: dest=/etc/rsyslog.conf state=present
+              regexp="^#$UDPServerRun 514"
+              line="$UDPServerRun 514"
+
+- name: copy galera_chk file
+  copy: src=galera_chk dest=/usr/local/bin/galera_chk mode=0777
+
+- name: copy notify file
+  copy: src=notify.sh dest=/usr/local/bin/notify.sh mode=0777
+  
+- name: copy notify template file
+  template: src=failover.j2 dest=/usr/local/bin/failover.py mode=0777
+
+- name: add network service
+  lineinfile: dest=/etc/services state=present
+              line="mysqlchk          9200/tcp"
+              insertafter="Local services"
+  notify: restart xinetd
+
+- name: copy mysqlchk file
+  copy: src=mysqlchk dest=/etc/xinetd.d/mysqlchk mode=0777
+  notify: restart xinetd
+
+- name: set keepalived start param
+  lineinfile: dest=/etc/default/keepalived state=present
+              regexp="^DAEMON_ARGS=*"
+              line="DAEMON_ARGS=\"-D -d -S 1\""
+
+- name: set keepalived log
+  lineinfile: dest=/etc/rsyslog.conf state=present
+              regexp="local1.* /var/log/keepalived.log"
+              line="local1.* /var/log/keepalived.log"
+
+- name: update keepalived info
+  template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf
+  notify: restart keepalived
+
+- name: restart rsyslog
+  shell: service rsyslog restart
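This play installs the HA stack, then tunes the kernel (non-local bind so haproxy can bind the floating VIP before it is assigned, plus aggressive TCP keepalives) and wires haproxy and keepalived logging through rsyslog. A post-run sanity sketch, assuming the default file locations used above:

    sysctl net.ipv4.ip_nonlocal_bind            # expect '= 1'
    haproxy -c -f /etc/haproxy/haproxy.cfg      # validate the rendered config
    service keepalived status && service xinetd status
    tail /var/log/haproxy.log /var/log/keepalived.log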
diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/templates/failover.j2 b/compass/deploy/ansible/openstack_juno/roles/ha/templates/failover.j2
new file mode 100644 (file)
index 0000000..b03c737
--- /dev/null
@@ -0,0 +1,65 @@
+import ConfigParser, os, socket
+import logging as LOG
+import pxssh
+import sys
+import re
+
+LOG_FILE="/var/log/mysql_failover"
+try:
+    os.remove(LOG_FILE)
+except OSError:
+    pass
+
+LOG.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename=LOG_FILE,level=LOG.DEBUG)
+ha_vip = "{{ HA_VIP }}"
+LOG.info("ha_vip: %s" % ha_vip)
+
+#ha_vip = "10.1.0.50"
+galera_path = '/etc/mysql/conf.d/wsrep.cnf'
+pattern = re.compile(r"gcomm://(?P<prev_ip>.*)")
+
+def ssh_get_hostname(ip):
+    try:
+        s = pxssh.pxssh()
+        s.login("%s" % ip, "root", "root")
+        s.sendline('hostname')   # run a command
+        s.prompt()             # match the prompt
+        result = s.before.strip()      # everything before the prompt
+        return result.split(os.linesep)[1]
+    except pxssh.ExceptionPxssh as e:
+        LOG.error("pxssh failed on login.")
+        raise
+
+def failover(mode):
+    config = ConfigParser.ConfigParser()
+    config.optionxform = str
+    config.readfp(open(galera_path))
+    wsrep_cluster_address = config.get("mysqld", "wsrep_cluster_address")
+    wsrep_cluster_address = pattern.match(wsrep_cluster_address).groupdict()["prev_ip"]
+
+    LOG.info("old wsrep_cluster_address = %s" % wsrep_cluster_address)
+
+    if mode == "master":
+        # becoming master: clear wsrep_cluster_address so this node bootstraps
+        LOG.info("becoming master, setting wsrep_cluster_address to null")
+        wsrep_cluster_address = ""
+
+    elif mode == "backup":
+        # becoming backup: point wsrep_cluster_address at the master's internal IP
+        hostname = ssh_get_hostname(ha_vip)
+        wsrep_cluster_address = socket.gethostbyname(hostname)
+        LOG.info("becoming slave, setting wsrep_cluster_address to master internal ip")
+
+    LOG.info("new wsrep_cluster_address = %s" % wsrep_cluster_address)
+    wsrep_cluster_address = "gcomm://%s" % wsrep_cluster_address
+    config.set("mysqld", "wsrep_cluster_address", wsrep_cluster_address)
+    with open(galera_path, 'wb') as fp:
+        #config.write(sys.stdout)
+        config.write(fp)
+   
+    os.system("service mysql restart")
+    LOG.info("failover success!!!")
+
+if __name__ == "__main__":
+    LOG.debug("call me: %s" % sys.argv)
+    failover(sys.argv[1])
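The failover script rewrites wsrep_cluster_address in wsrep.cnf and restarts MySQL: an empty gcomm:// URL bootstraps a new master, while a backup node points the URL at the master's internal IP (resolved by SSHing to the VIP for its hostname). keepalived drives it through notify.sh, but the equivalent manual invocations are:

    python /usr/local/bin/failover.py master   # bootstrap: gcomm://
    python /usr/local/bin/failover.py backup   # rejoin:    gcomm://<master ip>
    tail /var/log/mysql_failover               # each step is logged here
    grep wsrep_cluster_address /etc/mysql/conf.d/wsrep.cnf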
diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/templates/haproxy.cfg b/compass/deploy/ansible/openstack_juno/roles/ha/templates/haproxy.cfg
new file mode 100644 (file)
index 0000000..4ed528a
--- /dev/null
@@ -0,0 +1,123 @@
+
+global
+    #chroot /var/run/haproxy
+    daemon
+    user haproxy
+    group haproxy
+    maxconn 4000
+    pidfile /var/run/haproxy/haproxy.pid
+    #log 127.0.0.1 local0
+    tune.bufsize 1000000
+    stats socket /var/run/haproxy.sock
+    stats timeout 2m
+
+defaults
+    log global
+    maxconn 8000
+    option redispatch
+    option dontlognull
+    option splice-auto
+    timeout http-request 10s
+    timeout queue 1m
+    timeout connect 10s
+    timeout client 6m
+    timeout server 6m
+    timeout check 10s
+    retries 5
+
+listen  proxy-glance_registry_cluster
+    bind {{ HA_VIP }}:9191
+    option tcpka
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9191 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen  proxy-glance_api_cluster
+    bind {{ HA_VIP }}:9292
+    option tcpka
+    option httpchk
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9292 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen  proxy-nova-novncproxy
+    bind {{ HA_VIP }}:6080
+    option tcpka
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:6080 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen  proxy-network
+    bind {{ HA_VIP }}:9696
+    option tcpka
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9696 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen  proxy-volume
+    bind {{ HA_VIP }}:8776
+    option tcpka
+    option httpchk
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8776 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen  proxy-keystone_admin_cluster
+    bind {{ HA_VIP }}:35357
+    option tcpka
+    option httpchk
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:35357 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen  proxy-keystone_public_internal_cluster
+    bind {{ HA_VIP }}:5000
+    option tcpka
+    option httpchk
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:5000 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen  proxy-nova_compute_api_cluster
+    bind {{ HA_VIP }}:8774
+    mode tcp
+    option httpchk
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8774 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen  proxy-nova_metadata_api_cluster
+    bind {{ HA_VIP }}:8775
+    option tcpka
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8775 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen stats
+    mode http
+    bind 0.0.0.0:8888
+    stats enable
+    stats refresh 30s
+    stats uri /
+    stats realm Global\ statistics
+    stats auth admin:admin
+
+
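Every listen block above balances by client source IP across the controller group, so a given client sticks to one backend, and the stats block publishes a dashboard on port 8888 guarded by the admin:admin credentials it declares. A quick check against a rendered deployment (the VIP is a placeholder):

    curl -u admin:admin http://10.1.0.50:8888/               # stats dashboard
    echo 'show stat' | socat stdio UNIX-CONNECT:/var/run/haproxy.sock  # if socat is installed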
diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/templates/keepalived.conf b/compass/deploy/ansible/openstack_juno/roles/ha/templates/keepalived.conf
new file mode 100644 (file)
index 0000000..0b49137
--- /dev/null
@@ -0,0 +1,42 @@
+global_defs {
+
+    notification_email {
+        root@huawei.com
+    }
+
+    notification_email_from keepalived@huawei.com
+
+    smtp_server localhost
+
+    smtp_connect_timeout 30
+
+    router_id  NodeA
+
+}
+
+vrrp_instance VI_1 {
+    
+    interface {{ INTERNAL_INTERFACE }}
+    virtual_router_id 51
+    state BACKUP
+    nopreempt
+    advert_int 1
+{% for host in groups['controller'] %}
+{% if host == inventory_hostname %}
+    priority {{ 100 - loop.index0 * 5 }}
+{% endif %}
+{% endfor %}
+
+    authentication {
+        auth_type PASS
+        auth_pass 1111
+    }
+
+    virtual_ipaddress {
+        {{ HA_VIP }} dev {{ INTERNAL_INTERFACE }}
+    }
+
+    notify_master "/usr/local/bin/notify.sh master"
+    notify_backup "/usr/local/bin/notify.sh backup"
+}
+
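All controllers join VRRP instance VI_1 in BACKUP state with nopreempt and priorities staggered by host order (100, 95, 90, ...), so the first node elected master keeps the VIP until it actually fails; the notify hooks then run the Galera failover above. To confirm on the current master (interface and VIP below are placeholders for the template variables):

    ip addr show dev eth0 | grep 10.1.0.50     # VIP bound on the master
    tail /var/log/keepalived.log               # state transitions, logged via local1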
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/keystone/handlers/main.yml
new file mode 100644 (file)
index 0000000..9c0084e
--- /dev/null
@@ -0,0 +1,3 @@
+---
+- name: restart keystone
+  service: name=keystone state=restarted enabled=yes
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_config.yml b/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_config.yml
new file mode 100644 (file)
index 0000000..3203b26
--- /dev/null
@@ -0,0 +1,16 @@
+---
+- name: keystone-manage db-sync
+  shell: su -s /bin/sh -c "keystone-manage db_sync" keystone
+  register: result
+  until: result.rc == 0
+  retries: 5
+  delay: 3
+
+- name: place keystone init script under /opt/
+  template: src=keystone_init dest=/opt/keystone_init mode=0744
+
+- name: run keystone_init
+  shell: /opt/keystone_init && touch keystone_init_complete || touch keystone_init_failed
+  args:
+    creates: keystone_init_complete 
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_install.yml b/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_install.yml
new file mode 100644 (file)
index 0000000..7d92395
--- /dev/null
@@ -0,0 +1,27 @@
+---
+- name: install keystone packages
+  apt: name=keystone state=present force=yes
+
+- name: generate keystone service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+    - keystone
+
+- name: update keystone conf
+  template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
+  notify: restart keystone
+
+- name: delete sqlite database
+  shell: rm /var/lib/keystone/keystone.db || echo sqlite database already removed
+
+- name: cron job to purge expired tokens hourly
+  shell: (crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/crontabs/keystone
+
+- name: modify keystone cron rights
+  file: path=/var/spool/cron/crontabs/keystone mode=0600
+
+- name: keystone source files
+  template: src={{ item }} dest=/opt/{{ item }}
+  with_items:
+    - admin-openrc.sh
+    - demo-openrc.sh
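Besides installing and configuring keystone, this play seeds an hourly token_flush cron job for the keystone user and drops the two credential files under /opt. A small verification sketch:

    crontab -l -u keystone | grep token_flush
    ls -l /var/spool/cron/crontabs/keystone    # expect mode 0600 per the task above
    ls /opt/admin-openrc.sh /opt/demo-openrc.sh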
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/main.yml
new file mode 100644 (file)
index 0000000..2f36e91
--- /dev/null
@@ -0,0 +1,13 @@
+---
+- include: keystone_install.yml
+  tags:
+    - install
+    - keystone_install
+    - keystone
+
+- include: keystone_config.yml
+  when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+  tags:
+    - config
+    - keystone_config
+    - keystone
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/templates/admin-openrc.sh b/compass/deploy/ansible/openstack_juno/roles/keystone/templates/admin-openrc.sh
new file mode 100644 (file)
index 0000000..f2e0d61
--- /dev/null
@@ -0,0 +1,6 @@
+# Verify the Identity Service installation
+export OS_PASSWORD={{ ADMIN_PASS }}
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=http://{{ HA_VIP }}:35357/v2.0
+export OS_USERNAME=admin
+
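Sourcing this file primes the OpenStack CLI environment for the admin user. A usage sketch, once the keystone endpoints are up:

    source /opt/admin-openrc.sh
    keystone token-get     # returns a token if authentication works
    keystone user-list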
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/templates/demo-openrc.sh b/compass/deploy/ansible/openstack_juno/roles/keystone/templates/demo-openrc.sh
new file mode 100644 (file)
index 0000000..8bdc51b
--- /dev/null
@@ -0,0 +1,5 @@
+export OS_USERNAME=demo
+export OS_PASSWORD={{ DEMO_PASS }}
+export OS_TENANT_NAME=demo
+export OS_AUTH_URL=http://{{ HA_VIP }}:35357/v2.0
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone.conf b/compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone.conf
new file mode 100644 (file)
index 0000000..c40f77f
--- /dev/null
@@ -0,0 +1,1317 @@
+[DEFAULT]
+
+admin_token={{ ADMIN_TOKEN }}
+
+public_bind_host={{ identity_host }}
+
+admin_bind_host={{ identity_host }}
+
+#compute_port=8774
+
+#admin_port=35357
+
+#public_port=5000
+
+# The base public endpoint URL for keystone that is
+# advertised to clients (NOTE: this does NOT affect how
+# keystone listens for connections) (string value).
+# Defaults to the base host URL of the request. E.g. a
+# request to http://server:5000/v2.0/users will
+# default to http://server:5000. You should only need
+# to set this value if the base URL contains a path
+# (e.g. /prefix/v2.0) or the endpoint should be found on
+# a different server.
+#public_endpoint=http://localhost:%(public_port)s/
+
+# The base admin endpoint URL for keystone that is advertised
+# to clients (NOTE: this does NOT affect how keystone listens
+# for connections) (string value).
+# Defaults to the base host URL of the request. E.g. a
+# request to http://server:35357/v2.0/users will
+# default to http://server:35357. You should only need
+# to set this value if the base URL contains a path
+# (e.g. /prefix/v2.0) or the endpoint should be found on
+# a different server.
+#admin_endpoint=http://localhost:%(admin_port)s/
+
+# onready allows you to send a notification when the process
+# is ready to serve. For example, to have it notify using
+# systemd, one could set shell command: "onready = systemd-
+# notify --ready" or a module with notify() method: "onready =
+# keystone.common.systemd". (string value)
+#onready=<None>
+
+# enforced by optional sizelimit middleware
+# (keystone.middleware:RequestBodySizeLimiter). (integer
+# value)
+#max_request_body_size=114688
+
+# limit the sizes of user & tenant ID/names. (integer value)
+#max_param_size=64
+
+# similar to max_param_size, but provides an exception for
+# token values. (integer value)
+#max_token_size=8192
+
+# During a SQL upgrade member_role_id will be used to create a
+# new role that will replace records in the
+# user_tenant_membership table with explicit role grants.
+# After migration, the member_role_id will be used in the API
+# add_user_to_project. (string value)
+#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab
+
+# During a SQL upgrade member_role_id will be used to create a
+# new role that will replace records in the
+# user_tenant_membership table with explicit role grants.
+# After migration, member_role_name will be ignored. (string
+# value)
+#member_role_name=_member_
+
+# The value passed as the keyword "rounds" to passlib encrypt
+# method. (integer value)
+#crypt_strength=40000
+
+# Set this to True if you want to enable TCP_KEEPALIVE on
+# server sockets i.e. sockets used by the keystone wsgi server
+# for client connections. (boolean value)
+#tcp_keepalive=false
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Only applies if tcp_keepalive is True. Not supported
+# on OS X. (integer value)
+#tcp_keepidle=600
+
+# The maximum number of entities that will be returned in a
+# collection can be set with list_limit, with no limit set by
+# default. This global limit may be then overridden for a
+# specific driver, by specifying a list_limit in the
+# appropriate section (e.g. [assignment]). (integer value)
+#list_limit=<None>
+
+# Set this to false if you want to enable the ability for
+# user, group and project entities to be moved between domains
+# by updating their domain_id. Allowing such movement is not
+# recommended if the scope of a domain admin is being
+# restricted by use of an appropriate policy file (see
+# policy.v3cloudsample as an example). (boolean value)
+#domain_id_immutable=true
+
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use.  Version 1 is what was
+# originally used by impl_qpid.  Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work.  Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+#rabbit_host=localhost
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+#rabbit_port=5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+rabbit_userid={{ RABBIT_USER }}
+
+# The RabbitMQ password. (string value)
+rabbit_password={{ RABBIT_PASS }}
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=keystone
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+#rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in keystone.notifications
+#
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+
+#
+# Options defined in keystone.middleware.ec2_token
+#
+
+# URL to get token from ec2 request. (string value)
+#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
+
+# Required if EC2 server requires client certificate. (string
+# value)
+#keystone_ec2_keyfile=<None>
+
+# Client certificate key filename. Required if EC2 server
+# requires client certificate. (string value)
+#keystone_ec2_certfile=<None>
+
+# A PEM encoded certificate authority to use when verifying
+# HTTPS connections. Defaults to the system CAs. (string
+# value)
+#keystone_ec2_cafile=<None>
+
+# Disable SSL certificate verification. (boolean value)
+#keystone_ec2_insecure=false
+
+
+#
+# Options defined in keystone.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor.  Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers.  The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in keystone.openstack.common.lockutils
+#
+
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+#lock_path=<None>
+
+
+#
+# Options defined in keystone.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
+
+# Publish error events (boolean value)
+#publish_errors=false
+
+# Make deprecations fatal (boolean value)
+#fatal_deprecations=false
+
+# If an instance is passed with the log message, format it
+# like this (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# If an instance UUID is passed with the log message, format
+# it like this (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of logging configuration file. It does not disable
+# existing loggers, but just appends specified logging
+# configuration to any other existing logging options. Please
+# see the Python logging module documentation for details on
+# logging configuration files. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated.  Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+log_dir = /var/log/keystone
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and then will be changed in J to honor RFC5424
+# (boolean value)
+#use_syslog=false
+
+# (Optional) Use syslog rfc5424 format for logging. If
+# enabled, will add APP-NAME (RFC5424) before the MSG part of
+# the syslog message.  The old format without APP-NAME is
+# deprecated in I, and will be removed in J. (boolean value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in keystone.openstack.common.policy
+#
+
+# JSON file containing policy (string value)
+#policy_file=policy.json
+
+# Rule enforced when requested rule is not found (string
+# value)
+#policy_default_rule=default
+
+
+[assignment]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Assignment backend driver. (string value)
+#driver=<None>
+
+# Toggle for assignment caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# TTL (in seconds) to cache assignment data. This has no
+# effect unless global caching is enabled. (integer value)
+#cache_time=<None>
+
+# Maximum number of entities that will be returned in an
+# assignment collection. (integer value)
+#list_limit=<None>
+
+
+[auth]
+
+#
+# Options defined in keystone
+#
+
+# Default auth methods. (list value)
+#methods=external,password,token
+
+# The password auth plugin module. (string value)
+#password=keystone.auth.plugins.password.Password
+
+# The token auth plugin module. (string value)
+#token=keystone.auth.plugins.token.Token
+
+# The external (REMOTE_USER) auth plugin module. (string
+# value)
+#external=keystone.auth.plugins.external.DefaultDomain
+
+
+[cache]
+
+#
+# Options defined in keystone
+#
+
+# Prefix for building the configuration dictionary for the
+# cache region. This should not need to be changed unless
+# there is another dogpile.cache region with the same
+# configuration name. (string value)
+#config_prefix=cache.keystone
+
+# Default TTL, in seconds, for any cached item in the
+# dogpile.cache region. This applies to any cached method that
+# doesn't have an explicit cache expiration time defined for
+# it. (integer value)
+#expiration_time=600
+
+# Dogpile.cache backend module. It is recommended that
+# Memcache (dogpile.cache.memcache) or Redis
+# (dogpile.cache.redis) be used in production deployments.
+# Small workloads (single process) like devstack can use the
+# dogpile.cache.memory backend. (string value)
+#backend=keystone.common.cache.noop
+
+# Use a key-mangling function (sha1) to ensure fixed length
+# cache-keys. This is toggle-able for debugging purposes, it
+# is highly recommended to always leave this set to True.
+# (boolean value)
+#use_key_mangler=true
+
+# Arguments supplied to the backend module. Specify this
+# option once per argument to be passed to the dogpile.cache
+# backend. Example format: "<argname>:<value>". (multi valued)
+#backend_argument=
+
+# Proxy Classes to import that will affect the way the
+# dogpile.cache backend functions. See the dogpile.cache
+# documentation on changing-backend-behavior. Comma delimited
+# list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2.
+# (list value)
+#proxies=
+
+# Global toggle for all caching using the should_cache_fn
+# mechanism. (boolean value)
+#enabled=false
+
+# Extra debugging from the cache backend (cache keys,
+# get/set/delete/etc calls) This is only really useful if you
+# need to see the specific cache-backend get/set/delete calls
+# with the keys/values.  Typically this should be left set to
+# False. (boolean value)
+#debug_cache_backend=false
+
+
+[catalog]
+
+#
+# Options defined in keystone
+#
+
+# Catalog template file name for use with the template catalog
+# backend. (string value)
+#template_file=default_catalog.templates
+
+# Keystone catalog backend driver. (string value)
+#driver=keystone.catalog.backends.sql.Catalog
+
+# Maximum number of entities that will be returned in a
+# catalog collection. (integer value)
+#list_limit=<None>
+
+
+[credential]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Credential backend driver. (string value)
+#driver=keystone.credential.backends.sql.Credential
+
+
+[database]
+
+#
+# Options defined in keystone.openstack.common.db.options
+#
+
+# The file name to use with SQLite (string value)
+#sqlite_db=keystone.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous=true
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection=<None>
+connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect=false
+
+# seconds between db connection retries (integer value)
+#db_retry_interval=1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval=true
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval=10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries=20
+
+
+[ec2]
+
+#
+# Options defined in keystone
+#
+
+# Keystone EC2Credential backend driver. (string value)
+#driver=keystone.contrib.ec2.backends.kvs.Ec2
+
+
+[endpoint_filter]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Endpoint Filter backend driver (string value)
+#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter
+
+# Toggle to return all active endpoints if no filter exists.
+# (boolean value)
+#return_all_endpoints_if_no_filter=true
+
+
+[federation]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Federation backend driver. (string value)
+#driver=keystone.contrib.federation.backends.sql.Federation
+
+# Value to be used when filtering assertion parameters from
+# the environment. (string value)
+#assertion_prefix=
+
+
+[identity]
+
+#
+# Options defined in keystone
+#
+
+# This references the domain to use for all Identity API v2
+# requests (which are not aware of domains). A domain with
+# this ID will be created for you by keystone-manage db_sync
+# in migration 008.  The domain referenced by this ID cannot
+# be deleted on the v3 API, to prevent accidentally breaking
+# the v2 API. There is nothing special about this domain,
+# other than the fact that it must exist in order to maintain
+# support for your v2 clients. (string value)
+#default_domain_id=default
+
+# A subset (or all) of domains can have their own identity
+# driver, each with their own partial configuration file in a
+# domain configuration directory. Only values specific to the
+# domain need to be placed in the domain specific
+# configuration file. This feature is disabled by default; set
+# to True to enable. (boolean value)
+#domain_specific_drivers_enabled=false
+
+# Path for Keystone to locate the domain-specific identity
+# configuration files if domain_specific_drivers_enabled is
+# set to true. (string value)
+#domain_config_dir=/etc/keystone/domains
+
+# Keystone Identity backend driver. (string value)
+#driver=keystone.identity.backends.sql.Identity
+
+# Maximum supported length for user passwords; decrease to
+# improve performance. (integer value)
+#max_password_length=4096
+
+# Maximum number of entities that will be returned in an
+# identity collection. (integer value)
+#list_limit=<None>
+
+
+[kvs]
+
+#
+# Options defined in keystone
+#
+
+# Extra dogpile.cache backend modules to register with the
+# dogpile.cache library. (list value)
+#backends=
+
+# Prefix for building the configuration dictionary for the KVS
+# region. This should not need to be changed unless there is
+# another dogpile.cache region with the same configuration
+# name. (string value)
+#config_prefix=keystone.kvs
+
+# Toggle to disable using a key-mangling function to ensure
+# fixed length keys. This is toggle-able for debugging
+# purposes, it is highly recommended to always leave this set
+# to True. (boolean value)
+#enable_key_mangler=true
+
+# Default lock timeout for distributed locking. (integer
+# value)
+#default_lock_timeout=5
+
+
+[ldap]
+
+#
+# Options defined in keystone
+#
+
+# URL for connecting to the LDAP server. (string value)
+#url=ldap://localhost
+
+# User BindDN to query the LDAP server. (string value)
+#user=<None>
+
+# Password for the BindDN to query the LDAP server. (string
+# value)
+#password=<None>
+
+# LDAP server suffix (string value)
+#suffix=cn=example,cn=com
+
+# If true, will add a dummy member to groups. This is required
+# if the objectclass for groups requires the "member"
+# attribute. (boolean value)
+#use_dumb_member=false
+
+# DN of the "dummy member" to use when "use_dumb_member" is
+# enabled. (string value)
+#dumb_member=cn=dumb,dc=nonexistent
+
+# allow deleting subtrees. (boolean value)
+#allow_subtree_delete=false
+
+# The LDAP scope for queries, this can be either "one"
+# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree).
+# (string value)
+#query_scope=one
+
+# Maximum results per page; a value of zero ("0") disables
+# paging. (integer value)
+#page_size=0
+
+# The LDAP dereferencing option for queries. This can be
+# either "never", "searching", "always", "finding" or
+# "default". The "default" option falls back to using default
+# dereferencing configured by your ldap.conf. (string value)
+#alias_dereferencing=default
+
+# Override the system's default referral chasing behavior for
+# queries. (boolean value)
+#chase_referrals=<None>
+
+# Search base for users. (string value)
+#user_tree_dn=<None>
+
+# LDAP search filter for users. (string value)
+#user_filter=<None>
+
+# LDAP objectClass for users. (string value)
+#user_objectclass=inetOrgPerson
+
+# LDAP attribute mapped to user id. (string value)
+#user_id_attribute=cn
+
+# LDAP attribute mapped to user name. (string value)
+#user_name_attribute=sn
+
+# LDAP attribute mapped to user email. (string value)
+#user_mail_attribute=email
+
+# LDAP attribute mapped to password. (string value)
+#user_pass_attribute=userPassword
+
+# LDAP attribute mapped to user enabled flag. (string value)
+#user_enabled_attribute=enabled
+
+# Bitmask integer to indicate the bit that the enabled value
+# is stored in if the LDAP server represents "enabled" as a
+# bit on an integer rather than a boolean. A value of "0"
+# indicates the mask is not used. If this is not set to "0"
+# the typical value is "2". This is typically used when
+# "user_enabled_attribute = userAccountControl". (integer
+# value)
+#user_enabled_mask=0
+
+# Default value to enable users. This should match an
+# appropriate int value if the LDAP server uses non-boolean
+# (bitmask) values to indicate if a user is enabled or
+# disabled. If this is not set to "True" the typical value is
+# "512". This is typically used when "user_enabled_attribute =
+# userAccountControl". (string value)
+#user_enabled_default=True
+
+# List of attributes stripped off the user on update. (list
+# value)
+#user_attribute_ignore=default_project_id,tenants
+
+# LDAP attribute mapped to default_project_id for users.
+# (string value)
+#user_default_project_id_attribute=<None>
+
+# Allow user creation in LDAP backend. (boolean value)
+#user_allow_create=true
+
+# Allow user updates in LDAP backend. (boolean value)
+#user_allow_update=true
+
+# Allow user deletion in LDAP backend. (boolean value)
+#user_allow_delete=true
+
+# If True, Keystone uses an alternative method to determine if
+# a user is enabled or not by checking if they are a member of
+# the "user_enabled_emulation_dn" group. (boolean value)
+#user_enabled_emulation=false
+
+# DN of the group entry to hold enabled users when using
+# enabled emulation. (string value)
+#user_enabled_emulation_dn=<None>
+
+# List of additional LDAP attributes used for mapping
+# Additional attribute mappings for users. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#user_additional_attribute_mapping=
+
+# Search base for projects (string value)
+#tenant_tree_dn=<None>
+
+# LDAP search filter for projects. (string value)
+#tenant_filter=<None>
+
+# LDAP objectClass for projects. (string value)
+#tenant_objectclass=groupOfNames
+
+# LDAP attribute mapped to project id. (string value)
+#tenant_id_attribute=cn
+
+# LDAP attribute mapped to project membership for user.
+# (string value)
+#tenant_member_attribute=member
+
+# LDAP attribute mapped to project name. (string value)
+#tenant_name_attribute=ou
+
+# LDAP attribute mapped to project description. (string value)
+#tenant_desc_attribute=description
+
+# LDAP attribute mapped to project enabled. (string value)
+#tenant_enabled_attribute=enabled
+
+# LDAP attribute mapped to project domain_id. (string value)
+#tenant_domain_id_attribute=businessCategory
+
+# List of attributes stripped off the project on update. (list
+# value)
+#tenant_attribute_ignore=
+
+# Allow tenant creation in LDAP backend. (boolean value)
+#tenant_allow_create=true
+
+# Allow tenant update in LDAP backend. (boolean value)
+#tenant_allow_update=true
+
+# Allow tenant deletion in LDAP backend. (boolean value)
+#tenant_allow_delete=true
+
+# If True, Keystone uses an alternative method to determine if
+# a project is enabled or not by checking if they are a member
+# of the "tenant_enabled_emulation_dn" group. (boolean value)
+#tenant_enabled_emulation=false
+
+# DN of the group entry to hold enabled projects when using
+# enabled emulation. (string value)
+#tenant_enabled_emulation_dn=<None>
+
+# Additional attribute mappings for projects. Attribute
+# mapping format is <ldap_attr>:<user_attr>, where ldap_attr
+# is the attribute in the LDAP entry and user_attr is the
+# Identity API attribute. (list value)
+#tenant_additional_attribute_mapping=
+
+# Search base for roles. (string value)
+#role_tree_dn=<None>
+
+# LDAP search filter for roles. (string value)
+#role_filter=<None>
+
+# LDAP objectClass for roles. (string value)
+#role_objectclass=organizationalRole
+
+# LDAP attribute mapped to role id. (string value)
+#role_id_attribute=cn
+
+# LDAP attribute mapped to role name. (string value)
+#role_name_attribute=ou
+
+# LDAP attribute mapped to role membership. (string value)
+#role_member_attribute=roleOccupant
+
+# List of attributes stripped off the role on update. (list
+# value)
+#role_attribute_ignore=
+
+# Allow role creation in LDAP backend. (boolean value)
+#role_allow_create=true
+
+# Allow role update in LDAP backend. (boolean value)
+#role_allow_update=true
+
+# Allow role deletion in LDAP backend. (boolean value)
+#role_allow_delete=true
+
+# Additional attribute mappings for roles. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#role_additional_attribute_mapping=
+
+# Search base for groups. (string value)
+#group_tree_dn=<None>
+
+# LDAP search filter for groups. (string value)
+#group_filter=<None>
+
+# LDAP objectClass for groups. (string value)
+#group_objectclass=groupOfNames
+
+# LDAP attribute mapped to group id. (string value)
+#group_id_attribute=cn
+
+# LDAP attribute mapped to group name. (string value)
+#group_name_attribute=ou
+
+# LDAP attribute mapped to show group membership. (string
+# value)
+#group_member_attribute=member
+
+# LDAP attribute mapped to group description. (string value)
+#group_desc_attribute=description
+
+# List of attributes stripped off the group on update. (list
+# value)
+#group_attribute_ignore=
+
+# Allow group creation in LDAP backend. (boolean value)
+#group_allow_create=true
+
+# Allow group update in LDAP backend. (boolean value)
+#group_allow_update=true
+
+# Allow group deletion in LDAP backend. (boolean value)
+#group_allow_delete=true
+
+# Additional attribute mappings for groups. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#group_additional_attribute_mapping=
+
+# CA certificate file path for communicating with LDAP
+# servers. (string value)
+#tls_cacertfile=<None>
+
+# CA certificate directory path for communicating with LDAP
+# servers. (string value)
+#tls_cacertdir=<None>
+
+# Enable TLS for communicating with LDAP servers. (boolean
+# value)
+#use_tls=false
+
+# Valid options for tls_req_cert are "demand", "never" and "allow".
+# (string value)
+#tls_req_cert=demand
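+# Example (illustrative) TLS setup; the CA file path is a placeholder:
+# use_tls = true
+# tls_cacertfile = /etc/ssl/certs/ldap-ca.pem
+# tls_req_cert = demand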
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[memcache]
+
+#
+# Options defined in keystone
+#
+
+# Memcache servers in the format of "host:port" (list value)
+#servers=localhost:11211
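+# Example (illustrative): two memcache servers
+# servers = 192.0.2.10:11211,192.0.2.11:11211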
+
+# Number of compare-and-set attempts to make when using
+# compare-and-set in the token memcache back end. (integer
+# value)
+#max_compare_and_set_retry=16
+
+
+[oauth1]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Credential backend driver. (string value)
+#driver=keystone.contrib.oauth1.backends.sql.OAuth1
+
+# Duration (in seconds) for the OAuth Request Token. (integer
+# value)
+#request_token_duration=28800
+
+# Duration (in seconds) for the OAuth Access Token. (integer
+# value)
+#access_token_duration=86400
+
+
+[os_inherit]
+
+#
+# Options defined in keystone
+#
+
+# role-assignment inheritance to projects from owning domain
+# can be optionally enabled. (boolean value)
+#enabled=false
+
+
+[paste_deploy]
+
+#
+# Options defined in keystone
+#
+
+# Name of the paste configuration file that defines the
+# available pipelines. (string value)
+#config_file=keystone-paste.ini
+
+
+[policy]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Policy backend driver. (string value)
+#driver=keystone.policy.backends.sql.Policy
+
+# Maximum number of entities that will be returned in a policy
+# collection. (integer value)
+#list_limit=<None>
+
+
+[revoke]
+
+#
+# Options defined in keystone
+#
+
+# An implementation of the backend for persisting revocation
+# events. (string value)
+#driver=keystone.contrib.revoke.backends.kvs.Revoke
+
+# This value (calculated in seconds) is added to token
+# expiration before a revocation event may be removed from the
+# backend. (integer value)
+#expiration_buffer=1800
+
+# Toggle for revocation event caching. This has no effect
+# unless global caching is enabled. (boolean value)
+#caching=true
+
+
+[signing]
+
+#
+# Options defined in keystone
+#
+
+# Deprecated in favor of provider in the [token] section.
+# (string value)
+#token_format=<None>
+
+# Path of the certfile for token signing. (string value)
+#certfile=/etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for token signing. (string value)
+#keyfile=/etc/keystone/ssl/private/signing_key.pem
+
+# Path of the CA for token signing. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA Key for token signing. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Key Size (in bits) for token signing cert (auto generated
+# certificate). (integer value)
+#key_size=2048
+
+# Days the token signing cert is valid for (auto generated
+# certificate). (integer value)
+#valid_days=3650
+
+# Certificate Subject (auto generated certificate) for token
+# signing. (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
+
+
+[ssl]
+
+#
+# Options defined in keystone
+#
+
+# Toggle for SSL support on the keystone eventlet servers.
+# (boolean value)
+#enable=false
+
+# Path of the certfile for SSL. (string value)
+#certfile=/etc/keystone/ssl/certs/keystone.pem
+
+# Path of the keyfile for SSL. (string value)
+#keyfile=/etc/keystone/ssl/private/keystonekey.pem
+
+# Path of the ca cert file for SSL. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key file for SSL. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Require client certificate. (boolean value)
+#cert_required=false
+
+# SSL Key Length (in bits) (auto generated certificate).
+# (integer value)
+#key_size=1024
+
+# Days the certificate is valid for once signed (auto
+# generated certificate). (integer value)
+#valid_days=3650
+
+# SSL Certificate Subject (auto generated certificate).
+# (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost
+
+
+[stats]
+
+#
+# Options defined in keystone
+#
+
+# Keystone stats backend driver. (string value)
+#driver=keystone.contrib.stats.backends.kvs.Stats
+
+
+[token]
+
+#
+# Options defined in keystone
+#
+
+# External auth mechanisms that should add bind information to
+# token e.g. kerberos, x509. (list value)
+#bind=
+
+# Enforcement policy on tokens presented to keystone with bind
+# information. One of disabled, permissive, strict, required
+# or a specifically required bind mode e.g. kerberos or x509
+# to require binding to that authentication. (string value)
+#enforce_token_bind=permissive
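+# Example (illustrative): "enforce_token_bind = kerberos" rejects any
+# token that does not carry kerberos bind information.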
+
+# Amount of time a token should remain valid (in seconds).
+# (integer value)
+#expiration=3600
+
+# Controls the token construction, validation, and revocation
+# operations. Core providers are
+# "keystone.token.providers.[pki|uuid].Provider". (string
+# value)
+provider=keystone.token.providers.uuid.Provider
+
+# Keystone Token persistence backend driver. (string value)
+driver=keystone.token.persistence.backends.sql.Token
+
+# Toggle for token system caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# Time to cache the revocation list and the revocation events
+# if revoke extension is enabled (in seconds). This has no
+# effect unless global and token caching are enabled. (integer
+# value)
+revocation_cache_time=3600
+
+# Time to cache tokens (in seconds). This has no effect unless
+# global and token caching are enabled. (integer value)
+#cache_time=<None>
+
+# Revoke token by token identifier.  Setting revoke_by_id to
+# True enables various forms of enumerating tokens, e.g. `list
+# tokens for user`.  These enumerations are processed to
+# determine the list of tokens to revoke.   Only disable if
+# you are switching to using the Revoke extension with a
+# backend other than KVS, which stores events in memory.
+# (boolean value)
+#revoke_by_id=true
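+# Example (illustrative): when the [revoke] driver is a persistent
+# backend such as keystone.contrib.revoke.backends.sql.Revoke, this can
+# be disabled:
+# revoke_by_id = false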
+
+
+[trust]
+
+#
+# Options defined in keystone
+#
+
+# delegation and impersonation features can be optionally
+# disabled. (boolean value)
+#enabled=true
+
+# Keystone Trust backend driver. (string value)
+#driver=keystone.trust.backends.sql.Trust
+
+
+[extra_headers]
+Distribution = Ubuntu
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone_init b/compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone_init
new file mode 100644 (file)
index 0000000..729669b
--- /dev/null
@@ -0,0 +1,43 @@
+# create an administrative user
+
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=admin --pass={{ ADMIN_PASS }} --email=admin@admin.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 role-create --name=admin
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=admin --description="Admin Tenant"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=admin --tenant=admin --role=admin
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=admin --role=_member_ --tenant=admin
+
+# create a normal user
+
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=demo --pass={{ DEMO_PASS }} --email=DEMO_EMAIL
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=demo --description="Demo Tenant"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=demo --role=_member_ --tenant=demo
+
+# create a service tenant
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=service --description="Service Tenant"
+
+# register keystone service and endpoint
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=keystone --type=identity --description="OpenStack Identity"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ HA_VIP }}:5000/v2.0 --internalurl=http://{{ HA_VIP }}:5000/v2.0 --adminurl=http://{{ HA_VIP }}:35357/v2.0
+
+# Create a glance user that the Image Service can use to authenticate with the Identity service
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=glance --pass={{ GLANCE_PASS }} --email=glance@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=glance --tenant=service --role=admin
+
+# Register the Image Service with the Identity service so that other OpenStack services can locate it
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=glance --type=image --description="OpenStack Image Service"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ HA_VIP }}:9292 --internalurl=http://{{ HA_VIP }}:9292 --adminurl=http://{{ HA_VIP }}:9292
+
+# Create a nova user that Compute uses to authenticate with the Identity Service
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=nova --pass={{ NOVA_PASS }} --email=nova@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=nova --tenant=service --role=admin
+
+# register Compute with the Identity Service so that other OpenStack services can locate it
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=nova --type=compute --description="OpenStack Compute"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s
+
+# register neutron user, role and service
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name neutron --pass {{ NEUTRON_PASS }} --email neutron@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user neutron --tenant service --role admin
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name neutron --type network --description "OpenStack Networking"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ HA_VIP }}:9696 --adminurl http://{{ HA_VIP }}:9696 --internalurl http://{{ HA_VIP }}:9696
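+
+# (Illustrative) sanity check once the above has run: request a token as admin
+# keystone --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ HA_VIP }}:35357/v2.0 token-get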
diff --git a/compass/deploy/ansible/openstack_juno/roles/monitor/files/check_service.sh b/compass/deploy/ansible/openstack_juno/roles/monitor/files/check_service.sh
new file mode 100644 (file)
index 0000000..d309673
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/bash
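+# Restart any service listed in /opt/service that Upstart reports as
+# stopped ("stop/waiting"). Assumes an Upstart-based host (e.g. Ubuntu
+# 14.04); run periodically from cron (see the accompanying crontab entry).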
+services=`cat /opt/service | uniq`
+for service in $services; do
+    if /sbin/initctl list | awk '/stop\/waiting/{print $1}' | uniq | grep -q "^${service}$"; then
+        /sbin/start "$service"
+    fi
+done
diff --git a/compass/deploy/ansible/openstack_juno/roles/monitor/files/root b/compass/deploy/ansible/openstack_juno/roles/monitor/files/root
new file mode 100644 (file)
index 0000000..9c55c4f
--- /dev/null
@@ -0,0 +1 @@
+* * * * * /usr/local/bin/check_service.sh >> /var/log/check_service.log  2>&1
diff --git a/compass/deploy/ansible/openstack_juno/roles/monitor/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/monitor/tasks/main.yml
new file mode 100644 (file)
index 0000000..e5b93f3
--- /dev/null
@@ -0,0 +1,11 @@
+---
+- name: copy service check file
+  copy: src=check_service.sh dest=/usr/local/bin/check_service.sh mode=0777
+
+- name: copy cron file
+  copy: src=root dest=/var/spool/cron/crontabs/root mode=0600
+
+- name: restart cron
+  service: name=cron state=restarted
+
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/mq/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/mq/tasks/main.yml
new file mode 100644 (file)
index 0000000..4ae4065
--- /dev/null
@@ -0,0 +1,5 @@
+---
+- include: rabbitmq.yml
+
+#- include: rabbitmq_cluster.yml
+#  when: HA_CLUSTER is defined
diff --git a/compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq.yml b/compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq.yml
new file mode 100644 (file)
index 0000000..5714406
--- /dev/null
@@ -0,0 +1,45 @@
+---
+- name: create rabbitmq directory
+  file: path=/etc/rabbitmq state=directory mode=0755
+
+- name: copy rabbitmq config file
+  template: src=rabbitmq-env.conf dest=/etc/rabbitmq/rabbitmq-env.conf mode=0755
+
+- name: install rabbitmq-server
+  apt: name=rabbitmq-server state=present
+
+- name: stop rabbitmq-server
+  service: name=rabbitmq-server
+           state=stopped
+
+- name: update .erlang.cookie
+  template: src=.erlang.cookie dest=/var/lib/rabbitmq/.erlang.cookie
+        group=rabbitmq
+        owner=rabbitmq
+        mode=0400
+  when: ERLANG_TOKEN is defined
+
+- name: start and enable rabbitmq-server
+  service: name=rabbitmq-server
+           state=started
+           enabled=yes
+
+- name: generate mq service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+    - rabbitmq-server
+
+- name: modify rabbitmq password
+  command: rabbitmqctl change_password guest {{ RABBIT_PASS }}
+  when: "RABBIT_USER is defined and RABBIT_USER == 'guest'"
+  ignore_errors: True
+
+- name: add rabbitmq user
+  command: rabbitmqctl add_user {{ RABBIT_USER }} {{ RABBIT_PASS }}
+  when: "RABBIT_USER is defined and RABBIT_USER != 'guest'"
+  ignore_errors: True
+
+- name: set rabbitmq user permission
+  command: rabbitmqctl set_permissions -p / {{ RABBIT_USER }} ".*" ".*" ".*"
+  when: "RABBIT_USER is defined and RABBIT_USER != 'guest'"
+
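+# (Illustrative) the resulting user and permissions can be verified on the
+# host with "rabbitmqctl list_users" and "rabbitmqctl list_permissions -p /".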
diff --git a/compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq_cluster.yml b/compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq_cluster.yml
new file mode 100644 (file)
index 0000000..afd4c77
--- /dev/null
@@ -0,0 +1,27 @@
+---
+- name: stop rabbitmq app
+  command: rabbitmqctl stop_app
+  when: HA_CLUSTER[inventory_hostname] != '' 
+
+- name: rabbitmqctl reset
+  command: rabbitmqctl reset
+  when: HA_CLUSTER[inventory_hostname] != ''
+
+- name: stop rabbitmq
+  shell:  rabbitmqctl stop
+
+- name: set detach
+  shell: rabbitmq-server -detached
+
+- name: join cluster
+  command: rabbitmqctl join_cluster rabbit@{{ item }}
+  when: item  != inventory_hostname and HA_CLUSTER[item] == ''
+  with_items:
+    groups['controller']
+
+- name: start rabbitmq app
+  command: rabbitmqctl start_app
+
+- name: set the HA policy
+  rabbitmq_policy: name=ha-all pattern='^(?!amq\.).*' tags="ha-mode=all"
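+  # Note: the pattern '^(?!amq\.).*' matches every queue except the
+  # auto-generated amq.* queues; "ha-mode=all" mirrors those queues across
+  # all cluster nodes.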
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/mq/templates/.erlang.cookie b/compass/deploy/ansible/openstack_juno/roles/mq/templates/.erlang.cookie
new file mode 100644 (file)
index 0000000..cadcfaf
--- /dev/null
@@ -0,0 +1 @@
+{{ ERLANG_TOKEN }}
diff --git a/compass/deploy/ansible/openstack_juno/roles/mq/templates/rabbitmq-env.conf b/compass/deploy/ansible/openstack_juno/roles/mq/templates/rabbitmq-env.conf
new file mode 100644 (file)
index 0000000..6dd7349
--- /dev/null
@@ -0,0 +1 @@
+RABBITMQ_NODE_IP_ADDRESS={{ HA_VIP }}
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-common/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-common/handlers/main.yml
new file mode 100644 (file)
index 0000000..36d779d
--- /dev/null
@@ -0,0 +1,13 @@
+---
+- name: restart neutron-plugin-openvswitch-agent
+  service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
+  when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart neutron-l3-agent
+  service: name=neutron-l3-agent state=restarted enabled=yes
+
+- name: restart neutron-dhcp-agent
+  service: name=neutron-dhcp-agent state=restarted enabled=yes
+
+- name: restart neutron-metadata-agent
+  service: name=neutron-metadata-agent state=restarted enabled=yes
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/defaults/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/defaults/main.yml
new file mode 100644 (file)
index 0000000..825178b
--- /dev/null
@@ -0,0 +1,2 @@
+---
+neutron_ovs_bridge_mappings: ""
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/handlers/main.yml
new file mode 100644 (file)
index 0000000..36d779d
--- /dev/null
@@ -0,0 +1,13 @@
+---
+- name: restart neutron-plugin-openvswitch-agent
+  service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
+  when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart neutron-l3-agent
+  service: name=neutron-l3-agent state=restarted enabled=yes
+
+- name: restart neutron-dhcp-agent
+  service: name=neutron-dhcp-agent state=restarted enabled=yes
+
+- name: restart neutron-metadata-agent
+  service: name=neutron-metadata-agent state=restarted enabled=yes
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/tasks/main.yml
new file mode 100644 (file)
index 0000000..93ee46f
--- /dev/null
@@ -0,0 +1,55 @@
+---
+
+- name: activate ipv4 forwarding
+  sysctl: name=net.ipv4.ip_forward value=1
+          state=present reload=yes
+
+- name: deactivate ipv4 rp filter
+  sysctl: name=net.ipv4.conf.all.rp_filter value=0
+          state=present reload=yes
+
+- name: deactivate ipv4 default rp filter
+  sysctl: name=net.ipv4.conf.default.rp_filter
+          value=0 state=present reload=yes
+
+- name: install compute-related neutron packages
+  apt: name={{ item }} state=present force=yes
+  with_items:
+    - neutron-common
+    - neutron-plugin-ml2
+    - openvswitch-datapath-dkms
+    - openvswitch-switch
+
+- name: generate neutron compute service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+    - neutron-plugin-openvswitch-agent
+
+- name: install neutron openvswitch agent
+  apt: name=neutron-plugin-openvswitch-agent
+       state=present force=yes
+  when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: config neutron
+  template: src=neutron-network.conf
+            dest=/etc/neutron/neutron.conf backup=yes
+  notify:
+    - restart neutron-plugin-openvswitch-agent
+
+- name: config ml2 plugin
+  template: src=ml2_conf.ini
+            dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+            backup=yes
+  notify:
+    - restart neutron-plugin-openvswitch-agent 
+
+- name: add br-int
+  openvswitch_bridge: bridge=br-int state=present
+  notify:
+    - restart neutron-plugin-openvswitch-agent
+    - restart nova-compute
+
+- include: ../../neutron-network/tasks/odl.yml
+  when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- meta: flush_handlers
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dhcp_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dhcp_agent.ini
new file mode 100644 (file)
index 0000000..19eb62e
--- /dev/null
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers.  Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# request. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+dnsmasq_lease_max = 16777216
+
+# Location of the DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dnsmasq-neutron.conf
new file mode 100644 (file)
index 0000000..7bcbd9d
--- /dev/null
@@ -0,0 +1,2 @@
+dhcp-option-force=26,1454
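+# Note: DHCP option 26 (interface MTU) is forced to 1454 so that guest
+# packets leave headroom for GRE/VXLAN tunnel encapsulation on a standard
+# 1500-byte physical MTU.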
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/etc/xorp/config.boot b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/etc/xorp/config.boot
new file mode 100644 (file)
index 0000000..32caf96
--- /dev/null
@@ -0,0 +1,25 @@
+interfaces {
+    restore-original-config-on-shutdown: false
+    interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+        description: "Internal pNodes interface"
+        disable: false
+        default-system-config
+    }
+}
+
+protocols {
+    igmp {
+        disable: false
+        interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+            vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+                disable: false
+                version: 3
+            }
+        }
+        traceoptions {
+            flag all {
+                disable: false
+            }
+        }
+    }
+}
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/l3_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/l3_agent.ini
new file mode 100644 (file)
index 0000000..b394c00
--- /dev/null
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set as False then the agent can only configure one router.
+
+# This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow L3 agent support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured.  This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to
+# empty value for the linux bridge. when this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/metadata_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/metadata_agent.ini
new file mode 100644 (file)
index 0000000..6badf28
--- /dev/null
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing.  You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
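+# (Illustrative) the matching setting on the Nova side (nova.conf):
+# neutron_metadata_proxy_shared_secret = {{ METADATA_SECRET }}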
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/ml2_conf.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/ml2_conf.ini
new file mode 100644 (file)
index 0000000..a790069
--- /dev/null
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron-network.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron-network.conf
new file mode 100644 (file)
index 0000000..93be9cb
--- /dev/null
@@ -0,0 +1,465 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files.  This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog                           -> syslog
+# log_file and log_dir                 -> log_dir/log_file
+# (not log_file) and log_dir           -> log_dir/{binary_name}.log
+# use_stderr                           -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors                       -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions.  Note that this can be a colon-separated list of
+# paths.  For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response,
+# value == infinite and value < 0 means no max limit, and value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, server will just return pagination_max_limit
+# of number of items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# ===========  end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# ===========  end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as workers.  The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as RPC workers.  The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on Openstack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on Openstack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# ===========  end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
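Once neutron-server is running with the reference providers above, a quick sanity check is to list what actually loaded. A minimal sketch, assuming admin credentials are already exported in the shell:

    neutron service-provider-list

The Haproxy and openswan entries configured above should show up with default set to True.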
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron.conf
new file mode 100644 (file)
index 0000000..1575367
--- /dev/null
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files.  This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog                           -> syslog
+# log_file and log_dir                 -> log_dir/log_file
+# (not log_file) and log_dir           -> log_dir/{binary_name}.log
+# use_stderr                           -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors                       -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions.  Note that this can be a colon-separated list of
+# paths.  For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response,
+# value == infinite and value < 0 means no max limit, and value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, server will just return pagination_max_limit
+# of number of items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# ===========  end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# ===========  end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as workers.  The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as RPC workers.  The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on Openstack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on Openstack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# ===========  end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron_init.sh b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron_init.sh
new file mode 100644 (file)
index 0000000..b92e202
--- /dev/null
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}}
+
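The two commented commands above create the external network and its subnet once the deployment is up. A hedged sketch of the first command with the template variables substituted; every concrete value here (VIP address, password) is hypothetical:

    neutron --os-username=admin --os-password=secret \
            --os-tenant-name=admin --os-auth-url=http://10.1.0.50:35357/v2.0 \
            net-create ext-net --shared --router:external=True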
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/nova.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/nova.conf
new file mode 100644 (file)
index 0000000..4988cb0
--- /dev/null
@@ -0,0 +1,73 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal=false
+vif_plugging_timeout=0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[conductor]
+manager = nova.conductor.manager.ConductorManager
+topic = conductor
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/handlers/main.yml
new file mode 100644 (file)
index 0000000..b4c1585
--- /dev/null
@@ -0,0 +1,24 @@
+---
+- name: restart nova-api
+  service: name=nova-api state=restarted enabled=yes
+
+- name: restart nova-cert
+  service: name=nova-cert state=restarted enabled=yes
+
+- name: restart nova-consoleauth
+  service: name=nova-consoleauth state=restarted enabled=yes
+
+- name: restart nova-scheduler
+  service: name=nova-scheduler state=restarted enabled=yes
+
+- name: restart nova-conductor
+  service: name=nova-conductor state=restarted enabled=yes
+
+- name: restart nova-novncproxy
+  service: name=nova-novncproxy state=restarted enabled=yes
+
+- name: remove nova-sqlite-db
+  shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed
+
+- name: restart neutron-server
+  service: name=neutron-server state=restarted enabled=yes
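The remove nova-sqlite-db handler above shells out to rm and touches a marker file so that reruns do not fail. The same intent can be expressed declaratively with the file module, which is idempotent by itself; a sketch, not a change this role makes:

    - name: remove nova-sqlite-db
      file: path=/var/lib/nova/nova.sqlite state=absent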
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/main.yml
new file mode 100644 (file)
index 0000000..9c04d74
--- /dev/null
@@ -0,0 +1,13 @@
+---
+- include: neutron_install.yml
+  tags:
+    - install
+    - neutron_install
+    - neutron 
+
+- include: neutron_config.yml
+  when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+  tags:
+    - config
+    - neutron_config
+    - neutron
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_config.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_config.yml
new file mode 100644 (file)
index 0000000..77cc29a
--- /dev/null
@@ -0,0 +1,10 @@
+---
+- name: neutron-db-manage upgrade to Juno
+  shell: neutron-db-manage --config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini upgrade head
+  register: result
+  until: result.rc == 0
+  retries: 5
+  delay: 3
+  notify:
+    - restart neutron-server
+
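If the retried upgrade keeps failing, the current migration revision can be inspected with the same pair of config files; a minimal sketch using neutron-db-manage's current subcommand:

    neutron-db-manage --config-file=/etc/neutron/neutron.conf \
                      --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini current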
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_install.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_install.yml
new file mode 100644 (file)
index 0000000..6165299
--- /dev/null
@@ -0,0 +1,29 @@
+---
+- name: install controller-related neutron packages
+  apt: name={{ item }} state=present force=yes
+  with_items:
+    - neutron-server
+    - neutron-plugin-ml2
+
+- name: generate neutron controller service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+    - neutron-server
+    - neutron-plugin-ml2
+
+- name: get tenant id to fill neutron.conf
+  shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-get service | grep id | awk '{print $4}'
+  register: NOVA_ADMIN_TENANT_ID
+
+- name: update neutron conf
+  template: src=neutron.conf dest=/etc/neutron/neutron.conf backup=yes
+  notify:
+    - restart neutron-server
+
+- name: update ml2 plugin conf
+  template: src=ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes
+  notify:
+    - restart neutron-server
+
+- meta: flush_handlers
+
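The tenant-id task above greps the keystone output table for any row containing "id" and then picks the fourth whitespace-separated field. Anchoring awk on the literal " id " row is slightly more robust against incidental matches; a hedged variant of the same task:

    - name: get tenant id to fill neutron.conf
      shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-get service | awk '/ id /{print $4}'
      register: NOVA_ADMIN_TENANT_ID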
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dhcp_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dhcp_agent.ini
new file mode 100644 (file)
index 0000000..19eb62e
--- /dev/null
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers.  Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# request. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+dnsmasq_lease_max = 16777216
+
+# Location to DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dnsmasq-neutron.conf
new file mode 100644 (file)
index 0000000..7bcbd9d
--- /dev/null
@@ -0,0 +1,2 @@
+dhcp-option-force=26,1454
+
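DHCP option 26 forces the interface MTU inside guest instances; 1454 leaves headroom for GRE encapsulation on a standard 1500-byte underlay. For VXLAN tenant networks the commonly used figure is 1450, since VXLAN adds roughly 50 bytes of overhead; a hedged alternative for that case:

    dhcp-option-force=26,1450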
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/etc/xorp/config.boot b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/etc/xorp/config.boot
new file mode 100644 (file)
index 0000000..32caf96
--- /dev/null
@@ -0,0 +1,25 @@
+interfaces {
+    restore-original-config-on-shutdown: false
+    interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+        description: "Internal pNodes interface"
+        disable: false
+        default-system-config
+    }
+}
+
+protocols {
+    igmp {
+        disable: false
+        interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+            vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+                disable: false
+                version: 3
+            }
+        }
+        traceoptions {
+            flag all {
+                disable: false
+            }
+        }
+    }
+}
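This XORP snippet enables IGMPv3 on the internal interface so the node can join the multicast group used for VXLAN (vxlan_group = 239.1.1.1 in ml2_conf.ini below). Once the agent is up, membership can be spot-checked; a sketch assuming the internal interface is eth1, which is a hypothetical name:

    ip maddr show dev eth1 | grep 239.1.1.1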
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/l3_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/l3_agent.ini
new file mode 100644 (file)
index 0000000..b394c00
--- /dev/null
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set as False then the agent can only configure one router.
+
+# This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow L3 agent support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured.  This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to
+# empty value for the linux bridge. when this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/metadata_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/metadata_agent.ini
new file mode 100644 (file)
index 0000000..6badf28
--- /dev/null
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing.  You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
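The metadata proxy only works when nova signs its responses with the same secret. In Juno the counterpart settings live in nova.conf on the controller; a hedged sketch reusing the same template variable (these lines are not part of this change set):

    [neutron]
    service_metadata_proxy = True
    metadata_proxy_shared_secret = {{ METADATA_SECRET }}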
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/ml2_conf.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/ml2_conf.ini
new file mode 100644 (file)
index 0000000..a790069
--- /dev/null
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
+
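The |join(",") filters above expect list-valued variables supplied through group_vars. A hedged sketch of matching entries, with variable names taken from the template and values that are purely illustrative:

    NEUTRON_TYPE_DRIVERS: ['gre', 'vxlan', 'flat']
    NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
    NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
    NEUTRON_TUNNEL_TYPES: ['vxlan']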
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron-network.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron-network.conf
new file mode 100644 (file)
index 0000000..93be9cb
--- /dev/null
@@ -0,0 +1,465 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files.  This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog                           -> syslog
+# log_file and log_dir                 -> log_dir/log_file
+# (not log_file) and log_dir           -> log_dir/{binary_name}.log
+# use_stderr                           -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors                       -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions.  Note that this can be a colon-separated list of
+# paths.  For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response,
+# value == infinite and value < 0 means no max limit, and value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, server will just return pagination_max_limit
+# of number of items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# ===========  end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# ===========  end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as workers.  The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as RPC workers.  The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on Openstack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on Openstack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# ===========  end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for the default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# Example of a non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's LBaaS driver you need to uncomment the next line.
+# If you want to keep HAProxy as the default LBaaS driver, remove the attribute
+# "default" from the line below; otherwise comment out the HAProxy line.
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# Uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron.conf
new file mode 100644 (file)
index 0000000..2a66e94
--- /dev/null
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files.  This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog                           -> syslog
+# log_file and log_dir                 -> log_dir/log_file
+# (not log_file) and log_dir           -> log_dir/{binary_name}.log
+# use_stderr                           -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors                       -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions.  Note that this can be a colon-separated list of
+# paths.  For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response.
+# A value of 'infinite' or a negative value means no limit; otherwise the
+# value must be greater than 0. If the number of items requested is greater
+# than pagination_max_limit, the server will just return pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# ===========  end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling of networks to a DHCP agent. It will schedule
+# non-hosted networks to the first DHCP agent that sends a
+# get_active_networks message to the neutron server.
+# network_auto_schedule = True
+
+# Allow auto scheduling of routers to an L3 agent. It will schedule
+# non-hosted routers to the first L3 agent that sends a sync_routers
+# message to the neutron server.
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# ===========  end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as workers.  The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as RPC workers.  The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# in OpenStack. However, on the back end a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer in OpenStack. However, on the back end a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# Seconds between nodes reporting state to the server; should be less than
+# agent_down_time, ideally half or less of agent_down_time.
+report_interval = 30
+
+# ===========  end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in the event connectivity is lost.
+# Setting this to -1 implies an infinite retry count.
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for the default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# Example of a non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's LBaaS driver you need to uncomment the next line.
+# If you want to keep HAProxy as the default LBaaS driver, remove the attribute
+# "default" from the line below; otherwise comment out the HAProxy line.
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# Uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
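The template above fills nova_admin_tenant_id from NOVA_ADMIN_TENANT_ID.stdout_lines[0], i.e. from a variable registered by an earlier shell task. A minimal sketch of such a task, assuming the Juno-era keystone CLI and the ADMIN_PASS / identity_host variables used elsewhere in these templates; the task itself is illustrative and not part of this change:

    - name: look up the uuid of the service tenant for nova
      shell: keystone --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 tenant-list | awk '/ service / {print $2}'
      register: NOVA_ADMIN_TENANT_ID

stdout_lines[0] then carries the tenant uuid into the rendered neutron.conf.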
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron_init.sh b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron_init.sh
new file mode 100644 (file)
index 0000000..b92e202
--- /dev/null
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
+
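Both commands in this script ship commented out. If the external network should be created automatically, one hedged way to wire it in is a pair of tasks like the following, run on a single controller after uncommenting the desired lines; the file path and task names here are illustrative:

    - name: render the neutron init script
      template: src=neutron_init.sh dest=/opt/neutron_init.sh mode=0755
      run_once: True

    - name: create the external network and subnet
      shell: bash /opt/neutron_init.sh
      run_once: True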
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/nova.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/nova.conf
new file mode 100644 (file)
index 0000000..9587073
--- /dev/null
@@ -0,0 +1,69 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal=false
+vif_plugging_timeout=0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
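+# Note: notification_driver is a multi-valued option; both drivers below are loaded.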
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-network/handlers/main.yml
new file mode 100644 (file)
index 0000000..d6c5cc8
--- /dev/null
@@ -0,0 +1,21 @@
+---
+- name: restart neutron-plugin-openvswitch-agent
+  service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
+  when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart neutron-l3-agent
+  service: name=neutron-l3-agent state=restarted enabled=yes
+
+- name: kill dnsmasq
+  command: killall dnsmasq
+  ignore_errors: True
+
+- name: restart neutron-dhcp-agent
+  service: name=neutron-dhcp-agent state=restarted enabled=yes
+
+- name: restart neutron-metadata-agent
+  service: name=neutron-metadata-agent state=restarted enabled=yes
+
+- name: restart xorp
+  service: name=xorp state=restarted enabled=yes sleep=10
+  ignore_errors: True
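The when: guards in these handlers (and in the tasks below) test list membership, so NEUTRON_MECHANISM_DRIVERS and the related variables are expected to be YAML lists. A sketch of the corresponding group_vars entries follows; the variable names are the ones these roles reference, but the values are illustrative only:

    NEUTRON_MECHANISM_DRIVERS: ['openvswitch']   # or ['opendaylight']
    NEUTRON_TUNNEL_TYPES: ['vxlan']
    NEUTRON_TYPE_DRIVERS: ['local', 'flat', 'vlan', 'gre', 'vxlan']
    NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']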
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/igmp-router.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/igmp-router.yml
new file mode 100644 (file)
index 0000000..d6f38a0
--- /dev/null
@@ -0,0 +1,20 @@
+---
+- name: Install XORP to provide IGMP router functionality
+  apt: pkg=xorp
+
+- name: configure xorp
+  template: src=etc/xorp/config.boot dest=/etc/xorp/config.boot
+  notify:
+    - restart xorp
+
+- name: set xorp defaults
+  lineinfile: dest=/etc/default/xorp regexp=^RUN= line=RUN=yes
+  notify:
+    - restart xorp
+
+- meta: flush_handlers
+
+- name: start and enable xorp service
+  service: name=xorp state=started enabled=yes
+  # retries/delay only take effect together with an "until" condition
+  register: xorp_service
+  until: xorp_service|success
+  retries: 2
+  delay: 10
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/main.yml
new file mode 100644 (file)
index 0000000..1d4b591
--- /dev/null
@@ -0,0 +1,114 @@
+---
+- name: activate ipv4 forwarding
+  sysctl: name=net.ipv4.ip_forward value=1
+          state=present reload=yes
+
+- name: deactivate ipv4 rp filter
+  sysctl: name=net.ipv4.conf.all.rp_filter value=0
+          state=present reload=yes
+
+- name: deactivate ipv4 default rp filter
+  sysctl: name=net.ipv4.conf.default.rp_filter
+          value=0 state=present reload=yes
+
+- name: install neutron network related packages
+  apt: name={{ item }} state=present force=yes
+  with_items:
+    - neutron-plugin-ml2
+    - openvswitch-datapath-dkms
+    - openvswitch-switch
+    - neutron-l3-agent
+    - neutron-dhcp-agent
+
+- name: generate neutron service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+    - openvswitch-switch
+    - neutron-l3-agent
+    - neutron-dhcp-agent
+    - neutron-plugin-openvswitch-agent
+    - neutron-metadata-agent
+    - xorp
+
+- name: install neutron openvswitch agent
+  apt: name=neutron-plugin-openvswitch-agent
+       state=present force=yes
+  when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: config neutron
+  template: src=neutron-network.conf
+            dest=/etc/neutron/neutron.conf backup=yes
+  notify:
+    - restart neutron-plugin-openvswitch-agent
+    - restart neutron-l3-agent
+    - kill dnsmasq
+    - restart neutron-dhcp-agent
+    - restart neutron-metadata-agent
+
+- name: config l3 agent
+  template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
+            backup=yes
+  notify:
+    - restart neutron-l3-agent
+
+- name: config dhcp agent
+  template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini
+            backup=yes
+  notify:
+    - kill dnsmasq
+    - restart neutron-dhcp-agent
+
+- name: update dnsmasq-neutron.conf
+  template: src=dnsmasq-neutron.conf
+            dest=/etc/neutron/dnsmasq-neutron.conf
+  notify:
+    - kill dnsmasq
+    - restart neutron-dhcp-agent
+
+- name: config metadata agent
+  template: src=metadata_agent.ini
+            dest=/etc/neutron/metadata_agent.ini backup=yes
+  notify:
+    - restart neutron-metadata-agent
+
+- name: config ml2 plugin
+  template: src=ml2_conf.ini
+            dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+            backup=yes
+  notify:
+    - restart neutron-plugin-openvswitch-agent
+
+- meta: flush_handlers
+
+- name: add br-int
+  openvswitch_bridge: bridge=br-int state=present
+
+- name: add br-ex
+  openvswitch_bridge: bridge=br-ex state=present
+  when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: assign a port to br-ex for physical ext interface
+  openvswitch_port: bridge=br-ex port={{ INTERFACE_NAME }}
+                    state=present
+  when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- include: igmp-router.yml
+  when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- name: assert kernel support for vxlan
+  command: modinfo -F version vxlan
+  when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- name: assert iproute2 support for vxlan
+  command: ip link add type vxlan help
+  register: iproute_out
+  failed_when: iproute_out.rc == 255
+  when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- include: odl.yml
+  when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart ovs service
+  service: name=openvswitch-switch state=restarted enabled=yes
+
+- meta: flush_handlers
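For context, a minimal play applying this role might look like the sketch below, assuming an inventory group named network and the group_vars sketched earlier; the group name and privilege escalation style are assumptions, not taken from this change:

    ---
    - hosts: network
      remote_user: root
      sudo: True
      roles:
        - neutron-network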
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/odl.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/odl.yml
new file mode 100644 (file)
index 0000000..a2b449c
--- /dev/null
@@ -0,0 +1,13 @@
+---
+- name: ovs set manager
+  command: ovs-vsctl set-manager tcp:{{ controller }}:6640
+
+- name: get ovs uuid
+  shell: ovs-vsctl get Open_vSwitch . _uuid
+  register: ovs_uuid
+
+- name: set bridge_mappings
+  command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:bridge_mappings=physnet1:{{ INTERFACE_NAME }}
+
+- name: set local ip
+  command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:local_ip={{ internal_ip }}
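A quick way to confirm what these tasks wrote into Open vSwitch is to read the manager and the other_config map back. A hedged verification task, illustrative and not part of this change:

    - name: show the ovs manager and other_config for verification
      shell: ovs-vsctl get-manager && ovs-vsctl get Open_vSwitch . other_config
      register: ovs_state

    - debug: var=ovs_state.stdout_lines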
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dhcp_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dhcp_agent.ini
new file mode 100644 (file)
index 0000000..19eb62e
--- /dev/null
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers.  Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and
+# an iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose CIDR is 169.254.169.254/16 (or a larger prefix), and
+# which is connected to a Neutron router from which the VMs send metadata
+# requests. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+dnsmasq_lease_max = 16777216
+
+# Location of the DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dnsmasq-neutron.conf
new file mode 100644 (file)
index 0000000..7bcbd9d
--- /dev/null
@@ -0,0 +1,2 @@
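+# DHCP option 26 is the interface MTU; 1454 leaves headroom on a 1500-byte
+# physical MTU for GRE/VXLAN encapsulation overhead.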
+dhcp-option-force=26,1454
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/etc/xorp/config.boot b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/etc/xorp/config.boot
new file mode 100644 (file)
index 0000000..32caf96
--- /dev/null
@@ -0,0 +1,25 @@
+interfaces {
+    restore-original-config-on-shutdown: false
+    interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+        description: "Internal pNodes interface"
+        disable: false
+        default-system-config
+    }
+}
+
+protocols {
+    igmp {
+        disable: false
+        interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+            vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+                disable: false
+                version: 3
+            }
+        }
+        traceoptions {
+            flag all {
+                disable: false
+            }
+        }
+    }
+}
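The interface name in this template is resolved via hostvars: neutron_vxlan_interface|default(internal_interface) selects a variable name, and the trailing ['device'] lookup expects that variable to be a dict describing the NIC. A sketch of host variables that would satisfy the lookup, with illustrative values only:

    internal_interface: eth1
    eth1:
      device: eth1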
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/l3_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/l3_agent.ini
new file mode 100644 (file)
index 0000000..b394c00
--- /dev/null
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and
+# an iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set to False then the agent can only configure one
+# router. This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow the L3 agent to support multiple external
+# networks, both external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured.  This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
+
+# Name of the bridge used for external network traffic. This should be set to
+# an empty value for the Linux bridge. When this parameter is set, each L3
+# agent can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+send_arp_for_ha = 3
+
+# Seconds between re-syncing routers' data, if needed
+periodic_interval = 40
+
+# Seconds to wait after starting the agent before
+# syncing routers' data
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/metadata_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/metadata_agent.ini
new file mode 100644 (file)
index 0000000..6badf28
--- /dev/null
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing.  You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
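metadata_proxy_shared_secret has to match the secret nova's metadata server is configured with (nova historically used the key neutron_metadata_proxy_shared_secret, as noted above). One hedged way to keep both sides on the same group_vars value is Ansible's ini_file module; the [neutron] section and option names on the nova side are an assumption about a Juno-era nova.conf:

    - name: point nova at the shared metadata proxy secret
      ini_file: dest=/etc/nova/nova.conf section=neutron option=metadata_proxy_shared_secret value={{ METADATA_SECRET }}

    - name: enable serving metadata through the neutron proxy
      ini_file: dest=/etc/nova/nova.conf section=neutron option=service_metadata_proxy value=True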
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/ml2_conf.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/ml2_conf.ini
new file mode 100644 (file)
index 0000000..a790069
--- /dev/null
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security groups.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron-network.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron-network.conf
new file mode 100644 (file)
index 0000000..93be9cb
--- /dev/null
@@ -0,0 +1,465 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files.  This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog                           -> syslog
+# log_file and log_dir                 -> log_dir/log_file
+# (not log_file) and log_dir           -> log_dir/{binary_name}.log
+# use_stderr                           -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors                       -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions.  Note that this can be a colon-separated list of
+# paths.  For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response.
+# A value of 'infinite' or a negative value means no limit; otherwise the
+# value must be greater than 0. If the number of items requested is greater
+# than pagination_max_limit, the server will just return pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# ===========  end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling of networks to a DHCP agent. It will schedule
+# non-hosted networks to the first DHCP agent that sends a
+# get_active_networks message to the neutron server.
+# network_auto_schedule = True
+
+# Allow auto scheduling of routers to an L3 agent. It will schedule
+# non-hosted routers to the first L3 agent that sends a sync_routers
+# message to the neutron server.
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# ===========  end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as workers.  The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as RPC workers.  The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end, a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end, a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# ===========  end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in the event connectivity is lost;
+# set to -1 for an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise, comment out the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
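
A quick sanity check once this template is rendered and neutron-server restarts: the loaded API extensions can be listed against the configured endpoint. A minimal sketch, where the placeholders stand in for the Ansible-rendered HA_VIP and NEUTRON_PASS values:

    # Lists the extensions the server loaded with the drivers configured above.
    neutron --os-username neutron --os-tenant-name service \
            --os-password <NEUTRON_PASS> \
            --os-auth-url http://<HA_VIP>:35357/v2.0 \
            ext-list
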
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron.conf
new file mode 100644 (file)
index 0000000..1575367
--- /dev/null
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files.  This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog                           -> syslog
+# log_file and log_dir                 -> log_dir/log_file
+# (not log_file) and log_dir           -> log_dir/{binary_name}.log
+# use_stderr                           -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors                       -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions.  Note that this can be a colon-separated list of
+# paths.  For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response. A value of
+# 'infinite' or a negative integer means no max limit; otherwise the value
+# must be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, the server will just return pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# ===========  end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to the first DHCP agent which sends a get_active_networks message
+# to the neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to the first L3 agent which sends a sync_routers message to the
+# neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# ===========  end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as workers.  The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as RPC workers.  The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end, a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end, a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# ===========  end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in the event connectivity is lost;
+# set to -1 for an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise, comment out the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron_init.sh b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron_init.sh
new file mode 100644 (file)
index 0000000..b92e202
--- /dev/null
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
+
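
Both commands in this script ship commented out. A hedged illustration of what an uncommented rendering might look like, with every value invented for the example (documentation addresses, made-up password):

    # Create the shared external network and its subnet (all values hypothetical).
    neutron --os-username=admin --os-password=secret --os-tenant-name=admin \
        --os-auth-url=http://10.1.0.50:35357/v2.0 \
        net-create ext-net --shared --router:external=True
    neutron --os-username=admin --os-password=secret --os-tenant-name=admin \
        --os-auth-url=http://10.1.0.50:35357/v2.0 \
        subnet-create ext-net --name ext-subnet \
        --allocation-pool start=203.0.113.101,end=203.0.113.200 \
        --disable-dhcp --gateway 203.0.113.1 203.0.113.0/24
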
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/nova.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/nova.conf
new file mode 100644 (file)
index 0000000..9587073
--- /dev/null
@@ -0,0 +1,69 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal = false
+vif_plugging_timeout = 0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
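
This nova.conf wires nova to neutron through the [neutron] url above. Since the neutron API root serves unauthenticated version discovery, reachability of the endpoint can be probed with a single request; a sketch, with <HA_VIP> standing in for the rendered address:

    # A JSON "versions" document in the reply means neutron-server answers on the VIP.
    curl -s http://<HA_VIP>:9696/
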
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-compute/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/nova-compute/handlers/main.yml
new file mode 100644 (file)
index 0000000..c135003
--- /dev/null
@@ -0,0 +1,3 @@
+---
+- name: restart nova-compute
+  service: name=nova-compute state=restarted enabled=yes
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-compute/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/nova-compute/tasks/main.yml
new file mode 100644 (file)
index 0000000..51c8dfa
--- /dev/null
@@ -0,0 +1,21 @@
+---
+- name: install nova-compute related packages
+  apt: name=nova-compute-kvm state=present force=yes
+
+- name: update nova-compute conf
+  template: src={{ item }} dest=/etc/nova/{{ item }}
+  with_items:
+    - nova.conf
+    - nova-compute.conf
+  notify:
+    - restart nova-compute
+
+- name: generate nova-compute service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+    - nova-compute
+
+- meta: flush_handlers
+
+- name: remove nova sqlite db
+  shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
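
The role installs nova-compute-kvm, templates both config files, and flushes handlers so the restart happens before the sqlite cleanup. A hedged ad-hoc check that the daemon came back afterwards, assuming the inventory group is named compute:

    # Ad-hoc status query across the compute group (group name is an assumption).
    ansible compute -m shell -a "service nova-compute status"
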
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova-compute.conf b/compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova-compute.conf
new file mode 100644 (file)
index 0000000..401dee7
--- /dev/null
@@ -0,0 +1,7 @@
+[DEFAULT]
+compute_driver=libvirt.LibvirtDriver
+force_raw_images = true
+[libvirt]
+virt_type=qemu
+images_type = raw
+mem_stats_period_seconds=0
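
virt_type=qemu selects plain QEMU emulation, which runs anywhere but is slower than KVM. Whether a host could use KVM instead shows up in its CPU flags; for example:

    # Count hardware-virtualization flags; 0 means no VT-x/AMD-V, so
    # virt_type=qemu is the only workable choice on this host.
    egrep -c '(vmx|svm)' /proc/cpuinfo
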
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova.conf b/compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova.conf
new file mode 100644 (file)
index 0000000..4988cb0
--- /dev/null
@@ -0,0 +1,73 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal = false
+vif_plugging_timeout = 0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[conductor]
+manager = nova.conductor.manager.ConductorManager
+topic = conductor
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/nova-controller/handlers/main.yml
new file mode 100644 (file)
index 0000000..b4c1585
--- /dev/null
@@ -0,0 +1,24 @@
+---
+- name: restart nova-api
+  service: name=nova-api state=restarted enabled=yes
+
+- name: restart nova-cert
+  service: name=nova-cert state=restarted enabled=yes
+
+- name: restart nova-consoleauth
+  service: name=nova-consoleauth state=restarted enabled=yes
+
+- name: restart nova-scheduler
+  service: name=nova-scheduler state=restarted enabled=yes
+
+- name: restart nova-conductor
+  service: name=nova-conductor state=restarted enabled=yes
+
+- name: restart nova-novncproxy
+  service: name=nova-novncproxy state=restarted enabled=yes
+
+- name: remove nova-sqlite-db
+  shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed
+
+- name: restart neutron-server
+  service: name=neutron-server state=restarted enabled=yes
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/main.yml
new file mode 100644 (file)
index 0000000..72a9f4d
--- /dev/null
@@ -0,0 +1,13 @@
+---
+- include: nova_install.yml
+  tags:
+    - install
+    - nova_install
+    - nova
+
+- include: nova_config.yml
+  when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+  tags:
+    - config
+    - nova_config
+    - nova
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_config.yml b/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_config.yml
new file mode 100644 (file)
index 0000000..62351fa
--- /dev/null
@@ -0,0 +1,16 @@
+---
+- name: nova db sync
+  command: su -s /bin/sh -c "nova-manage db sync" nova
+  register: result
+  until: result.rc == 0
+  retries: 5
+  delay: 3
+  notify:
+    - restart nova-api
+    - restart nova-cert
+    - restart nova-consoleauth
+    - restart nova-scheduler
+    - restart nova-conductor
+    - restart nova-novncproxy
+
+- meta: flush_handlers
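
The register/until/retries pattern retries the schema sync up to five times, three seconds apart, which tolerates a database VIP that is still settling. The synced result can be verified by hand; a sketch:

    # Re-running the sync should be a no-op; the service list confirms the
    # controller daemons can reach the populated database.
    su -s /bin/sh -c "nova-manage db sync" nova
    nova-manage service list
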
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_install.yml b/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_install.yml
new file mode 100644 (file)
index 0000000..a1cded5
--- /dev/null
@@ -0,0 +1,35 @@
+---
+- name: install nova related packages
+  apt: name={{ item }} state=present force=yes
+  with_items:
+    - nova-api
+    - nova-cert
+    - nova-conductor
+    - nova-consoleauth
+    - nova-novncproxy
+    - nova-scheduler
+    - python-novaclient
+    - python-oslo.rootwrap
+
+- name: generate nova control service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+    - nova-api
+    - nova-cert
+    - nova-conductor
+    - nova-consoleauth
+    - nova-novncproxy
+    - nova-scheduler
+
+- name: update nova conf
+  template: src=nova.conf
+            dest=/etc/nova/nova.conf
+            backup=yes
+  notify:
+    - restart nova-api
+    - restart nova-cert
+    - restart nova-consoleauth
+    - restart nova-scheduler
+    - restart nova-conductor
+    - restart nova-novncproxy
+    - remove nova-sqlite-db
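
Each installed daemon name is appended to /opt/service, which the surrounding compass tooling evidently treats as a service inventory. A trivial check after the role runs:

    # Expect one controller service per line (nova-api, nova-cert, ...).
    cat /opt/service
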
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dhcp_agent.ini b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dhcp_agent.ini
new file mode 100644 (file)
index 0000000..19eb62e
--- /dev/null
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers.  Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# request. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+dnsmasq_lease_max = 16777216
+
+# Location of the DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
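
With these settings, each network hosted by the DHCP agent gets a dnsmasq instance inside its own qdhcp namespace. The usual verification, assuming admin credentials are exported in the environment:

    # The DHCP agent should report alive in the agent list.
    neutron agent-list
    # One qdhcp-<network-id> namespace per hosted network.
    ip netns | grep qdhcp
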
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dnsmasq-neutron.conf
new file mode 100644 (file)
index 0000000..7bcbd9d
--- /dev/null
@@ -0,0 +1,2 @@
+dhcp-option-force=26,1454
+
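
dhcp-option-force=26 pushes DHCP option 26, the interface MTU; 1454 leaves headroom for GRE/VXLAN encapsulation on a 1500-byte underlay. The effect is visible from inside a guest on a tunneled tenant network:

    # Run inside an instance; expect "mtu 1454" once the lease is applied.
    ip link show eth0
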
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/etc/xorp/config.boot b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/etc/xorp/config.boot
new file mode 100644 (file)
index 0000000..32caf96
--- /dev/null
@@ -0,0 +1,25 @@
+interfaces {
+    restore-original-config-on-shutdown: false
+    interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+        description: "Internal pNodes interface"
+        disable: false
+        default-system-config
+    }
+}
+
+protocols {
+    igmp {
+        disable: false
+        interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+            vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+                disable: false
+                version: 3
+            }
+        }
+        traceoptions {
+            flag all {
+                disable: false
+            }
+        }
+    }
+}
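
This XORP fragment enables IGMPv3 on the internal interface so multicast VXLAN traffic can be joined. A sketch of a membership check, assuming eth1 is the rendered internal device and the 239.1.1.1 group configured in ml2_conf.ini:

    # Multicast memberships on the internal interface (device name assumed);
    # the VXLAN group should appear once multicast tunnels are active.
    ip maddr show dev eth1
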
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/l3_agent.ini b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/l3_agent.ini
new file mode 100644 (file)
index 0000000..b394c00
--- /dev/null
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set to False then the agent can only configure one
+# router. This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow L3 agent support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured.  This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to an
+# empty value for the Linux bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
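
With namespaces enabled, every router scheduled to this agent lives in its own qrouter namespace, and br-ex carries the external traffic. A sketch of the usual checks, with <router> standing in for a real router name or ID:

    # One qrouter-<router-id> namespace per hosted router.
    ip netns | grep qrouter
    # Which L3 agent hosts a given router.
    neutron l3-agent-list-hosting-router <router>
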
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/metadata_agent.ini b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/metadata_agent.ini
new file mode 100644 (file)
index 0000000..6badf28
--- /dev/null
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing.  You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
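
From inside an instance, one request exercises the whole chain: namespace proxy, this metadata agent, and the nova metadata server at HA_VIP:8775. For example:

    # Run inside a guest; a plain instance ID back means the shared secret and
    # the nova_metadata_ip/port settings above line up.
    curl http://169.254.169.254/2009-04-04/meta-data/instance-id
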
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/ml2_conf.ini b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/ml2_conf.ini
new file mode 100644 (file)
index 0000000..a790069
--- /dev/null
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example: flat_networks = physnet1,physnet2
+# Example: flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls whether the neutron security group is enabled or not.
+# It should be false when you use the nova security group.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
+
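
The Jinja guards mean the [ovs], [odl] and [ml2_odl] bodies only render when the matching mechanism driver is selected. When openvswitch is in NEUTRON_MECHANISM_DRIVERS, the bridges named above should exist once the agent starts; a sketch:

    # Expect br-int plus br-tun (with vxlan/gre ports) on a node running
    # the openvswitch agent configured from this template.
    ovs-vsctl show
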
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron-network.conf b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron-network.conf
new file mode 100644 (file)
index 0000000..93be9cb
--- /dev/null
@@ -0,0 +1,465 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files.  This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog                           -> syslog
+# log_file and log_dir                 -> log_dir/log_file
+# (not log_file) and log_dir           -> log_dir/{binary_name}.log
+# use_stderr                           -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors                       -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions.  Note that this can be a colon-separated list of
+# paths.  For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response. The value
+# 'infinite' or a negative integer means no limit; otherwise the value must
+# be greater than 0. If more items are requested than pagination_max_limit,
+# the server returns at most pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# ===========  end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to the first DHCP agent that sends a get_active_networks message
+# to the neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to the first L3 agent that sends a sync_routers message to the neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# ===========  end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as workers.  The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as RPC workers.  The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end, a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end, a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# ===========  end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry count in the event connectivity is lost;
+# -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for a default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
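The template above is rendered by Ansible's template module, with values such as {{ rabbit_host }}, {{ RABBIT_USER }} and {{ HA_VIP }} coming from group_vars at deploy time. A minimal sketch for rendering it outside a full deployment, run from the role's templates directory with purely illustrative values (none of these are project defaults):

    ansible localhost -c local -m template \
        -a "src=neutron.conf dest=/tmp/neutron.conf.rendered" \
        -e "rabbit_host=10.1.0.50 RABBIT_USER=guest RABBIT_PASS=secret HA_VIP=10.1.0.222 network_server_host=10.1.0.51 VERBOSE=True NOVA_PASS=secret NEUTRON_PASS=secret NEUTRON_DBPASS=secret db_host=10.1.0.50"
    grep -E '^(rabbit_host|nova_url)' /tmp/neutron.conf.rendered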
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron.conf b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron.conf
new file mode 100644 (file)
index 0000000..1575367
--- /dev/null
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ VERBOSE }}
+
+# Where to store Neutron state files.  This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog                           -> syslog
+# log_file and log_dir                 -> log_dir/log_file
+# (not log_file) and log_dir           -> log_dir/{binary_name}.log
+# use_stderr                           -> stderr
+# (not use_stderr) and (not log_file)  -> stdout
+# publish_errors                       -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions.  Note that this can be a colon-separated list of
+# paths.  For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response. The value
+# 'infinite' or a negative integer means no limit; otherwise the value must
+# be greater than 0. If more items are requested than pagination_max_limit,
+# the server returns at most pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# ===========  end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to the first DHCP agent that sends a get_active_networks message
+# to the neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to the first L3 agent that sends a sync_routers message to the neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# ===========  end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as workers.  The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as RPC workers.  The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end, a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end, a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# ===========  end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry count in the event connectivity is lost;
+# -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for a default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron_init.sh b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron_init.sh
new file mode 100644 (file)
index 0000000..b92e202
--- /dev/null
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}}
+
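The two commented commands above create the external network once the cluster is up; for reference, this is what they expand to with example values substituted for the template variables (all addresses, ranges and passwords below are placeholders):

    neutron --os-username=admin --os-password=secret --os-tenant-name=admin \
        --os-auth-url=http://10.1.0.222:35357/v2.0 \
        net-create ext-net --shared --router:external=True
    neutron --os-username=admin --os-password=secret --os-tenant-name=admin \
        --os-auth-url=http://10.1.0.222:35357/v2.0 \
        subnet-create ext-net --name ext-subnet \
        --allocation-pool start=203.0.113.101,end=203.0.113.200 \
        --disable-dhcp --gateway 203.0.113.1 203.0.113.0/24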
diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/nova.conf b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/nova.conf
new file mode 100644 (file)
index 0000000..c8991a3
--- /dev/null
@@ -0,0 +1,72 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE}}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=osapi_compute,metadata
+
+vif_plugging_is_fatal = false
+vif_plugging_timeout = 0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+osapi_compute_listen={{ internal_ip }}
+metadata_listen={{ internal_ip }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
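One detail worth noting in this template: notification_driver is set twice on purpose, since it is a multi-valued option and both the RPC notifier and the ceilometer notifier should be active. A quick sanity check of the rendered file on a controller (the path assumes the default install location):

    grep -c '^notification_driver' /etc/nova/nova.conf   # expect 2
    grep '^auth_uri' /etc/nova/nova.conf                 # should show the v2.0 keystone path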
diff --git a/compass/deploy/ansible/openstack_juno/roles/repo/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/repo/tasks/main.yml
new file mode 100644 (file)
index 0000000..21f4ef0
--- /dev/null
@@ -0,0 +1,14 @@
+---
+- name: copy local sources.list
+  template: src=sources.list dest=/etc/apt/sources.list backup=yes
+  when: LOCAL_REPO is defined
+
+- name: copy deb packages
+  shell: cp -rf /opt/repo/pool/main/ /var/cache/apt/archives/
+  ignore_errors: True
+
+- name: add juno cloudarchive
+  apt_repository: repo="{{ juno_cloud_archive }}" state=present
+
+- name: first update pkgs
+  apt: update_cache=yes
diff --git a/compass/deploy/ansible/openstack_juno/roles/repo/templates/sources.list b/compass/deploy/ansible/openstack_juno/roles/repo/templates/sources.list
new file mode 100644 (file)
index 0000000..8b062e7
--- /dev/null
@@ -0,0 +1 @@
+{{ LOCAL_REPO }}
diff --git a/compass/deploy/ansible/openstack_juno/single-controller.yml b/compass/deploy/ansible/openstack_juno/single-controller.yml
new file mode 100644 (file)
index 0000000..15220ca
--- /dev/null
@@ -0,0 +1,38 @@
+---
+- hosts: all
+  remote_user: root
+  sudo: true
+  roles:
+    - repo
+
+- hosts: controller
+  sudo: True
+  roles: 
+    - common
+    - database
+    - mq
+    - keystone
+    - nova-controller
+    - neutron-controller
+    - dashboard
+    - cinder-controller
+    - glance
+
+- hosts: network
+  sudo: True
+  roles:
+    - common
+    - neutron-network
+
+- hosts: storage
+  sudo: True
+  roles:
+    - common
+    - cinder-volume
+
+- hosts: compute
+  sudo: True
+  roles:
+    - common
+    - nova-compute
+    - neutron-compute
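A playbook like this is normally driven by Compass itself, but it can also be run by hand against a matching inventory (the inventory file name here is an assumption):

    ansible-playbook -i inventory single-controller.yml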
diff --git a/compass/deploy/ansible/openstack_juno/storage.yml b/compass/deploy/ansible/openstack_juno/storage.yml
new file mode 100644 (file)
index 0000000..3c0aa41
--- /dev/null
@@ -0,0 +1,8 @@
+---
+- hosts: all
+  remote_user: vagrant
+  sudo: True
+  roles:
+    - repo
+    - common
+    - cinder-volume
index 8362b9a..bb9d66c 100644 (file)
@@ -1,3 +1,4 @@
+export COMPASS_SERVER=10.1.0.12
 export COMPASS_SERVER_URL="http://10.1.0.12/api"
 export COMPASS_USER_EMAIL="admin@huawei.com"
 export COMPASS_USER_PASSWORD="admin"
diff --git a/compass/deploy/conf/cluster.conf b/compass/deploy/conf/cluster.conf
new file mode 100644 (file)
index 0000000..4f43027
--- /dev/null
@@ -0,0 +1,20 @@
+export VIRT_NUMBER=5
+export VIRT_CPUS=4
+export VIRT_MEM=16384
+export VIRT_DISK=30G
+export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
+#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*'
+export ADAPTER_NAME="openstack_juno"
+export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
+export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes"
+export HOSTNAMES="host1,host2,host3,host4,host5"
+export HOST_ROLES="host1=controller,ha;host2=controller,ha;host3=controller,ha;host4=compute;host5=compute"
+export DEFAULT_ROLES=""
+export SWITCH_IPS="1.1.1.1"
+export SWITCH_CREDENTIAL="version=2c,community=public"
+export DEPLOYMENT_TIMEOUT="150"
+export POLL_SWITCHES_FLAG="nopoll_switches"
+export DASHBOARD_URL=""
+export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+source ${REGTEST_DIR}/base.conf
+export VIP="10.1.0.222"
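HOST_ROLES packs one role list per host: ';' separates hosts, ',' separates roles within a host. A sketch of how a consumer script could expand it (illustrative only, not part of the deploy flow):

    IFS=';' read -ra assignments <<< "$HOST_ROLES"
    for a in "${assignments[@]}"; do
        host=${a%%=*}      # e.g. host1
        roles=${a#*=}      # e.g. controller,ha
        echo "$host -> $roles"
    done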
index e63e514..32981aa 100644 (file)
@@ -1,6 +1,6 @@
 export VIRT_NUMBER=5
 export VIRT_CPUS=4
-export VIRT_MEM=4096
+export VIRT_MEM=16384
 export VIRT_DISK=30G
 export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
 #export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*'
index 18857cd..41ef209 100644 (file)
@@ -16,6 +16,12 @@ fi
 cp bin/switch_virtualenv.py.template bin/switch_virtualenv.py
 sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" bin/switch_virtualenv.py
 #source ../compass-install/ci/allinone.conf
+/usr/bin/expect ${SCRIPT_DIR}/../deploy/remote_excute.exp \
+    "ssh root@${COMPASS_SERVER} mkdir -p /opt/compass/bin/ansible_callbacks" vagrant
+
+/usr/bin/expect ${SCRIPT_DIR}/../deploy/remote_excute.exp \
+    "scp -r ${SCRIPT_DIR}/../deploy/status_callback.py root@${COMPASS_SERVER}:/opt/compass/bin/ansible_callbacks/status_callback.py" \
+    vagrant
 bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" \
 --compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" \
 --cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" \
@@ -32,7 +38,8 @@ bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_
 --network_mapping="${NETWORK_MAPPING}" --package_config_json_file="${PACKAGE_CONFIG_FILENAME}" \
 --host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" \
 --machines=${machines//\'} --switch_credential="${SWITCH_CREDENTIAL}" \
---deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}"
+--deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" \
+--cluster_vip="${VIP}"
 deploy_result=$?
 tear_down_machines
 cd ../compass-install
diff --git a/compass/deploy/remote_excute.exp b/compass/deploy/remote_excute.exp
new file mode 100644 (file)
index 0000000..9dd112b
--- /dev/null
@@ -0,0 +1,23 @@
+#!/usr/bin/expect
+
+set command [lindex $argv 0]
+set passwd [lindex $argv 1]
+
+eval spawn "$command"
+set timeout 60
+
+expect {
+    -re ".*es.*o.*"
+    {
+        exp_send "yes\r"
+        exp_continue
+    }
+
+    -re ".*sword:" {
+        exp_send "$passwd\r"
+
+    }
+
+}
+
+interact
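The expect wrapper exists only to answer the host-key and password prompts non-interactively. Where installing sshpass is an option, the first call above could equally be written without expect (a sketch, reusing the vagrant password the callers pass in):

    sshpass -p vagrant ssh -o StrictHostKeyChecking=no \
        root@${COMPASS_SERVER} mkdir -p /opt/compass/bin/ansible_callbacks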
diff --git a/compass/deploy/status_callback.py b/compass/deploy/status_callback.py
new file mode 100644 (file)
index 0000000..8619132
--- /dev/null
@@ -0,0 +1,174 @@
+# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+import httplib
+import json
+import sys
+import logging
+
+def task_error(host, data):
+    logging.info("task_error: host=%s,data=%s" % (host, data))
+
+    if type(data) == dict:
+        invocation = data.pop('invocation', {})
+
+    notify_host("localhost", host, "error")  # notify_host accepts only "succ" or "error"
+
+class CallbackModule(object):
+    """
+    logs playbook results, per host, in /var/log/ansible/hosts
+    """
+
+    def on_any(self, *args, **kwargs):
+        pass
+
+    def runner_on_failed(self, host, res, ignore_errors=False):
+        task_error(host, res)
+
+    def runner_on_ok(self, host, res):
+        pass
+
+    def runner_on_skipped(self, host, item=None):
+        pass
+
+    def runner_on_unreachable(self, host, res):
+        pass
+
+    def runner_on_no_hosts(self):
+        pass
+
+    def runner_on_async_poll(self, host, res, jid, clock):
+        pass
+
+    def runner_on_async_ok(self, host, res, jid):
+        pass
+
+    def runner_on_async_failed(self, host, res, jid):
+        task_error(host, res)
+
+    def playbook_on_start(self):
+        pass
+
+    def playbook_on_notify(self, host, handler):
+        pass
+
+    def playbook_on_no_hosts_matched(self):
+        pass
+
+    def playbook_on_no_hosts_remaining(self):
+        pass
+
+    def playbook_on_task_start(self, name, is_conditional):
+        pass
+
+    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+        pass
+
+    def playbook_on_setup(self):
+        pass
+
+    def playbook_on_import_for_host(self, host, imported_file):
+        pass
+
+    def playbook_on_not_import_for_host(self, host, missing_file):
+        pass
+
+    def playbook_on_play_start(self, name):
+        pass
+
+    def playbook_on_stats(self, stats):
+        logging.info("playbook_on_stats enter")
+        hosts = sorted(stats.processed.keys())
+        host_vars = self.playbook.inventory.get_variables(hosts[0])
+        cluster_name = host_vars['cluster_name']
+        failures = False
+        unreachable = False
+
+        for host in hosts:
+            summary = stats.summarize(host)
+
+            if summary['failures'] > 0:
+                failures = True
+            if summary['unreachable'] > 0:
+                unreachable = True
+
+        if failures or unreachable:
+            for host in hosts:
+                notify_host("localhost", host, "error")
+            return
+
+        for host in hosts:
+            clusterhost_name = host + "." + cluster_name
+            notify_host("localhost", clusterhost_name, "succ")
+
+
+def raise_for_status(resp):
+    if resp.status < 200 or resp.status >= 300:
+        raise RuntimeError("%s, %s, %s" % (resp.status, resp.reason, resp.read()))
+
+def auth(conn):
+    credential = {}
+    credential['email'] = "admin@huawei.com"
+    credential['password'] = "admin"
+    url = "/api/users/token"
+    headers = {"Content-type": "application/json",
+               "Accept": "*/*"}
+    conn.request("POST", url, json.dumps(credential), headers)
+    resp = conn.getresponse()
+
+    raise_for_status(resp)
+    return json.loads(resp.read())["token"]
+
+def notify_host(compass_host, host, status):
+    if status == "succ":
+        body = {"ready": True}
+        url = "/api/clusterhosts/%s/state_internal" % host
+    elif status == "error":
+        body = {"state": "ERROR"}
+        host = host.strip("host")  # crude prefix strip; works for names like "host1"
+        url = "/api/clusterhosts/%s/state" % host
+    else:
+        logging.error("notify_host: host %s with status %s is not supported" \
+                % (host, status))
+        return
+
+    headers = {"Content-type": "application/json",
+               "Accept": "*/*"}
+
+    conn = httplib.HTTPConnection(compass_host, 80)
+    token = auth(conn)
+    headers["X-Auth-Token"] = token
+    logging.info("host=%s,url=%s,body=%s,headers=%s" % (compass_host,url,json.dumps(body),headers))
+    conn.request("POST", url, json.dumps(body), headers)
+    resp = conn.getresponse()
+    try:
+        raise_for_status(resp)
+        logging.info("notify host status success!!! status=%s, body=%s" % (resp.status, resp.read()))
+    except Exception as e:
+        logging.error("http request failed %s" % str(e))
+        raise
+    finally:
+        conn.close()
+
+if __name__ == "__main__":
+    if len(sys.argv) != 3:
+        logging.error("usage: status_callback.py <host> <status>")
+        sys.exit(1)
+
+    host = sys.argv[1]
+    status = sys.argv[2]
+    notify_host("localhost", host, status)
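The callback drives the Compass REST API on localhost:80; the same endpoints can be exercised by hand when debugging (the cluster host name below is illustrative):

    curl -s -X POST http://localhost/api/users/token \
        -H 'Content-Type: application/json' \
        -d '{"email": "admin@huawei.com", "password": "admin"}'
    # then, with the token from the response:
    curl -s -X POST http://localhost/api/clusterhosts/host1.cluster1/state_internal \
        -H 'Content-Type: application/json' -H 'X-Auth-Token: <token>' \
        -d '{"ready": true}'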
index 86f03a7..31d41d2 100755 (executable)
@@ -225,7 +225,8 @@ fi
 
 ##install kmod-VirtualBox
 if ! lsmod | grep vboxdrv; then
-  if ! sudo /etc/init.d/vboxdrv setup; then
+  sudo /etc/init.d/vboxdrv setup
+  if ! lsmod | grep vboxdrv; then
     printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2
     exit 1
   fi
index 5f63120..6c98ed9 100644 (file)
@@ -43,6 +43,7 @@ SUBDIRS += f_l23network
 SUBDIRS += f_resolvconf
 SUBDIRS += f_ntp
 SUBDIRS += f_odl_docker
+SUBDIRS += f_lith_odl_docker
 #SUBDIRS += f_odl
 
 # f_example is only an example of how to generate a .deb package and
@@ -64,6 +65,7 @@ all:
        @echo "cache.mk" $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") >> $(VERSION_FILE)
        @echo "config.mk" $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") >> $(VERSION_FILE)
        $(MAKE) -C f_odl_docker -f Makefile all
+       $(MAKE) -C f_lith_odl_docker -f Makefile all
        @make -C docker
        @docker/runcontext $(DOCKERIMG) $(MAKE) $(MAKEFLAGS) iso
 
diff --git a/fuel/build/f_lith_odl_docker/Makefile b/fuel/build/f_lith_odl_docker/Makefile
new file mode 100755 (executable)
index 0000000..e89da94
--- /dev/null
@@ -0,0 +1,52 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TOP := $(shell pwd)
+BUILDTAG := robust_stefan
+RELEASE := Lithium_rc0
+
+# Edit this to match the GENESIS / OPNFV in your environment
+export OPNFV_PUPPET := $(BUILD_BASE)/../../common/puppet-opnfv
+include ../config.mk
+
+.PHONY: all
+all:
+       @mkdir -p puppet/modules/opnfv/odl_docker/${RELEASE}
+       @rm -rf tmp
+       @mkdir -p tmp
+       @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/${RELEASE}/dockerfile tmp/.
+       @docker build -t ${BUILDTAG} tmp/dockerfile/.
+       @docker save ${BUILDTAG} > puppet/modules/opnfv/odl_docker/${RELEASE}/odl_docker_image.tar
+       @wget ${DOCKER_REPO}/${DOCKER_TAG} -O  puppet/modules/opnfv/odl_docker/${RELEASE}/docker-latest
+       @echo "OPNFV_PUPPET is: ${OPNFV_PUPPET}"
+       @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/${RELEASE}/dockerfile/container_scripts  puppet/modules/opnfv/odl_docker/${RELEASE}/.
+
+.PHONY: clean
+clean:
+       @rm -rf tmp
+       @rm -rf release
+
+.PHONY: build-clean
+build-clean:
+       @rm -rf tmp
+       @rm -rf release
+       @rm -rf puppet/modules/opnfv/odl_docker/${RELEASE}/odl_docker_image.tar
+       @rm -rf puppet/modules/opnfv/odl_docker/${RELEASE}/docker-latest
+
+.PHONY: validate-cache
+validate-cache:
+       @echo "No cache validation schema available for $(shell pwd)"
+       @echo "Continuing ..."
+
+.PHONY: release
+release:
+       # Fetch PP from OPNFV Common
+       @cp -Rvp ${OPNFV_PUPPET}/manifests/odl_docker.pp ${PUPPET_DEST}
+       @cp -Rvp puppet/modules/* $(PUPPET_DEST)
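Usage sketch for this fragment, assuming the usual fuel/build tree, a running docker daemon, and a config.mk that provides DOCKER_REPO/DOCKER_TAG:

    cd fuel/build/f_lith_odl_docker
    make all           # builds the ${RELEASE} image and stages odl_docker_image.tar
    make build-clean   # drops the staged tarball and docker binary again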
diff --git a/fuel/build/f_lith_odl_docker/dockerfile/Dockerfile b/fuel/build/f_lith_odl_docker/dockerfile/Dockerfile
new file mode 100755 (executable)
index 0000000..e3c7ee5
--- /dev/null
@@ -0,0 +1,72 @@
+####################################################################
+#
+#   Dockerfile to build a ODL (Karaf) Docker Container
+#
+#   Copyright daniel.smith@ericsson.com
+#   License: Apache GPL
+#
+####################################################################
+
+
+#Set the base image - note: the current Karaf release targets JDK 7 and assumes Ubuntu 12.04, so we use 12.04 rather than 14.04 to avoid backporting a ton of stuff
+FROM ubuntu:12.04
+
+# Maintainer Info
+MAINTAINER Daniel Smith
+
+#Run apt-get update once at the start, just to pick up updates when building
+RUN echo "Updating APT"
+RUN apt-get update
+RUN echo "Adding wget"
+RUN apt-get install -y wget
+RUN apt-get install -y net-tools
+RUN apt-get install -y openjdk-7-jre
+RUN apt-get install -y openjdk-7-jdk
+RUN apt-get install -y openssh-server
+RUN apt-get install -y vim
+RUN apt-get install -y expect
+RUN apt-get install -y daemontools
+RUN mkdir -p /opt/odl_source
+RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc'
+
+
+#Now lets got and fetch the ODL distribution
+RUN echo "Fetching ODL"
+RUN wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.3-Helium-SR3/distribution-karaf-0.2.3-Helium-SR3.tar.gz -O /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz
+
+RUN echo "Untarring ODL inplace"
+RUN mkdir -p /opt/odl
+RUN tar zxvf /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz -C /opt/odl
+
+RUN echo "Installing DLUX and other features into ODL"
+COPY tmp/dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh
+COPY tmp/dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh
+RUN chmod 777 /etc/init.d/start_odl_docker.sh
+RUN chmod 777 /etc/init.d/speak.sh
+
+
+# Expose the ports
+# PORTS FOR BASE SYSTEM AND DLUX
+EXPOSE 8101
+EXPOSE 6633
+EXPOSE 1099
+EXPOSE 43506
+EXPOSE 8181
+EXPOSE 8185
+EXPOSE 9000
+EXPOSE 39378
+EXPOSE 33714
+EXPOSE 44444
+EXPOSE 6653
+
+# PORTS FOR OVSDB AND ODL CONTROL
+EXPOSE 12001
+EXPOSE 6640
+EXPOSE 8080
+EXPOSE 7800
+EXPOSE 55130
+EXPOSE 52150
+EXPOSE 36826
+
+# set the ENTRYPOINT - An entry point allows us to run this container as an executable
+CMD ["/etc/init.d/start_odl_docker.sh"]
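For local testing of an image built from this Dockerfile, something along these lines should work (image and container names are illustrative; 8181 is DLUX, 6633 is OpenFlow):

    docker build -t odl-helium .
    docker run -d --name odl -p 8181:8181 -p 6633:6633 odl-helium
    docker logs -f odl   # the start script loops, echoing karaf status once a minute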
diff --git a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh
new file mode 100755 (executable)
index 0000000..3e5d0b2
--- /dev/null
@@ -0,0 +1,8 @@
+#!/usr/bin/expect
+spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
+expect "root>"
+send "feature:list | grep -i odl-restconf\r"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
+
diff --git a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh
new file mode 100755 (executable)
index 0000000..3ba07a8
--- /dev/null
@@ -0,0 +1,17 @@
+#!/usr/bin/expect
+# Ericsson Research Canada
+#
+# Author: Daniel Smith <daniel.smith@ericsson.com>
+#
+# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
+#
+#  NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
+#  DEPRECATED AFTER ARNO
+
+spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
+expect "root>"
+send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs  odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
+
diff --git a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh
new file mode 100755 (executable)
index 0000000..1c72dda
--- /dev/null
@@ -0,0 +1,38 @@
+#!/bin/bash
+#  Ericsson Research Canada
+#
+#  Author: Daniel Smith <daniel.smith@ericsson.com>
+#
+#  Start up script for calling karaf / ODL inside a docker container.
+#
+#  This script will also call a couple expect scripts to load the feature set that we want
+
+
+#ENV
+export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
+
+#MAIN
+echo "Starting up the da Shields..."
+/opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/karaf server &
+echo "Sleeping 10 - crude wait for karaf to come up"
+sleep 10
+echo "should see stuff listening now"
+netstat -na
+echo " should see a process running for karaf"
+ps -efa
+echo " Starting the packages we want"
+/etc/init.d/speak.sh
+echo "Print out the status - if it's right, you should see 8181 appear now"
+netstat -na
+ps -efa
+
+
+
+## This loop keeps the container running; it prints the karaf status to the
+## docker logs every minute. Cheap - but effective
+while true;
+do
+       echo "Checking status of ODL:"
+       /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/status
+       sleep 60
+done
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp
new file mode 100644 (file)
index 0000000..e456180
--- /dev/null
@@ -0,0 +1,81 @@
+class opnfv::odl_lith_docker
+{
+  case $::fuel_settings['role'] {
+    /controller/: {
+
+      file { '/opt':
+        ensure => 'directory',
+      }
+
+      file { '/opt/opnfv':
+        ensure => 'directory',
+        owner  => 'root',
+        group  => 'root',
+        mode   => 777,
+      }
+
+      file { '/opt/opnfv/odl':
+        ensure => 'directory',
+      }
+
+      file { '/opt/opnfv/odl/lithium':
+       ensure => 'directory',
+      }
+
+      file { '/opt/opnfv/odl/lithium/odl_docker_image.tar':
+        ensure => present,
+        source => '/etc/puppet/modules/opnfv/odl_docker/Lithium_rc0/odl_docker_image.tar',
+        mode   => 750,
+      }
+
+      file { '/opt/opnfv/odl/lithium/docker-latest':
+        ensure => present,
+        source => '/etc/puppet/modules/opnfv/odl_docker/Lithium_rc0/docker-latest',
+        mode   => 750,
+      }
+
+      file { '/opt/opnfv/odl/start_odl_container.sh':
+        ensure => present,
+        source => '/etc/puppet/modules/opnfv/scripts/start_odl_container.sh',
+        mode   => 750,
+      }
+      file { '/opt/opnfv/odl/stage_odl.sh':
+        ensure => present,
+        source => '/etc/puppet/modules/opnfv/scripts/stage_odl.sh',
+        mode   => 750,
+      }
+      file { '/opt/opnfv/odl/config_net_odl.sh':
+        ensure => present,
+        source => '/etc/puppet/modules/opnfv/scripts/config_net_odl.sh',
+        mode   => 750,
+      }
+      file { '/opt/opnfv/odl/change.sh':
+        ensure => present,
+        source => '/etc/puppet/modules/opnfv/scripts/change.sh',
+        mode   => 750,
+      }
+
+
+      # fix failed to find the cgroup root issue
+      # https://github.com/docker/docker/issues/8791
+      case $::operatingsystem {
+        'ubuntu': {
+          package {'cgroup-lite':
+            ensure => present,
+          }
+
+          service {'cgroup-lite':
+            ensure  => running,
+            enable  => true,
+            require => Package['cgroup-lite'],
+          }
+        }
+        'centos': {
+          package {'docker-io':
+            ensure => latest,
+          }
+        }
+      }
+    }
+  }
+}
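A quick way to exercise the class by hand on a Fuel-provisioned controller (the class keys off $::fuel_settings['role'], so it only acts on controllers), assuming the module and staged files already sit under /etc/puppet/modules:

    puppet apply --modulepath=/etc/puppet/modules -e 'include opnfv::odl_lith_docker'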
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh
new file mode 100644 (file)
index 0000000..04d7b53
--- /dev/null
@@ -0,0 +1,18 @@
+#!/usr/bin/expect
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client
+expect "root>"
+send "feature:list | grep -i odl-restconf\r"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
+
+
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh
new file mode 100644 (file)
index 0000000..a7d0e6c
--- /dev/null
@@ -0,0 +1,20 @@
+#!/usr/bin/expect
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
+#  NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
+#################################################################################
+
+spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client
+expect "root>"
+send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs  odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh
new file mode 100644 (file)
index 0000000..96a40ec
--- /dev/null
@@ -0,0 +1,48 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Start-up script for calling karaf / ODL inside a docker container.
+#  NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
+#################################################################################
+#  This script starts karaf inside the container.
+#
+#  It also calls a couple of expect scripts to load the feature set that we want.
+
+
+#ENV
+export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
+
+#MAIN
+echo "Starting up the ODL karaf server..."
+/opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/karaf server &
+echo "Sleeping 10 seconds - temporary hack until a proper readiness check is in place"
+sleep 10
+echo "You should see the karaf ports listening now"
+netstat -na
+echo "You should see a process running for karaf"
+ps -efa
+echo "Starting the features we want"
+/etc/init.d/speak.sh
+echo "Printing the status - if it is right, you should see port 8181 appear now"
+netstat -na
+ps -efa
+
+
+
+## This loop keeps the container running and prints the karaf status to the docker logs every minute.
+## Cheap - but effective
+while true;
+do
+        echo "Checking status of ODL:"
+        /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/status
+        sleep 60
+done
+
+
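
The fixed sleep above is fragile on slow hosts. A polling sketch like the following (netstat is assumed available, as the script already uses it) would wait until karaf is actually listening before loading features:

    # wait up to ~2 minutes for karaf's ssh port (8101) to open
    for i in $(seq 1 24); do
        netstat -lnt | grep -q ':8101 ' && break
        sleep 5
    done
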
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh
new file mode 100644 (file)
index 0000000..f7f3d6e
--- /dev/null
@@ -0,0 +1,219 @@
+#!/bin/bash
+# script to remove bridges and reset networking for ODL
+
+
+#VARS
+MODE=0
+DNS=8.8.8.8
+
+#ENV
+source ~/openrc
+
+# GET IPS for that node
+function get_ips {
+       BR_MGMT=`grep address /etc/network/ifcfg_backup/ifcfg-br-mgmt | awk -F" " '{print $2}'`
+       BR_STORAGE=`grep address /etc/network/ifcfg_backup/ifcfg-br-storage | awk -F" " '{print $2}'`
+       BR_FW_ADMIN=`grep address /etc/network/ifcfg_backup/ifcfg-br-fw-admin | awk -F" " '{print $2}'`
+       BR_EX=`grep address /etc/network/ifcfg_backup/ifcfg-br-ex | awk -F" " '{print $2}'`
+       DEF_NETMASK=255.255.255.0
+       DEF_GW=172.30.9.1
+}
+
+function backup_ifcfg {
+        echo " backing up "
+        mkdir -p /etc/network/ifcfg_backup
+        mv /etc/network/interfaces.d/ifcfg-br-ex /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-br-fw-admin /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-br-mgmt /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-br-storage /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-br-prv /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-eth0 /etc/network/ifcfg_backup/.
+        mv /etc/network/interfaces.d/ifcfg-eth1 /etc/network/ifcfg_backup/.
+        rm -rf /etc/network/interfaces.d/ifcfg-eth1.300
+        rm -rf /etc/network/interfaces.d/ifcfg-eth1.301
+        rm -rf /etc/network/interfaces.d/ifcfg-eth1
+        rm -rf /etc/network/interfaces.d/ifcfg-eth0
+
+}
+
+
+function create_ifcfg_br_mgmt {
+        echo "migrating br_mgmt"
+        echo "auto eth1.300" >> /etc/network/interfaces.d/ifcfg-eth1.300
+        echo "iface eth1.300 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.300
+        echo "     address $BR_MGMT" >> /etc/network/interfaces.d/ifcfg-eth1.300
+        echo "     netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.300
+}
+
+function create_ifcfg_br_storage {
+        echo "migrating br_storage"
+        echo "auto eth1.301" >> /etc/network/interfaces.d/ifcfg-eth1.301
+        echo "iface eth1.301 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.301
+        echo "     address $BR_STORAGE" >> /etc/network/interfaces.d/ifcfg-eth1.301
+        echo "     netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.301
+}
+
+function create_ifcfg_br_fw_admin {
+        echo "migrating br_fw_admin"
+        echo "auto eth1" >> /etc/network/interfaces.d/ifcfg-eth1
+        echo "iface eth1 inet static" >> /etc/network/interfaces.d/ifcfg-eth1
+        echo "     address $BR_FW_ADMIN" >> /etc/network/interfaces.d/ifcfg-eth1
+        echo "     netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1
+}
+
+function create_ifcfg_eth0 {
+        echo "migrating br-ex to eth0 - temporarily"
+        echo "auto eth0" >> /etc/network/interfaces.d/ifcfg-eth0
+        echo "iface eth0 inet static" >> /etc/network/interfaces.d/ifcfg-eth0
+        echo "     address $BR_EX" >> /etc/network/interfaces.d/ifcfg-eth0
+        echo "     netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth0
+        echo "     gateway $DEF_GW" >> /etc/network/interfaces.d/ifcfg-eth0
+}
+
+function set_mode {
+       if [ -d "/var/lib/glance/images" ]
+       then 
+               echo " controller "
+               MODE=0
+       else 
+               echo " compute "
+               MODE=1
+       fi
+}
+
+
+function stop_ovs {
+        echo "Stopping OpenVSwitch"
+        service openvswitch-switch stop
+
+}
+
+function start_ovs {
+        echo "Starting OVS"
+        service openvswitch-switch start
+        ovs-vsctl show
+}
+
+
+function clean_ovs {
+        echo "cleaning OVS DB"
+        stop_ovs
+        rm -rf /var/log/openvswitch/*
+        mkdir -p /opt/opnfv/odl/ovs_back
+        cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/.
+        rm -rf /etc/openvswitch/conf.db
+        echo "restarting OVS - you should see nothing there"
+        start_ovs
+}
+
+
+
+function reboot_me {
+        reboot
+}
+
+function allow_challenge {
+       sed -i -e 's/ChallengeResponseAuthentication no/ChallengeResponseAuthentication yes/g' /etc/ssh/sshd_config
+       service ssh restart
+}
+
+function clean_neutron {
+       subnets=( `neutron subnet-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+       networks=( `neutron net-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+       ports=( `neutron port-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+       routers=( `neutron router-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+
+       #display all elements
+       echo "SUBNETS: ${subnets[@]} "
+       echo "NETWORKS: ${networks[@]} "
+       echo "PORTS: ${ports[@]} "
+       echo "ROUTERS: ${routers[@]} "
+       
+       
+       # get port and subnet for each router
+       for i in "${routers[@]}"
+       do
+               routerport=( `neutron router-port-list $i | awk -F" " '{print $2}' | grep -v id |  sed '/^$/d' `)
+               routersnet=( `neutron router-port-list $i | awk -F" " '{print $8}' | grep -v fixed |  sed '/^$/d' | sed 's/,$//' | sed -e 's/^"//'  -e 's/"$//' `)
+       done
+
+       echo "ROUTER PORTS: ${routerport[@]} "
+       echo "ROUTER SUBNET: ${routersnet[@]} "
+       
+       #remove router subnets
+       echo "router-interface-delete"
+       for i in "${routersnet[@]}"
+       do
+               neutron router-interface-delete ${routers[0]} $i
+       done
+
+       #remove subnets
+       echo "subnet-delete"
+       for i in "${subnets[@]}"
+       do
+               neutron subnet-delete $i
+       done
+
+       #remove nets
+       echo "net-delete"
+       for i in "${networks[@]}"
+       do
+               neutron net-delete $i
+       done
+
+       #remove routers
+       echo "router-delete"
+       for i in "${routers[@]}"
+       do
+               neutron router-delete $i
+       done
+
+       #remove ports
+       echo "port-delete"
+       for i in "${ports[@]}"
+       do
+               neutron port-delete $i
+       done
+
+       #remove subnets
+       echo "subnet-delete second pass"
+       for i in "${subnets[@]}"
+       do
+               neutron subnet-delete $i
+       done
+
+}
+
+function set_dns {
+       sed -i -e "s/nameserver 10.20.0.2/nameserver $DNS/g" /etc/resolv.conf
+}
+
+
+#OUTPUT
+
+function check {
+       echo $BR_MGMT
+       echo $BR_STORAGE
+       echo $BR_FW_ADMIN
+       echo $BR_EX
+}
+
+### MAIN
+
+
+set_mode
+backup_ifcfg
+get_ips
+create_ifcfg_br_mgmt
+create_ifcfg_br_storage
+create_ifcfg_br_fw_admin
+if [ $MODE == "0" ]
+then
+        create_ifcfg_eth0
+fi
+allow_challenge
+clean_ovs
+check
+reboot_me
+
+
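
For reference, with BR_MGMT=192.168.0.3 (a made-up address) and the default netmask, create_ifcfg_br_mgmt above would emit an /etc/network/interfaces.d/ifcfg-eth1.300 fragment like:

    auto eth1.300
    iface eth1.300 inet static
         address 192.168.0.3
         netmask 255.255.255.0
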
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh
new file mode 100755 (executable)
index 0000000..145da80
--- /dev/null
@@ -0,0 +1,192 @@
+#!/bin/bash
+#
+# Author: Daniel Smith (Ericsson)
+#
+# Script to update neutron configuration for OVSDB/ODL integration
+#
+#  Usage - Set / pass CONTROL_HOST to your needs
+#
+### SET THIS VALUE TO MATCH YOUR SYSTEM
+CONTROL_HOST=192.168.0.2
+BR_EX_IP=172.30.9.70
+
+# ENV
+source ~/openrc
+# VARS
+ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
+MODE=0
+
+
+# FUNCTIONS
+# Update ml2_conf.ini
+function update_ml2conf {
+        echo "Backing up and modifying ml2_conf.ini"
+        cp $ML2_CONF $ML2_CONF.bak
+        sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
+        sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
+        sed -i -e 's/bridge_mappings=physnet2:br-prv/bridge_mappings=physnet1:br-ex/g' $ML2_CONF
+        echo "[ml2_odl]" >> $ML2_CONF
+        echo "password = admin" >> $ML2_CONF
+        echo "username = admin" >> $ML2_CONF
+        echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
+}
+
+function reset_neutrondb {
+        echo "Resetting DB"
+        mysql -e "drop database if exists neutron_ml2;"
+        mysql -e "create database neutron_ml2 character set utf8;"
+        mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
+        neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+}
+
+function restart_neutron {
+        echo "Restarting Neutron Server"
+        service neutron-server restart
+        echo "Should see Neutron running now"
+        service neutron-server status
+        echo "Shouldn't be any nets yet, but the command should work (return empty)"
+        neutron net-list
+}
+
+function stop_neutron {
+        echo "Stopping Neutron / OVS components"
+        service  neutron-plugin-openvswitch-agent stop
+        if [ $MODE == "0" ]
+        then
+                service neutron-server stop
+        fi
+}
+
+function disable_agent {
+       echo "Disabling Neutron Plugin Agents from running"
+       service neutron-plugin-openvswitch-agent stop
+       echo 'manual' > /etc/init/neutron-plugin-openvswitch-agent.override
+}
+
+
+
+function verify_ML2_working {
+        echo "checking that we can talk via ML2 properly"
+        curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
+        if grep "network" /tmp/check_ml2
+        then
+                echo "Success - ML2 to ODL is working"
+        else
+                echo "I'm sorry Jim, but it's dead"
+        fi
+
+}
+
+
+function set_mode {
+        if [ -d "/var/lib/glance/images" ]
+        then
+                echo "Controller Mode"
+                MODE=0
+        else
+                echo "Compute Mode"
+                MODE=1
+        fi
+}
+
+function stop_ovs {
+        echo "Stopping OpenVSwitch"
+        service openvswitch-switch stop
+
+}
+
+function start_ovs {
+       echo "Starting OVS"
+       service openvswitch-switch start
+       ovs-vsctl show
+}
+
+
+function control_setup {
+        echo "Modifying Controller"
+        stop_neutron
+        stop_ovs
+       disable_agent
+        rm -rf /var/log/openvswitch/*
+        mkdir -p /opt/opnfv/odl/ovs_back
+        mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/.
+        mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/.
+       rm -rf /etc/openvswitch/conf.db
+       rm -rf /etc/openvswitch/.conf*
+        service openvswitch-switch start
+        ovs-vsctl add-br br-ex
+        ovs-vsctl add-port br-ex eth0
+        ovs-vsctl set interface br-ex type=internal
+        ifconfig br-ex ${BR_EX_IP}/24 up
+        service neutron-server restart
+
+        echo "setting up networks"
+        ip link add link eth1 name br-mgmt type vlan id 300
+       ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp
+        ip link add link eth1 name br-storage type vlan id 301
+       ip link add link eth1 name br-prv type vlan id 1000
+       ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp
+       ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp
+
+       echo "Setting ODL Manager IP"
+        ovs-vsctl set-manager tcp:${CONTROL_HOST}:6640
+
+        echo "Verifying ODL ML2 plugin is working"
+        verify_ML2_working
+
+       # BAD HACK - should be parameterized - restores the default route for this node
+       route add default gw 172.30.9.1
+
+}
+
+function clean_ovs {
+       echo "cleaning OVS DB"
+       stop_ovs
+       rm -rf /var/log/openvswitch/*
+       mkdir -p /opt/opnfv/odl/ovs_back
+       cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/.
+       rm -rf /etc/openvswitch/conf.db
+       echo "restarting OVS - you should see nothing there"
+       start_ovs
+}
+
+function compute_setup {
+        echo "Modifying Compute"
+        echo "Disabling neutron openvswitch plugin"
+        stop_neutron
+       disable_agent
+        ip link add link eth1 name br-mgmt type vlan id 300
+        ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp
+        ip link add link eth1 name br-storage type vlan id 301
+       ip link add link eth1 name br-prv type vlan id 1000
+        ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp
+        ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp
+
+        echo "set manager, and route for ODL controller"
+        ovs-vsctl set-manager tcp:${CONTROL_HOST}:6640
+        route add 172.17.0.1 gw 192.168.0.2
+        verify_ML2_working
+}
+
+
+# MAIN
+echo "Starting to make call"
+update_ml2conf
+echo "Check Mode"
+set_mode
+
+if [ $MODE == "0" ];
+then
+        echo "Calling control setup"
+        control_setup
+elif [ $MODE == "1" ];
+then
+        echo "Calling compute setup"
+        compute_setup
+
+else
+        echo "Something is bad - call for help"
+        exit
+fi
+
+
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh
new file mode 100755 (executable)
index 0000000..fa14b47
--- /dev/null
@@ -0,0 +1,54 @@
+#!/bin/bash
+#   Author: Daniel Smith (Ericsson)
+#   Stages the ODL Controller
+#   Inputs:  odl_docker_image.tar
+#   Usage:  ./stage_odl.sh
+
+# ENVS
+source ~/.bashrc
+source ~/openrc
+
+LOCALPATH=/opt/opnfv/odl
+DOCKERBIN=docker-latest
+ODLIMGNAME=odl_docker_image.tar
+DNS=8.8.8.8
+HOST_IP=`ifconfig br-ex | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'`
+
+
+
+# DEBUG ECHOS
+echo $LOCALPATH
+echo $DOCKERBIN
+echo $ODLIMGNAME
+echo $DNS
+echo $HOST_IP
+
+
+# Set DNS to something external and set the default GW - ODL requires a connection to the internet
+sed -i -e "s/nameserver 10.20.0.2/nameserver $DNS/g" /etc/resolv.conf
+route delete default gw 10.20.0.2
+route add default gw 172.30.9.1
+
+# Start the Docker daemon in the background
+echo "Starting Docker"
+chmod +x $LOCALPATH/$DOCKERBIN
+$LOCALPATH/$DOCKERBIN -d &
+#courtesy sleep for virtual env
+sleep 2
+
+# Import the ODL Container
+echo "Importing ODL Container"
+$LOCALPATH/$DOCKERBIN load -i $LOCALPATH/$ODLIMGNAME
+
+# Start ODL, load DLUX and OVSDB modules
+echo "Removing any old install found - file not found is ok here"
+$LOCALPATH/$DOCKERBIN rm odl_docker
+echo "Starting up ODL controller in Daemon mode - no shell possible"
+$LOCALPATH/$DOCKERBIN  run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel
+
+# Following this, you should see the container listed in docker ps and the ports opened
+echo "You should reach the ODL controller at http://${HOST_IP}:8181/dlux/index.html"
+$LOCALPATH/$DOCKERBIN ps -a
+netstat -lnt
+
+
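
The courtesy sleep above can race on slow hosts; a more robust sketch is to poll the daemon until it answers before loading the image:

    # wait until the docker daemon responds
    until $LOCALPATH/$DOCKERBIN info > /dev/null 2>&1; do
        sleep 1
    done
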
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh
new file mode 100755 (executable)
index 0000000..7b91f4f
--- /dev/null
@@ -0,0 +1,95 @@
+#!/bin/bash
+#  Ericsson Canada Inc.
+#  Author: Daniel Smith
+#
+#   A helper script to install and setup the ODL docker container on the controller
+#
+#
+#   Inputs:  odl_docker_image.tar
+#
+#   Usage:  ./start_odl_docker.sh
+echo "DEPRECATED - USE stage_odl.sh instead  - this will be removed shortly once automated deployment is working - SR1"
+
+
+# ENVS
+source ~/.bashrc
+source ~/openrc
+
+# VARS
+
+# Switch for Dev mode - uses apt-get on control to cheat and get docker installed locally rather than from puppet source
+
+DEV=1
+
+# Switch for 1:1 port mapping of EXPOSED ports in Docker to the host. If set to 0, random ports will be used instead.
+# NOTE: mapping X port on the host to Y port in the container doesn't work for all web services, especially for SSL/HTTPS cases. Be aware.
+
+MATCH_PORT=1
+
+LOCALPATH=/opt/opnfv/odl
+DOCKERBINNAME=docker-latest
+DOCKERIMAGENAME=odl_docker_image.tar
+DNS=8.8.8.8
+HOST_IP=`ifconfig br-fw-admin  | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'`
+
+
+# Set this to "1" if you want to have your docker container startup into a shell
+
+
+ENABLE_SHELL=1
+
+
+echo " Fetching Docker "
+if [ "$DEV" -eq "1" ];
+# If testing locally (on a control node), you can set DEV=1 to enable an apt-get based install on the control node (not the desired target, but good for testing).
+then
+        echo "Dev Mode - Fetching from Internet";
+        echo " this won't work in production builds";
+        apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+        mkdir -p $LOCALPATH
+        wget https://get.docker.com/builds/Linux/x86_64/docker-latest -O $LOCALPATH/$DOCKERBINNAME
+        wget http://ftp.us.debian.org/debian/pool/main/d/docker.io/docker.io_1.3.3~dfsg1-2_amd64.deb
+        chmod 777 $LOCALPATH/$DOCKERBINNAME
+        echo "done ";
+else
+        echo "Using Binaries delivered from Puppet"
+       echo "Starting Docker in Daemon mode"
+       chmod +x $LOCALPATH/$DOCKERBINNAME
+       $LOCALPATH/$DOCKERBINNAME -d &
+
+  # wait until docker will be fully initialized
+  # before any further action against just started docker
+  sleep 5
+fi
+
+
+# We need to perform some cleanup of the Openstack Environment
+echo "TODO -- This should be automated in the Fuel deployment at some point"
+echo "However, the timing should come after basic tests are running, since this "
+echo " part will remove the subnet router association that is deployed automatically"
+echo " via fuel. Refer to the ODL + Openstack Integration Page "
+
+# Import the ODL container into docker
+
+echo "Importing ODL container into docker"
+$LOCALPATH/$DOCKERBINNAME load -i $LOCALPATH/$DOCKERIMAGENAME
+
+echo " starting up ODL - DLUX and Mapping Ports"
+if [ "$MATCH_PORT" -eq "1" ]
+then
+        echo "Starting up Docker..."
+        $LOCALPATH/$DOCKERBINNAME rm odl_docker
+fi
+
+if [ "$ENABLE_SHELL" -eq "1" ];
+then
+        echo "Starting Container in Interactive Mode (/bin/bash will be provided, you will need to run ./start_odl_docker.sh inside the container yourself)"
+        $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -t loving_daniel  /bin/bash
+else
+        echo "Starting Container in Daemon mode (no shell will be provided and docker attach will not provide a shell)"
+        $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel
+        echo "should see the process listed here in docker ps -a"
+        $LOCALPATH/$DOCKERBINNAME ps -a;
+        echo "Match Port enabled, you can reach the DLUX login at: "
+        echo "http://$HOST_IP:8181/dlux/index.html"
+fi
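
Once the container is running in daemon mode, DLUX reachability can be checked from the controller itself - a minimal sketch, assuming the 8181 port mapping above:

    # 200 means DLUX is being served
    curl -s -o /dev/null -w "%{http_code}\n" http://$HOST_IP:8181/dlux/index.html
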
diff --git a/fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh b/fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh
new file mode 100644 (file)
index 0000000..d292acd
--- /dev/null
@@ -0,0 +1,164 @@
+#!/bin/bash
+#
+# Author: Daniel Smith (Ericsson)
+#
+# Script to update neutron configuration for OVSDB/ODL integration
+#
+#  Usage - Set / pass CONTROL_HOST to your needs
+#
+CONTROL_HOST=172.30.9.70
+
+# ENV
+source ~/openrc
+
+# VARS
+ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
+MODE=0
+
+
+# FUNCTIONS
+
+# Update ml2_conf.ini
+function update_ml2conf {
+        echo "Backing up and modifying ml2_conf.ini"
+        cp $ML2_CONF $ML2_CONF.bak
+        sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
+        sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
+        echo "[ml2_odl]" >> $ML2_CONF
+        echo "password = admin" >> $ML2_CONF
+        echo "username = admin" >> $ML2_CONF
+        echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
+}
+
+function reset_neutrondb {
+        echo "Resetting DB"
+        mysql -e "drop database if exists neutron_ml2;"
+        mysql -e "create database neutron_ml2 character set utf8;"
+        mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
+        neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+}
+
+function restart_neutron {
+        echo "Restarting Neutron Server"
+        service neutron-server restart
+        echo "Should see Neutron running now"
+        service neutron-server status
+        echo "Shouldn't be any nets yet, but the command should work (return empty)"
+        neutron net-list
+}
+
+function stop_neutron {
+        echo "Stopping Neutron / OVS components"
+        service  neutron-plugin-openvswitch-agent stop
+        if [ $MODE == "0" ]
+        then
+                service neutron-server stop
+        fi
+}
+
+
+
+function verify_ML2_working {
+        echo "checking that we can talk via ML2 properly"
+        curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
+        if grep "network" /tmp/check_ml2
+        then
+                echo "Success - ML2 to ODL is working"
+        else
+                echo "I'm sorry Jim, but it's dead"
+        fi
+
+}
+
+
+function set_mode {
+        if ls -l /var/lib/glance/images
+        then
+                echo "Controller Mode"
+                MODE=0
+        else
+                echo "Compute Mode"
+                MODE=1
+        fi
+}
+
+function stop_ovs {
+        echo "Stopping OpenVSwitch"
+        service openvswitch-switch stop
+
+}
+
+function control_setup {
+        echo "Modifying Controller"
+        stop_neutron
+        stop_ovs
+        rm -rf /var/log/openvswitch/*
+        mkdir -p /opt/opnfv/odl/ovs_back
+        mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/.
+        mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/.
+        service openvswitch-switch start
+        ovs-vsctl set-manager tcp:${CONTROL_HOST}:6640
+        ovs-vsctl add-br br-eth0
+        ovs-vsctl add-br br-ex
+        ovs-vsctl add-port br-eth0 eth0
+        ovs-vsctl add-port br-eth0 br-eth0--br-ex
+        ovs-vsctl add-port br-ex br-ex--br-eth0
+        ovs-vsctl set interface br-ex--br-eth0 type=patch
+        ovs-vsctl set interface br-eth0--br-ex type=patch
+        ovs-vsctl set interface br-ex--br-eth0 options:peer=br-eth0--br-ex
+        ovs-vsctl set interface br-eth0--br-ex options:peer=br-ex--br-eth0
+        ifconfig br-ex 172.30.9.70/24 up
+        service neutron-server restart
+
+        echo "setting up networks"
+        ip link add link eth1 name br-mgmt type vlan id 300
+        ip link add link eth1 name br-storage type vlan id 301
+        /etc/init.d/networking restart
+
+
+        echo "Reset Neutron DB"
+        #reset_neutrondb
+        echo "Restarting Neutron Components"
+        #restart_neutron
+        echo "Verifying ODL ML2 plugin is working"
+        verify_ML2_working
+
+}
+
+function compute_setup {
+        echo "do compute stuff here"
+        echo "stopping neutron openvswitch plugin"
+        stop_neutron
+        ip link add link eth1 name br-mgmt type vlan id 300
+        ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24
+        ip link add link eth1 name br-storage type vlan id 301
+        ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24
+        ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24
+        echo "set manager, and route for ODL controller"
+        ovs-vsctl set-manager tcp:192.168.0.2:6640
+        route add 172.17.0.1 gw 192.168.0.2
+        verify_ML2_working
+}
+
+
+# MAIN
+echo "Starting to make call"
+update_ml2conf
+echo "Check Mode"
+set_mode
+
+if [ $MODE == "0" ];
+then
+        echo "Calling control setup"
+        control_setup
+elif [ $MODE == "1" ];
+then
+        echo "Calling compute setup"
+        compute_setup
+
+else
+        echo "Something is bad - call for help"
+        exit
+fi
+
+
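
After update_ml2conf runs, the tail of $ML2_CONF should contain a section like the following (values taken from the defaults above):

    [ml2_odl]
    password = admin
    username = admin
    url = http://172.30.9.70:8080/controller/nb/v2/neutron
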
diff --git a/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh b/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh
new file mode 100644 (file)
index 0000000..3b688ae
--- /dev/null
@@ -0,0 +1,125 @@
+#!/bin/bash
+CONTROL_HOST=172.17.0.3
+
+# ENV
+source ~/openrc
+
+
+
+# VARS
+ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
+MODE=0
+
+
+# FUNCTIONS
+
+
+# Update ml2_conf.ini
+function update_ml2conf {
+        echo "Backing up and modifying ml2_conf.ini"
+        cp $ML2_CONF $ML2_CONF.bak
+        sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
+        sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
+        echo "[ml2_odl]" >> $ML2_CONF
+        echo "password = admin" >> $ML2_CONF
+        echo "username = admin" >> $ML2_CONF
+        echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
+}
+
+function reset_neutrondb {
+        echo "Resetting DB"
+        mysql -e "drop database if exists neutron_ml2;"
+        mysql -e "create database neutron_ml2 character set utf8;"
+        mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
+        neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+}
+
+function restart_neutron {
+        echo "Restarting Neutron Server"
+        service neutron-server restart
+        echo "Should see Neutron running now"
+        service neutron-server status
+        echo "Shouldn't be any nets yet, but the command should work (return empty)"
+        neutron net-list
+}
+
+function stop_neutron {
+        echo "Stopping Neutron / OVS components"
+        service  neutron-plugin-openvswitch-agent stop
+        if [ $MODE == "0" ]
+        then
+                service neutron-server stop
+        fi
+}
+
+
+
+function verify_ML2_working {
+        echo "checking that we can talk via ML2 properly"
+        curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
+        if grep "network" /tmp/check_ml2
+        then
+                echo "Success - ML2 to ODL is working"
+        else
+                echo "I'm sorry Jim, but it's dead"
+        fi
+
+}
+
+
+function set_mode {
+        if df -k | grep glance
+        then
+                echo "Controller Mode"
+                MODE=0
+        else
+                echo "Compute Mode"
+                MODE=1
+        fi
+}
+
+function stop_ovs {
+        echo "Stopping OpenVSwitch"
+        service openvswitch-switch stop
+
+}
+
+function control_setup {
+        echo "do control stuff here"
+        echo "Reset Neutron DB"
+        #reset_neutrondb
+        echo "Restarting Neutron Components"
+        #restart_neutron
+        echo "Verifying ODL ML2 plugin is working"
+        verify_ML2_working
+
+}
+
+function compute_setup {
+        echo "do compute stuff here"
+        stop_neutron
+        verify_ML2_working
+}
+
+
+# MAIN
+echo "Starting to make call"
+#update_ml2conf
+echo "Check Mode"
+set_mode
+
+if [ $MODE == "0" ];
+then
+        echo "Calling control setup"
+        control_setup
+elif [ $MODE == "1" ];
+then
+        echo "Calling compute setup"
+        compute_setup
+
+else
+        echo "Something is bad - call for help"
+        exit
+fi
+
+
diff --git a/fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh b/fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh
new file mode 100755 (executable)
index 0000000..dd4fc9f
--- /dev/null
@@ -0,0 +1,54 @@
+#!/bin/bash
+# a "cheat" way to install docker on the controller
+# can only be used if you have a connection out to the internet
+
+# Usage: ./install_docker.sh <ip of default route to remove> <ip of default gw to add>
+
+OLDGW=$1
+NEWGW=$2
+IMAGEPATH=/opt/opnfv
+IMAGENAME=odl_docker_image.tar
+SOURCES=/etc/apt/sources.list
+
+
+if [ "$#" -ne 2 ]; then
+        echo "Two args not provided, will not touch networking"
+else
+
+        # Fix routes
+        echo "Fixing routes"
+        #DEBUG
+        netstat -rn
+
+        echo "delete old def route"
+        route delete default gw $1
+        echo "adding new def route"
+        route add default gw $2
+
+        echo " you should see a good nslookup now"
+        nslookup www.google.ca
+fi
+
+
+if egrep "mirrors.txt" $SOURCES
+then
+        echo "Sources was already updated, not touching"
+else
+        echo "adding the closest mirrors and the docker mirror to the mix"
+        echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise main restricted universe multiverse" >> /etc/apt/sources.list
+        echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-updates main restricted universe multiverse" >> /etc/apt/sources.list
+        echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-backports main restricted universe multiverse" >> /etc/apt/sources.list
+        echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-security main restricted universe multiverse" >> /etc/apt/sources.list
+        apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+        echo "deb https://get.docker.com/ubuntu docker main " > /etc/apt/sources.list.d/docker.list
+fi
+
+echo "Updating"
+apt-get update
+echo "Installing Docker"
+apt-get install -y lxc-docker
+
+echo "Loading ODL Docker Image"
+docker load -i $IMAGEPATH/$IMAGENAME
+
+
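
A quick post-install sanity check for the steps above - a sketch:

    docker version   # both client and daemon should respond
    docker images    # the loaded ODL image should be listed
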
diff --git a/fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh b/fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh
new file mode 100644 (file)
index 0000000..42c9451
--- /dev/null
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+
+
+# OK - the deployment created br-int,
+# so let's add a physical NIC to it
+
+
+
+# First - Remove all the bridges you find
+
+for i in $(ovs-vsctl list-br)
+do
+       if [ "$i" == "br-int" ];
+       then    
+               echo "skipped br-int"
+       elif [ "$i" == "br-prv" ];
+       then
+               echo "skipped br-prv"
+       else
+               ovs-vsctl del-br $i
+       fi
+done
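
Because del-br is destructive, a cautious variant of the loop above is to print the commands first and delete only after reviewing - a sketch:

    for i in $(ovs-vsctl list-br); do
        case "$i" in
            br-int|br-prv) echo "skipping $i" ;;
            *) echo "would run: ovs-vsctl del-br $i" ;;
        esac
    done
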
index c286127..56f0216 100644 (file)
@@ -30,7 +30,7 @@ class opnfv::odl_docker
         mode   => 750,
       }
 
-      file { '/opt/opnfv/odl/start_odl_conatiner.sh':
+      file { '/opt/opnfv/odl/start_odl_container.sh':
         ensure => present,
         source => '/etc/puppet/modules/opnfv/scripts/start_odl_container.sh',
         mode   => 750,
index 347ac74..7b91f4f 100755 (executable)
@@ -2,7 +2,7 @@
 #  Ericsson Canada Inc.
 #  Authoer: Daniel Smith
 #
-#   A helper script to install and setup the ODL docker conatiner on the controller
+#   A helper script to install and setup the ODL docker container on the controller
 #
 #
 #   Inputs:  odl_docker_image.tar
@@ -86,7 +86,7 @@ then
         echo "Starting Container in Interactive Mode (/bin/bash will be provided, you will need to run ./start_odl_docker.sh inside the container yourself)"
         $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -t loving_daniel  /bin/bash
 else
-        echo "Starting Conatiner in Daemon mode - no shell will be provided and docker attach will not provide shell)"
+        echo "Starting Container in Daemon mode (no shell will be provided and docker attach will not provide a shell)"
         $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel
         echo "should see the process listed here in docker ps -a"
         $LOCALPATH/$DOCKERBINNAME ps -a;
index 54f1c86..436f496 100644 (file)
@@ -25,4 +25,6 @@ class opnfv {
   include opnfv::add_packages
   # Setup OpenDaylight
   include opnfv::odl_docker
+  # Setup OpenDaylight Lithium container
+  include opnfv::odl_lith_docker
 }
index df23249..d5b70d0 100755 (executable)
@@ -1,12 +1,8 @@
-#!/bin/bash -x
-set -o xtrace
+#!/bin/bash
 set -o errexit
-set -o nounset
-set -o pipefail
-
-WORKSPACE=$(readlink -e ..)
-ISO_LOCATION="$(readlink -f $(find $WORKSPACE -iname 'fuel*iso' -type f))"
-INTERFACE="fuel"
-
-cd "${WORKSPACE}/deploy"
-./deploy_fuel.sh "$ISO_LOCATION" $INTERFACE 2>&1 | tee deploy_fuel.log
+topdir=$(dirname $(readlink -f $BASH_SOURCE))
+deploydir=$(cd ${topdir}/../deploy; pwd)
+pushd ${deploydir} > /dev/null
+echo -e "python deploy.py $@\n"
+python deploy.py "$@"
+popd > /dev/null
\ No newline at end of file
index d392f8f..33baff1 100644 (file)
 
-======== How to prepare and run the OPNFV Autodeployment =======
+======== PREREQUISITES ========
 
-in fuel/build/deploy run these:
+The following applications and Python modules are required to be installed:
 
+- example for Ubuntu environment:
 
+sudo apt-get install -y libvirt-bin qemu-kvm tightvncserver virt-manager \
+sshpass fuseiso genisoimage blackbox xterm python-pip
+sudo restart libvirt-bin
+sudo pip install pyyaml netaddr paramiko lxml scp
 
---- Step.1 Install prerequisites
 
-sudo ./install-ubuntu-packages.sh
 
+======== PREPARE and RUN the OPNFV Autodeployment ========
 
 
+--- Step.1 Prepare the DEA and DHA configuration files and the OPNFV ISO file
 
+Make sure that you are using the right DEA (Deployment Environment Adapter) and
+DHA (Deployment Hardware Adapter) configuration files. The ones provided are only templates;
+you will have to modify them according to your needs.
 
+- If you wish to deploy the OPNFV cloud environment on top of KVM/Libvirt
+  virtualization, use the following configuration files as examples:
 
---- Step.2-A If wou want to deploy OPNFV cloud environment on top of KVM/Libvirt virtualization
-             run the following environment setup script
+  =>   libvirt/conf/ha
+                dea.yaml
+                dha.yaml
 
-sudo python setup_environment.py <storage_directory> <path_to_dha_file>
+  =>   libvirt/conf/multinode
+                dea.yaml
+                dha.yaml
 
-Example:
-         sudo python setup_environment.py /mnt/images dha.yaml
 
+- If you wish to deploy the OPNFV cloud environment on baremetal,
+  use the following configuration files as examples:
 
+  =>   baremetal/conf/ericsson_montreal_lab/ha
+                dea.yaml
+                dha.yaml
 
+  =>   baremetal/conf/ericsson_montreal_lab/multinode
+                dea.yaml
+                dha.yaml
 
+  =>   baremetal/conf/linux_foundation_lab/ha
+                dea.yaml
+                dha.yaml
 
+  =>   baremetal/conf/linux_foundation_lab/multinode
+                dea.yaml
+                dha.yaml
 
---- Step.2-B If you want to deploy OPNFV cloud environment on baremetal run the
-             following environment setup script
 
-sudo python setup_vfuel.py <storage_directory> <path_to_dha_file>
+--- Step.2 Run Autodeployment:
 
-Example:
-         sudo python setup_vfuel.py /mnt/images dha.yaml
+usage: python deploy.py [-h] [-nf] [-s [STORAGE_DIR]] [-b [PXE_BRIDGE]]
+                        [iso_file] dea_file dha_file
 
+positional arguments:
+  iso_file          ISO File [default: OPNFV.iso]
+  dea_file          Deployment Environment Adapter: dea.yaml
+  dha_file          Deployment Hardware Adapter: dha.yaml
 
-WARNING!:
-setup_vfuel.py adds the following snippet into /etc/network/interfaces
-making sure to replace in setup_vfuel.py interfafe 'p1p1.20' with your actual outbound
-interface in order to provide network access to the Fuel master for DNS and NTP.
+optional arguments:
+  -h, --help        show this help message and exit
+  -nf               Do not install Fuel Master (and Node VMs when using
+                    libvirt)
+  -s [STORAGE_DIR]  Storage Directory [default: images]
+  -b [PXE_BRIDGE]   Linux Bridge for booting up the Fuel Master VM [default:
+                    pxebr]
 
-iface vfuelnet inet static
-       bridge_ports em1
-       address 10.40.0.1
-       netmask 255.255.255.0
-       pre-down iptables -t nat -D POSTROUTING --out-interface p1p1.20 -j MASQUERADE  -m comment --comment "vfuelnet"
-       pre-down iptables -D FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
-       post-up iptables -t nat -A POSTROUTING --out-interface p1p1.20 -j MASQUERADE  -m comment --comment "vfuelnet"
-       post-up iptables -A FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
 
+* WARNING:
 
+If optional argument -s <storage_dir> is not specified, Autodeployment will use
+"<current_working_dir>/images" as default, creating it if it does not already exist
+
+If optional argument -b <pxe_bridge> is not specified, Autodeployment will use "pxebr" as default;
+if the bridge does not exist, the application will terminate with an error message
+
+If optional argument <iso_file> is not specified, Autodeployment will use "<current_working_dir>/OPNFV.iso"
+as default; if the iso file does not exist, the application will terminate with an error message
+
+<pxe_bridge> is not required for Autodeployment in a virtual environment; even if it is specified,
+it will not be used at all
 
---- Step.3 Start Autodeployment
-Make sure you use the right Deployment Environment Adapter and
-Deployment Hardware Adaper configuration files:
 
-       - for baremetal:  baremetal/dea.yaml   baremetal/dha.yaml
+* EXAMPLES:
 
-       - for libvirt:    libvirt/dea.yaml   libvirt/dha.yaml
+- Install Fuel Master and deploy OPNFV Cloud from scratch on Baremetal Environment
 
+sudo python deploy.py ~/ISO/opnfv.iso ~/CONF/baremetal/dea.yaml ~/CONF/baremetal/dha.yaml -s /mnt/images -b pxebr
 
-sudo python deploy.py [-nf] <isofile> <deafile> <dhafile>
 
-Example:
-         sudo python deploy.py ~/ISO/opnfv.iso baremetal/dea.yaml baremetal/dha.yaml
+- Install Fuel Master and deploy OPNFV Cloud from scratch on Virtual Environment
+
+sudo python deploy.py ~/ISO/opnfv.iso ~/CONF/virtual/dea.yaml ~/CONF/virtual/dha.yaml -s /mnt/images
+
+
+
+- Deploy OPNFV Cloud on an already active environment where the Fuel Master VM is running,
+  so there is no need to install Fuel again
+
+sudo python deploy.py -nf ~/CONF/baremetal/dea.yaml ~/CONF/baremetal/dha.yaml
+
+sudo python deploy.py -nf ~/CONF/virtual/dea.yaml ~/CONF/virtual/dha.yaml
 
diff --git a/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dea.yaml b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dea.yaml
new file mode 100644 (file)
index 0000000..dc8014d
--- /dev/null
@@ -0,0 +1,993 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: Tue May  5 15:33:07 UTC 2015
+comment: Test environment Ericsson Montreal
+environment_name: opnfv
+environment_mode: ha
+wanted_release: Juno on Ubuntu 12.04.4
+nodes:
+- id: 1
+  interfaces: interfaces_1
+  transformations: transformations_1
+  role: ceph-osd,controller
+- id: 2
+  interfaces: interfaces_1
+  transformations: transformations_1
+  role: ceph-osd,controller
+- id: 3
+  interfaces: interfaces_1
+  transformations: transformations_1
+  role: ceph-osd,controller
+- id: 4
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+- id: 5
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+- id: 6
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+fuel:
+  ADMIN_NETWORK:
+    ipaddress: 10.40.0.2
+    netmask: 255.255.255.0
+    dhcp_pool_start: 10.40.0.3
+    dhcp_pool_end: 10.40.0.254
+  DNS_UPSTREAM: 10.118.32.193
+  DNS_DOMAIN: opnfvericsson.ca
+  DNS_SEARCH: opnfvericsson.ca
+  FUEL_ACCESS:
+    user: admin
+    password: admin
+  HOSTNAME: opnfv
+  NTP1: 10.118.34.219
+  NTP2:
+  NTP3:
+interfaces:
+  interfaces_1:
+    eth0:
+    - fuelweb_admin
+    eth2:
+    - public
+    - management
+    - storage
+    - private
+transformations:
+  transformations_1:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-eth2
+    - action: add-port
+      bridge: br-eth2
+      name: eth2
+    - action: add-br
+      name: br-eth3
+    - action: add-port
+      bridge: br-eth3
+      name: eth3
+    - action: add-br
+      name: br-eth4
+    - action: add-port
+      bridge: br-eth4
+      name: eth4
+    - action: add-br
+      name: br-eth5
+    - action: add-port
+      bridge: br-eth5
+      name: eth5
+    - action: add-br
+      name: br-ex
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-storage
+      tags:
+      - 220
+      - 0
+      vlan_ids:
+      - 220
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-mgmt
+      tags:
+      - 320
+      - 0
+      vlan_ids:
+      - 320
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-ex
+      tags:
+      - 20
+      - 0
+      vlan_ids:
+      - 20
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-prv
+  transformations_2:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-eth2
+    - action: add-port
+      bridge: br-eth2
+      name: eth2
+    - action: add-br
+      name: br-eth3
+    - action: add-port
+      bridge: br-eth3
+      name: eth3
+    - action: add-br
+      name: br-eth4
+    - action: add-port
+      bridge: br-eth4
+      name: eth4
+    - action: add-br
+      name: br-eth5
+    - action: add-port
+      bridge: br-eth5
+      name: eth5
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-storage
+      tags:
+      - 220
+      - 0
+      vlan_ids:
+      - 220
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-mgmt
+      tags:
+      - 320
+      - 0
+      vlan_ids:
+      - 320
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-prv
+opnfv:
+  compute: {}
+  controller: {}
+network:
+  networking_parameters:
+    base_mac: fa:16:3e:00:00:00
+    dns_nameservers:
+    - 10.118.32.193
+    floating_ranges:
+    - - 10.118.34.226
+      - 10.118.34.230
+    gre_id_range:
+    - 2
+    - 65535
+    internal_cidr: 192.168.111.0/24
+    internal_gateway: 192.168.111.1
+    net_l23_provider: ovs
+    segmentation_type: vlan
+    vlan_range:
+    - 2022
+    - 2023
+  networks:
+  - cidr: 10.118.34.192/24
+    gateway: 10.118.34.193
+    ip_ranges:
+    - - 10.118.34.220
+      - 10.118.34.225
+    meta:
+      assign_vip: true
+      cidr: 10.118.34.192/24
+      configurable: true
+      floating_range_var: floating_ranges
+      ip_range:
+      - 10.118.34.220
+      - 10.118.34.225
+      map_priority: 1
+      name: public
+      notation: ip_ranges
+      render_addr_mask: public
+      render_type: null
+      use_gateway: true
+      vlan_start: null
+    name: public
+    vlan_start: null
+  - cidr: 192.168.0.0/24
+    gateway: null
+    ip_ranges:
+    - - 192.168.0.2
+      - 192.168.0.254
+    meta:
+      assign_vip: true
+      cidr: 192.168.0.0/24
+      configurable: true
+      map_priority: 2
+      name: management
+      notation: cidr
+      render_addr_mask: internal
+      render_type: cidr
+      use_gateway: false
+      vlan_start: 320
+    name: management
+    vlan_start: 320
+  - cidr: 192.168.1.0/24
+    gateway: null
+    ip_ranges:
+    - - 192.168.1.2
+      - 192.168.1.254
+    meta:
+      assign_vip: false
+      cidr: 192.168.1.0/24
+      configurable: true
+      map_priority: 2
+      name: storage
+      notation: cidr
+      render_addr_mask: storage
+      render_type: cidr
+      use_gateway: false
+      vlan_start: 220
+    name: storage
+    vlan_start: 220
+  - cidr: null
+    gateway: null
+    ip_ranges: []
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 2
+      name: private
+      neutron_vlan_range: true
+      notation: null
+      render_addr_mask: null
+      render_type: null
+      seg_type: vlan
+      use_gateway: false
+      vlan_start: null
+    name: private
+    vlan_start: null
+  - cidr: 10.40.0.0/24
+    gateway: null
+    ip_ranges:
+    - - 10.40.0.3
+      - 10.40.0.254
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 0
+      notation: ip_ranges
+      render_addr_mask: null
+      render_type: null
+      unmovable: true
+      use_gateway: true
+    name: fuelweb_admin
+    vlan_start: null
+settings:
+  editable:
+    access:
+      email:
+        description: Email address for Administrator
+        label: email
+        type: text
+        value: admin@localhost
+        weight: 40
+      metadata:
+        label: Access
+        weight: 10
+      password:
+        description: Password for Administrator
+        label: password
+        type: password
+        value: admin
+        weight: 20
+      tenant:
+        description: Tenant (project) name for Administrator
+        label: tenant
+        regex:
+          error: Invalid tenant name
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+        type: text
+        value: admin
+        weight: 30
+      user:
+        description: Username for Administrator
+        label: username
+        regex:
+          error: Invalid username
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+        type: text
+        value: admin
+        weight: 10
+    additional_components:
+      ceilometer:
+        description: If selected, Ceilometer component will be installed
+        label: Install Ceilometer
+        type: checkbox
+        value: false
+        weight: 40
+      heat:
+        description: ''
+        label: ''
+        type: hidden
+        value: true
+        weight: 30
+      metadata:
+        label: Additional Components
+        weight: 20
+      murano:
+        description: If selected, Murano component will be installed
+        label: Install Murano
+        restrictions:
+        - cluster:net_provider != 'neutron'
+        type: checkbox
+        value: false
+        weight: 20
+      sahara:
+        description: If selected, Sahara component will be installed
+        label: Install Sahara
+        type: checkbox
+        value: false
+        weight: 10
+    common:
+      auth_key:
+        description: Public key(s) to include in authorized_keys on deployed nodes
+        label: Public Key
+        type: text
+        value: ''
+        weight: 70
+      auto_assign_floating_ip:
+        description: If selected, OpenStack will automatically assign a floating IP
+          to a new instance
+        label: Auto assign floating IP
+        restrictions:
+        - cluster:net_provider == 'neutron'
+        type: checkbox
+        value: false
+        weight: 40
+      compute_scheduler_driver:
+        label: Scheduler driver
+        type: radio
+        value: nova.scheduler.filter_scheduler.FilterScheduler
+        values:
+        - data: nova.scheduler.filter_scheduler.FilterScheduler
+          description: Currently the most advanced OpenStack scheduler. See the OpenStack
+            documentation for details.
+          label: Filter scheduler
+        - data: nova.scheduler.simple.SimpleScheduler
+          description: This is 'naive' scheduler which tries to find the least loaded
+            host
+          label: Simple scheduler
+        weight: 40
+      debug:
+        description: Debug logging mode provides more information, but requires more
+          disk space.
+        label: OpenStack debug logging
+        type: checkbox
+        value: false
+        weight: 20
+      disable_offload:
+        description: If set, generic segmentation offload (gso) and generic receive
+          offload (gro) on physical nics will be disabled. See ethtool man.
+        label: Disable generic offload on physical nics
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
+            == 'gre'
+        type: checkbox
+        value: true
+        weight: 80
+      libvirt_type:
+        label: Hypervisor type
+        type: radio
+        value: kvm
+        values:
+        - data: kvm
+          description: Choose this type of hypervisor if you run OpenStack on hardware
+          label: KVM
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
+        - data: qemu
+          description: Choose this type of hypervisor if you run OpenStack on virtual
+            hosts.
+          label: QEMU
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
+        - data: vcenter
+          description: Choose this type of hypervisor if you run OpenStack in a vCenter
+            environment.
+          label: vCenter
+          restrictions:
+          - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
+            == 'neutron'
+        weight: 30
+      metadata:
+        label: Common
+        weight: 30
+      nova_quota:
+        description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+          quotas will increase load on the Nova database.
+        label: Nova quotas
+        type: checkbox
+        value: false
+        weight: 25
+      resume_guests_state_on_host_boot:
+        description: Whether to resume previous guests state when the host reboots.
+          If enabled, this option causes guests assigned to the host to resume their
+          previous state. If the guest was running a restart will be attempted when
+          nova-compute starts. If the guest was not running previously, a restart
+          will not be attempted.
+        label: Resume guests state on host boot
+        type: checkbox
+        value: true
+        weight: 60
+      use_cow_images:
+        description: For most cases you will want qcow format. If it's disabled, raw
+          image format will be used to run VMs. OpenStack with raw format currently
+          does not support snapshotting.
+        label: Use qcow format for images
+        type: checkbox
+        value: true
+        weight: 50
+    corosync:
+      group:
+        description: ''
+        label: Group
+        type: text
+        value: 226.94.1.1
+        weight: 10
+      metadata:
+        label: Corosync
+        restrictions:
+        - action: hide
+          condition: 'true'
+        weight: 50
+      port:
+        description: ''
+        label: Port
+        type: text
+        value: '12000'
+        weight: 20
+      verified:
+        description: Set True only if multicast is configured correctly on router.
+        label: Need to pass network verification.
+        type: checkbox
+        value: false
+        weight: 10
+    external_dns:
+      dns_list:
+        description: List of upstream DNS servers, separated by comma
+        label: DNS list
+        type: text
+        value: 10.118.32.193
+        weight: 10
+      metadata:
+        label: Upstream DNS
+        weight: 90
+    external_ntp:
+      metadata:
+        label: Upstream NTP
+        weight: 100
+      ntp_list:
+        description: List of upstream NTP servers, separated by comma
+        label: NTP servers list
+        type: text
+        value: 10.118.34.219
+        weight: 10
+    kernel_params:
+      kernel:
+        description: Default kernel parameters
+        label: Initial parameters
+        type: text
+        value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
+        weight: 45
+      metadata:
+        label: Kernel parameters
+        weight: 40
+    neutron_mellanox:
+      metadata:
+        enabled: true
+        label: Mellanox Neutron components
+        toggleable: false
+        weight: 50
+      plugin:
+        label: Mellanox drivers and SR-IOV plugin
+        type: radio
+        value: disabled
+        values:
+        - data: disabled
+          description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+            not be installed.
+          label: Mellanox drivers and plugins disabled
+          restrictions:
+          - settings:storage.iser.value == true
+        - data: drivers_only
+          description: If selected, Mellanox Ethernet drivers will be installed to
+            support networking over Mellanox NIC. Mellanox Neutron plugin will not
+            be installed.
+          label: Install only Mellanox drivers
+          restrictions:
+          - settings:common.libvirt_type.value != 'kvm'
+        - data: ethernet
+          description: If selected, both Mellanox Ethernet drivers and Mellanox network
+            acceleration (Neutron) plugin will be installed.
+          label: Install Mellanox drivers and SR-IOV plugin
+          restrictions:
+          - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+            == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+        weight: 60
+      vf_num:
+        description: Note that one virtual function will be reserved for the storage
+          network when iSER is chosen.
+        label: Number of virtual NICs
+        restrictions:
+        - settings:neutron_mellanox.plugin.value != 'ethernet'
+        type: text
+        value: '16'
+        weight: 70
+    nsx_plugin:
+      connector_type:
+        description: Default network transport type to use
+        label: NSX connector type
+        type: select
+        value: stt
+        values:
+        - data: gre
+          label: GRE
+        - data: ipsec_gre
+          label: GRE over IPSec
+        - data: stt
+          label: STT
+        - data: ipsec_stt
+          label: STT over IPSec
+        - data: bridge
+          label: Bridge
+        weight: 80
+      l3_gw_service_uuid:
+        description: UUID for the default L3 gateway service to use with this cluster
+        label: L3 service UUID
+        regex:
+          error: Invalid L3 gateway service UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+        type: text
+        value: ''
+        weight: 50
+      metadata:
+        enabled: false
+        label: VMware NSX
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
+            != 'nsx'
+        weight: 20
+      nsx_controllers:
+        description: One or more IPv4[:port] addresses of NSX controller nodes, separated
+          by commas (e.g. 10.30.30.2,192.168.110.254:443)
+        label: NSX controller endpoint
+        regex:
+          error: Invalid controller endpoints, specify valid IPv4[:port] pair
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
+        type: text
+        value: ''
+        weight: 60
+      nsx_password:
+        description: Password for Administrator
+        label: NSX password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: ''
+        weight: 30
+      nsx_username:
+        description: NSX administrator's username
+        label: NSX username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
+        weight: 20
+      packages_url:
+        description: URL to NSX specific packages
+        label: URL to NSX bits
+        regex:
+          error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
+            http://10.20.0.2/nsx)
+          source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
+        type: text
+        value: ''
+        weight: 70
+      replication_mode:
+        description: ''
+        label: NSX cluster has Service nodes
+        type: checkbox
+        value: true
+        weight: 90
+      transport_zone_uuid:
+        description: UUID of the pre-existing default NSX Transport zone
+        label: Transport zone UUID
+        regex:
+          error: Invalid transport zone UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+        type: text
+        value: ''
+        weight: 40
+    provision:
+      metadata:
+        label: Provision
+        restrictions:
+        - action: hide
+          condition: not ('experimental' in version:feature_groups)
+        weight: 80
+      method:
+        description: Which provision method to use for this cluster.
+        label: Provision method
+        type: radio
+        value: cobbler
+        values:
+        - data: image
+          description: Copying pre-built images onto a disk.
+          label: Image
+        - data: cobbler
+          description: Install from scratch using anaconda or debian-installer.
+          label: Classic (use anaconda or debian-installer)
+    public_network_assignment:
+      assign_to_all_nodes:
+        description: When disabled, the public network will be assigned to controllers
+          and zabbix-server only
+        label: Assign public network to all nodes
+        type: checkbox
+        value: false
+        weight: 10
+      metadata:
+        label: Public network assignment
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'neutron'
+        weight: 50
+    storage:
+      ephemeral_ceph:
+        description: Configures Nova to store ephemeral volumes in RBD. This works
+          best if Ceph is enabled for volumes and images, too. Enables live migration
+          of all types of Ceph backed VMs (without this option, live migration will
+          only work with VMs launched from Cinder volumes).
+        label: Ceph RBD for ephemeral volumes (Nova)
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
+        type: checkbox
+        value: true
+        weight: 75
+      images_ceph:
+        description: Configures Glance to use the Ceph RBD backend to store images.
+          If enabled, this option will prevent Swift from installing.
+        label: Ceph RBD for images (Glance)
+        type: checkbox
+        value: true
+        weight: 30
+      images_vcenter:
+        description: Configures Glance to use the vCenter/ESXi backend to store images.
+          If enabled, this option will prevent Swift from installing.
+        label: VMWare vCenter/ESXi datastore for images (Glance)
+        restrictions:
+        - settings:common.libvirt_type.value != 'vcenter'
+        type: checkbox
+        value: false
+        weight: 35
+      iser:
+        description: 'High performance block storage: Cinder volumes over iSER protocol
+          (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
+          and will use a dedicated virtual function for the storage network.'
+        label: iSER protocol for volumes (Cinder)
+        restrictions:
+        - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+          != 'kvm'
+        type: checkbox
+        value: false
+        weight: 11
+      metadata:
+        label: Storage
+        weight: 60
+      objects_ceph:
+        description: Configures the RadosGW front end for Ceph RBD. This exposes S3
+          and Swift API interfaces. If enabled, this option will prevent Swift from
+          installing.
+        label: Ceph RadosGW for objects (Swift API)
+        restrictions:
+        - settings:storage.images_ceph.value == false
+        type: checkbox
+        value: false
+        weight: 80
+      osd_pool_size:
+        description: Configures the default number of object replicas in Ceph. This
+          number must be equal to or lower than the number of deployed 'Storage -
+          Ceph OSD' nodes.
+        label: Ceph object replication factor
+        regex:
+          error: Invalid number
+          source: ^[1-9]\d*$
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
+        type: text
+        value: '2'
+        weight: 85
+      vc_datacenter:
+        description: Inventory path to a datacenter. If you want to use an ESXi host
+          as the datastore, it should be "ha-datacenter".
+        label: Datacenter name
+        regex:
+          error: Empty datacenter
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 65
+      vc_datastore:
+        description: Datastore associated with the datacenter.
+        label: Datastore name
+        regex:
+          error: Empty datastore
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 60
+      vc_host:
+        description: IP Address of vCenter/ESXi
+        label: vCenter/ESXi IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 45
+      vc_image_dir:
+        description: The name of the directory where the glance images will be stored
+          in the VMware datastore.
+        label: Datastore Images directory
+        regex:
+          error: Empty images directory
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: /openstack_glance
+        weight: 70
+      vc_password:
+        description: vCenter/ESXi admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: password
+        value: ''
+        weight: 55
+      vc_user:
+        description: vCenter/ESXi admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 50
+      volumes_ceph:
+        description: Configures Cinder to store volumes in Ceph RBD images.
+        label: Ceph RBD for volumes (Cinder)
+        restrictions:
+        - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
+          == 'vcenter'
+        type: checkbox
+        value: true
+        weight: 20
+      volumes_lvm:
+        description: Requires at least one Storage - Cinder LVM node.
+        label: Cinder LVM over iSCSI for volumes
+        restrictions:
+        - settings:storage.volumes_ceph.value == true
+        type: checkbox
+        value: false
+        weight: 10
+      volumes_vmdk:
+        description: Configures Cinder to store volumes via VMware vCenter.
+        label: VMware vCenter for volumes (Cinder)
+        restrictions:
+        - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
+          == true
+        type: checkbox
+        value: false
+        weight: 15
+    syslog:
+      metadata:
+        label: Syslog
+        weight: 50
+      syslog_port:
+        description: Remote syslog port
+        label: Port
+        regex:
+          error: Invalid Syslog port
+          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+        type: text
+        value: '514'
+        weight: 20
+      syslog_server:
+        description: Remote syslog hostname
+        label: Hostname
+        type: text
+        value: ''
+        weight: 10
+      syslog_transport:
+        label: Syslog transport protocol
+        type: radio
+        value: tcp
+        values:
+        - data: udp
+          description: ''
+          label: UDP
+        - data: tcp
+          description: ''
+          label: TCP
+        weight: 30
+    vcenter:
+      cluster:
+        description: vCenter cluster name. If you have multiple clusters, use commas
+          to separate the names
+        label: Cluster
+        regex:
+          error: Invalid cluster list
+          source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
+        type: text
+        value: ''
+        weight: 40
+      datastore_regex:
+        description: The Datastore regexp setting specifies the data stores to use
+          with Compute. For example, "nas.*". If you want to use all available datastores,
+          leave this field blank
+        label: Datastore regexp
+        regex:
+          error: Invalid datastore regexp
+          source: ^(\S.*\S|\S|)$
+        type: text
+        value: ''
+        weight: 50
+      host_ip:
+        description: IP Address of vCenter
+        label: vCenter IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        type: text
+        value: ''
+        weight: 10
+      metadata:
+        label: vCenter
+        restrictions:
+        - action: hide
+          condition: settings:common.libvirt_type.value != 'vcenter'
+        weight: 20
+      use_vcenter:
+        description: ''
+        label: ''
+        type: hidden
+        value: true
+        weight: 5
+      vc_password:
+        description: vCenter admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: admin
+        weight: 30
+      vc_user:
+        description: vCenter admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
+        weight: 20
+      vlan_interface:
+        description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
+          vmnic1). If empty, "vmnic0" is used by default
+        label: ESXi VLAN interface
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
+            != 'VlanManager'
+        type: text
+        value: ''
+        weight: 60
+    zabbix:
+      metadata:
+        label: Zabbix Access
+        restrictions:
+        - action: hide
+          condition: not ('experimental' in version:feature_groups)
+        weight: 70
+      password:
+        description: Password for Zabbix Administrator
+        label: password
+        type: password
+        value: zabbix
+        weight: 20
+      username:
+        description: Username for Zabbix Administrator
+        label: username
+        type: text
+        value: admin
+        weight: 10
\ No newline at end of file
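
The settings block above embeds its own validation regexes; the nsx_controllers
field, for instance, constrains its value to a comma-separated list of
IPv4[:port] endpoints. As a minimal, illustrative Python sketch (not part of
this repository), the pattern can be copied verbatim and exercised against the
example endpoint from the field's own description:

    import re

    # Pattern copied verbatim from the nsx_controllers regex above.
    NSX_CONTROLLERS_RE = re.compile(
        r'^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}'
        r'([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])'
        r'(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?'
        r'(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}'
        r'([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])'
        r'(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$'
    )

    # The first endpoint comes from the field's description; the second
    # should be rejected.
    for endpoint in ('10.30.30.2,192.168.110.254:443', 'not-an-ip'):
        print(endpoint, bool(NSX_CONTROLLERS_RE.match(endpoint)))
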
diff --git a/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dha.yaml b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dha.yaml
new file mode 100644 (file)
index 0000000..562d6cd
--- /dev/null
@@ -0,0 +1,54 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Mon May  4 09:03:46 UTC 2015
+comment: Test environment Ericsson Montreal
+
+# Adapter to use for this definition
+adapter: hp
+
+# Node list.
+# The mandatory property is id; all other properties are adapter-specific.
+
+nodes:
+- id: 1
+  pxeMac: 14:58:D0:54:7A:D8
+  ipmiIp: 10.118.32.198
+  ipmiUser: <username>
+  ipmiPass: <password>
+- id: 2
+  pxeMac: 14:58:D0:55:E2:E0
+  ipmiIp: 10.118.32.202
+  ipmiUser: <username>
+  ipmiPass: <password>
+- id: 3
+  pxeMac: 9C:B6:54:8A:25:C0
+  ipmiIp: 10.118.32.213
+  ipmiUser: <username>
+  ipmiPass: <password>
+- id: 4
+  pxeMac: 14:58:D0:54:28:80
+  ipmiIp: 10.118.32.201
+  ipmiUser: <username>
+  ipmiPass: <password>
+- id: 5
+  pxeMac: 14:58:D0:54:E7:88
+  ipmiIp: 10.118.32.203
+  ipmiUser: <username>
+  ipmiPass: <password>
+- id: 6
+  pxeMac: 14:58:D0:54:7A:28
+  ipmiIp: 10.118.32.205
+  ipmiUser: <username>
+  ipmiPass: <password>
+# Adding the Fuel node as node id 7, which may not be correct - please
+# adjust as needed.
+- id: 7
+  libvirtName: vFuel
+  libvirtTemplate: baremetal/vms/fuel.xml
+  isFuel: yes
+  username: root
+  password: r00tme
+
+disks:
+  fuel: 30G
\ No newline at end of file
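
The DHA node list above carries everything needed for out-of-band power
control of the Montreal blades. A minimal sketch of how the ipmiIp, ipmiUser
and ipmiPass properties could be driven with ipmitool follows; the file name
dha.yaml and the script itself are assumptions for illustration, not code from
this repository:

    import yaml

    with open('dha.yaml') as f:  # assumed local copy of the DHA above
        dha = yaml.safe_load(f)

    for node in dha['nodes']:
        if node.get('isFuel'):
            continue  # the Fuel master is a libvirt VM, not an IPMI target
        # Print rather than run the command, so the placeholder
        # <username>/<password> credentials are never executed.
        print('ipmitool -I lanplus -H {ipmiIp} -U {ipmiUser} -P {ipmiPass} '
              'chassis power status'.format(**node))
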
@@ -8,13 +8,29 @@ environment_mode: multinode
 wanted_release: Juno on Ubuntu 12.04.4
 nodes:
 - id: 1
-  interfaces: interface1
-  transformations: controller1
-  role: controller
+  interfaces: interfaces_1
+  transformations: transformations_1
+  role: ceph-osd,controller
 - id: 2
-  interfaces: interface1
-  transformations: compute1
-  role: compute
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+- id: 3
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+- id: 4
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+- id: 5
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+- id: 6
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
 fuel:
   ADMIN_NETWORK:
     ipaddress: 10.40.0.2
@@ -28,11 +44,11 @@ fuel:
     user: admin
     password: admin
   HOSTNAME: opnfv
-  NTP1: 0.ca.pool.ntp.org
-  NTP2: 1.ca.pool.ntp.org
-  NTP3: 2.ca.pool.ntp.org
+  NTP1: 10.118.34.219
+  NTP2:
+  NTP3:
 interfaces:
-  interface1:
+  interfaces_1:
     eth0:
     - fuelweb_admin
     eth2:
@@ -41,7 +57,7 @@ interfaces:
     - storage
     - private
 transformations:
-  controller1:
+  transformations_1:
     - action: add-br
       name: br-eth0
     - action: add-port
@@ -80,6 +96,8 @@ transformations:
       name: br-storage
     - action: add-br
       name: br-fw-admin
+    - action: add-br
+      name: br-prv
     - action: add-patch
       bridges:
       - br-eth2
@@ -104,25 +122,15 @@ transformations:
       bridges:
       - br-eth0
       - br-fw-admin
-      trunks:
-      - 0
     - action: add-patch
       bridges:
       - br-eth2
       - br-ex
-      tags:
-      - 120
-      - 0
-      vlan_ids:
-      - 120
-      - 0
-    - action: add-br
-      name: br-prv
     - action: add-patch
       bridges:
       - br-eth2
       - br-prv
-  compute1:
+  transformations_2:
     - action: add-br
       name: br-eth0
     - action: add-port
@@ -159,6 +167,8 @@ transformations:
       name: br-storage
     - action: add-br
       name: br-fw-admin
+    - action: add-br
+      name: br-prv
     - action: add-patch
       bridges:
       - br-eth2
@@ -183,10 +193,6 @@ transformations:
       bridges:
       - br-eth0
       - br-fw-admin
-      trunks:
-      - 0
-    - action: add-br
-      name: br-prv
     - action: add-patch
       bridges:
       - br-eth2
@@ -199,10 +205,9 @@ network:
     base_mac: fa:16:3e:00:00:00
     dns_nameservers:
     - 10.118.32.193
-    - 8.8.8.8
     floating_ranges:
-    - - 172.16.0.130
-      - 172.16.0.254
+    - - 10.118.36.48
+      - 10.118.36.62
     gre_id_range:
     - 2
     - 65535
@@ -214,11 +219,11 @@ network:
     - 2022
     - 2023
   networks:
-  - cidr: 172.16.0.0/24
-    gateway: 172.16.0.1
+  - cidr: 10.118.36.32/27
+    gateway: 10.118.36.1
     ip_ranges:
-    - - 172.16.0.2
-      - 172.16.0.126
+    - - 10.118.36.33
+      - 10.118.36.47
     meta:
       assign_vip: true
       cidr: 172.16.0.0/24
@@ -235,7 +240,7 @@ network:
       use_gateway: true
       vlan_start: null
     name: public
-    vlan_start: 120
+    vlan_start: null
   - cidr: 192.168.0.0/24
     gateway: null
     ip_ranges:
@@ -251,7 +256,7 @@ network:
       render_addr_mask: internal
       render_type: cidr
       use_gateway: false
-      vlan_start: 101
+      vlan_start: 320
     name: management
     vlan_start: 320
   - cidr: 192.168.1.0/24
@@ -269,7 +274,7 @@ network:
       render_addr_mask: storage
       render_type: cidr
       use_gateway: false
-      vlan_start: 102
+      vlan_start: 220
     name: storage
     vlan_start: 220
   - cidr: null
@@ -501,7 +506,7 @@ settings:
         description: List of upstream DNS servers, separated by comma
         label: DNS list
         type: text
-        value: 10.118.32.193, 8.8.8.8
+        value: 10.118.32.193
         weight: 10
       metadata:
         label: Upstream DNS
@@ -514,7 +519,7 @@ settings:
         description: List of upstream NTP servers, separated by comma
         label: NTP servers list
         type: text
-        value: 0.pool.ntp.org, 1.pool.ntp.org
+        value: 10.118.34.219
         weight: 10
     kernel_params:
       kernel:
@@ -604,7 +609,7 @@ settings:
         weight: 20
       nsx_controllers:
         description: One or more IPv4[:port] addresses of NSX controller node, separated
-          by comma (e.g. 10.40.30.2,192.168.110.254:443)
+          by comma (e.g. 10.30.30.2,192.168.110.254:443)
         label: NSX controller endpoint
         regex:
           error: Invalid controller endpoints, specify valid IPv4[:port] pair
@@ -698,14 +703,14 @@ settings:
         restrictions:
         - settings:common.libvirt_type.value == 'vcenter'
         type: checkbox
-        value: false
+        value: true
         weight: 75
       images_ceph:
         description: Configures Glance to use the Ceph RBD backend to store images.
           If enabled, this option will prevent Swift from installing.
         label: Ceph RBD for images (Glance)
         type: checkbox
-        value: false
+        value: true
         weight: 30
       images_vcenter:
         description: Configures Glance to use the vCenter/ESXi backend to store images.
@@ -839,7 +844,7 @@ settings:
         - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
           == 'vcenter'
         type: checkbox
-        value: false
+        value: true
         weight: 20
       volumes_lvm:
         description: Requires at least one Storage - Cinder LVM node.
@@ -979,4 +984,4 @@ settings:
         label: username
         type: text
         value: admin
-        weight: 10
+        weight: 10
\ No newline at end of file
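
The hunks above retarget the public network of the Montreal multinode
environment from 172.16.0.0/24 to 10.118.36.32/27, carving it into a node
range (10.118.36.33-10.118.36.47) and a Neutron floating range
(10.118.36.48-10.118.36.62). A small sanity check one could run on such a
change (illustrative only, not part of the deploy tool):

    import ipaddress

    cidr = ipaddress.ip_network('10.118.36.32/27')
    public_range = (ipaddress.ip_address('10.118.36.33'),
                    ipaddress.ip_address('10.118.36.47'))
    floating_range = (ipaddress.ip_address('10.118.36.48'),
                      ipaddress.ip_address('10.118.36.62'))

    # Both ranges must sit inside the public CIDR and must not overlap.
    for name, (lo, hi) in (('public', public_range),
                           ('floating', floating_range)):
        assert lo in cidr and hi in cidr, '%s range outside %s' % (name, cidr)
    assert public_range[1] < floating_range[0], 'ranges overlap'
    print('public and floating ranges fit inside', cidr)
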
diff --git a/fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dha.yaml b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dha.yaml
new file mode 100644 (file)
index 0000000..562d6cd
--- /dev/null
@@ -0,0 +1,54 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Mon May  4 09:03:46 UTC 2015
+comment: Test environment Ericsson Montreal
+
+# Adapter to use for this definition
+adapter: hp
+
+# Node list.
+# The mandatory property is id; all other properties are adapter-specific.
+
+nodes:
+- id: 1
+  pxeMac: 14:58:D0:54:7A:D8
+  ipmiIp: 10.118.32.198
+  ipmiUser: <username>
+  ipmiPass: <password>
+- id: 2
+  pxeMac: 14:58:D0:55:E2:E0
+  ipmiIp: 10.118.32.202
+  ipmiUser: <username>
+  ipmiPass: <password>
+- id: 3
+  pxeMac: 9C:B6:54:8A:25:C0
+  ipmiIp: 10.118.32.213
+  ipmiUser: <username>
+  ipmiPass: <password>
+- id: 4
+  pxeMac: 14:58:D0:54:28:80
+  ipmiIp: 10.118.32.201
+  ipmiUser: <username>
+  ipmiPass: <password>
+- id: 5
+  pxeMac: 14:58:D0:54:E7:88
+  ipmiIp: 10.118.32.203
+  ipmiUser: <username>
+  ipmiPass: <password>
+- id: 6
+  pxeMac: 14:58:D0:54:7A:28
+  ipmiIp: 10.118.32.205
+  ipmiUser: <username>
+  ipmiPass: <password>
+# Adding the Fuel node as node id 7, which may not be correct - please
+# adjust as needed.
+- id: 7
+  libvirtName: vFuel
+  libvirtTemplate: baremetal/vms/fuel.xml
+  isFuel: yes
+  username: root
+  password: r00tme
+
+disks:
+  fuel: 30G
\ No newline at end of file
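
Note that the Montreal DHA files use adapter: hp while the Linux Foundation
pod further down uses adapter: ipmi, with both node lists exposing the same
IPMI properties. Below is a hypothetical sketch of how a deploy tool might
dispatch on that field; the class names are illustrative, not this
repository's actual API:

    import yaml

    class IpmiAdapter:
        """Drives nodes through their ipmiIp/ipmiUser/ipmiPass properties."""
        def __init__(self, nodes):
            self.nodes = nodes

    class HpAdapter(IpmiAdapter):
        # The HP blades in these configs are still reached over IPMI; a
        # real adapter might layer vendor-specific handling on top.
        pass

    ADAPTERS = {'ipmi': IpmiAdapter, 'hp': HpAdapter}

    def load_adapter(path):
        with open(path) as f:
            dha = yaml.safe_load(f)
        return ADAPTERS[dha['adapter']](dha['nodes'])
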
diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml
new file mode 100644 (file)
index 0000000..8aafc9a
--- /dev/null
@@ -0,0 +1,950 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: Thu May 21 13:34:13 CEST 2015
+comment: HA deployment with Ceph
+environment_name: opnfv
+environment_mode: ha
+wanted_release: Juno on Ubuntu 12.04.4
+nodes:
+- id: 1
+  interfaces: interfaces_1
+  transformations: transformations_1
+  role: ceph-osd,controller
+- id: 2
+  interfaces: interfaces_1
+  transformations: transformations_1
+  role: ceph-osd,controller
+- id: 3
+  interfaces: interfaces_1
+  transformations: transformations_1
+  role: ceph-osd,controller
+- id: 4
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+- id: 5
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+fuel:
+  ADMIN_NETWORK:
+    ipaddress: 10.20.0.2
+    netmask: 255.255.0.0
+    dhcp_pool_start: 10.20.0.3
+    dhcp_pool_end: 10.20.0.254
+  DNS_UPSTREAM: 8.8.8.8
+  DNS_DOMAIN: domain.tld
+  DNS_SEARCH: domain.tld
+  FUEL_ACCESS:
+    user: admin
+    password: admin
+  HOSTNAME: opnfv
+  NTP1: 0.pool.ntp.org
+  NTP2: 1.pool.ntp.org
+  NTP3: 2.pool.ntp.org
+interfaces:
+  interfaces_1:
+    eth0:
+    - public
+    eth1:
+    - fuelweb_admin
+    - management
+    - storage
+    - private
+transformations:
+  transformations_1:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-ex
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-storage
+      tags:
+      - 301
+      - 0
+      vlan_ids:
+      - 301
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-mgmt
+      tags:
+      - 300
+      - 0
+      vlan_ids:
+      - 300
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-ex
+      trunks:
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-prv
+  transformations_2:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-storage
+      tags:
+      - 301
+      - 0
+      vlan_ids:
+      - 301
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-mgmt
+      tags:
+      - 300
+      - 0
+      vlan_ids:
+      - 300
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-prv
+opnfv:
+  compute: {}
+  controller: {}
+network:
+  networking_parameters:
+    base_mac: fa:16:3e:00:00:00
+    dns_nameservers:
+    - 8.8.4.4
+    - 8.8.8.8
+    floating_ranges:
+    - - 172.30.9.80
+      - 172.30.9.89
+    gre_id_range:
+    - 2
+    - 65535
+    internal_cidr: 192.168.111.0/24
+    internal_gateway: 192.168.111.1
+    net_l23_provider: ovs
+    segmentation_type: vlan
+    vlan_range:
+    - 1000
+    - 1010
+  networks:
+  - cidr: 172.30.9.0/24
+    gateway: 172.30.9.1
+    ip_ranges:
+    - - 172.30.9.70
+      - 172.30.9.79
+    meta:
+      assign_vip: true
+      cidr: 172.16.0.0/24
+      configurable: true
+      floating_range_var: floating_ranges
+      ip_range:
+      - 172.16.0.2
+      - 172.16.0.126
+      map_priority: 1
+      name: public
+      notation: ip_ranges
+      render_addr_mask: public
+      render_type: null
+      use_gateway: true
+      vlan_start: null
+    name: public
+    vlan_start: null
+  - cidr: 192.168.0.0/24
+    gateway: null
+    ip_ranges:
+    - - 192.168.0.2
+      - 192.168.0.254
+    meta:
+      assign_vip: true
+      cidr: 192.168.0.0/24
+      configurable: true
+      map_priority: 2
+      name: management
+      notation: cidr
+      render_addr_mask: internal
+      render_type: cidr
+      use_gateway: false
+      vlan_start: 101
+    name: management
+    vlan_start: 300
+  - cidr: 192.168.1.0/24
+    gateway: null
+    ip_ranges:
+    - - 192.168.1.2
+      - 192.168.1.254
+    meta:
+      assign_vip: false
+      cidr: 192.168.1.0/24
+      configurable: true
+      map_priority: 2
+      name: storage
+      notation: cidr
+      render_addr_mask: storage
+      render_type: cidr
+      use_gateway: false
+      vlan_start: 102
+    name: storage
+    vlan_start: 301
+  - cidr: null
+    gateway: null
+    ip_ranges: []
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 2
+      name: private
+      neutron_vlan_range: true
+      notation: null
+      render_addr_mask: null
+      render_type: null
+      seg_type: vlan
+      use_gateway: false
+      vlan_start: null
+    name: private
+    vlan_start: null
+  - cidr: 10.20.0.0/16
+    gateway: null
+    ip_ranges:
+    - - 10.20.0.3
+      - 10.20.255.254
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 0
+      notation: ip_ranges
+      render_addr_mask: null
+      render_type: null
+      unmovable: true
+      use_gateway: true
+    name: fuelweb_admin
+    vlan_start: null
+settings:
+  editable:
+    access:
+      email:
+        description: Email address for Administrator
+        label: email
+        type: text
+        value: admin@localhost
+        weight: 40
+      metadata:
+        label: Access
+        weight: 10
+      password:
+        description: Password for Administrator
+        label: password
+        type: password
+        value: admin
+        weight: 20
+      tenant:
+        description: Tenant (project) name for Administrator
+        label: tenant
+        regex:
+          error: Invalid tenant name
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+        type: text
+        value: admin
+        weight: 30
+      user:
+        description: Username for Administrator
+        label: username
+        regex:
+          error: Invalid username
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+        type: text
+        value: admin
+        weight: 10
+    additional_components:
+      ceilometer:
+        description: If selected, Ceilometer component will be installed
+        label: Install Ceilometer
+        type: checkbox
+        value: false
+        weight: 40
+      heat:
+        description: ''
+        label: ''
+        type: hidden
+        value: true
+        weight: 30
+      metadata:
+        label: Additional Components
+        weight: 20
+      murano:
+        description: If selected, Murano component will be installed
+        label: Install Murano
+        restrictions:
+        - cluster:net_provider != 'neutron'
+        type: checkbox
+        value: false
+        weight: 20
+      sahara:
+        description: If selected, Sahara component will be installed
+        label: Install Sahara
+        type: checkbox
+        value: false
+        weight: 10
+    common:
+      auth_key:
+        description: Public key(s) to include in authorized_keys on deployed nodes
+        label: Public Key
+        type: text
+        value: ''
+        weight: 70
+      auto_assign_floating_ip:
+        description: If selected, OpenStack will automatically assign a floating IP
+          to a new instance
+        label: Auto assign floating IP
+        restrictions:
+        - cluster:net_provider == 'neutron'
+        type: checkbox
+        value: false
+        weight: 40
+      compute_scheduler_driver:
+        label: Scheduler driver
+        type: radio
+        value: nova.scheduler.filter_scheduler.FilterScheduler
+        values:
+        - data: nova.scheduler.filter_scheduler.FilterScheduler
+          description: Currently the most advanced OpenStack scheduler. See the OpenStack
+            documentation for details.
+          label: Filter scheduler
+        - data: nova.scheduler.simple.SimpleScheduler
+          description: This is a 'naive' scheduler which tries to find the least
+            loaded host
+          label: Simple scheduler
+        weight: 40
+      debug:
+        description: Debug logging mode provides more information, but requires more
+          disk space.
+        label: OpenStack debug logging
+        type: checkbox
+        value: false
+        weight: 20
+      disable_offload:
+        description: If set, generic segmentation offload (GSO) and generic receive
+          offload (GRO) on physical NICs will be disabled. See the ethtool man page.
+        label: Disable generic offload on physical nics
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
+            == 'gre'
+        type: checkbox
+        value: true
+        weight: 80
+      libvirt_type:
+        label: Hypervisor type
+        type: radio
+        value: kvm
+        values:
+        - data: kvm
+          description: Choose this type of hypervisor if you run OpenStack on hardware.
+          label: KVM
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
+        - data: qemu
+          description: Choose this type of hypervisor if you run OpenStack on virtual
+            hosts.
+          label: QEMU
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
+        - data: vcenter
+          description: Choose this type of hypervisor if you run OpenStack in a vCenter
+            environment.
+          label: vCenter
+          restrictions:
+          - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
+            == 'neutron'
+        weight: 30
+      metadata:
+        label: Common
+        weight: 30
+      nova_quota:
+        description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+          quotas will increase load on the Nova database.
+        label: Nova quotas
+        type: checkbox
+        value: false
+        weight: 25
+      resume_guests_state_on_host_boot:
+        description: Whether to resume the previous guests' state when the host
+          reboots. If enabled, this option causes guests assigned to the host to
+          resume their previous state. If the guest was running, a restart will be
+          attempted when nova-compute starts. If the guest was not running
+          previously, a restart will not be attempted.
+        label: Resume guests state on host boot
+        type: checkbox
+        value: true
+        weight: 60
+      use_cow_images:
+        description: In most cases you will want the qcow format. If it is disabled,
+          the raw image format will be used to run VMs. OpenStack with the raw format
+          currently does not support snapshotting.
+        label: Use qcow format for images
+        type: checkbox
+        value: true
+        weight: 50
+    corosync:
+      group:
+        description: ''
+        label: Group
+        type: text
+        value: 226.94.1.1
+        weight: 10
+      metadata:
+        label: Corosync
+        restrictions:
+        - action: hide
+          condition: 'true'
+        weight: 50
+      port:
+        description: ''
+        label: Port
+        type: text
+        value: '12000'
+        weight: 20
+      verified:
+        description: Set True only if multicast is configured correctly on the router.
+        label: Need to pass network verification.
+        type: checkbox
+        value: false
+        weight: 10
+    external_dns:
+      dns_list:
+        description: List of upstream DNS servers, separated by commas
+        label: DNS list
+        type: text
+        value: 8.8.8.8, 8.8.4.4
+        weight: 10
+      metadata:
+        label: Upstream DNS
+        weight: 90
+    external_ntp:
+      metadata:
+        label: Upstream NTP
+        weight: 100
+      ntp_list:
+        description: List of upstream NTP servers, separated by commas
+        label: NTP servers list
+        type: text
+        value: 0.pool.ntp.org, 1.pool.ntp.org
+        weight: 10
+    kernel_params:
+      kernel:
+        description: Default kernel parameters
+        label: Initial parameters
+        type: text
+        value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
+        weight: 45
+      metadata:
+        label: Kernel parameters
+        weight: 40
+    neutron_mellanox:
+      metadata:
+        enabled: true
+        label: Mellanox Neutron components
+        toggleable: false
+        weight: 50
+      plugin:
+        label: Mellanox drivers and SR-IOV plugin
+        type: radio
+        value: disabled
+        values:
+        - data: disabled
+          description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+            not be installed.
+          label: Mellanox drivers and plugins disabled
+          restrictions:
+          - settings:storage.iser.value == true
+        - data: drivers_only
+          description: If selected, Mellanox Ethernet drivers will be installed to
+            support networking over Mellanox NIC. Mellanox Neutron plugin will not
+            be installed.
+          label: Install only Mellanox drivers
+          restrictions:
+          - settings:common.libvirt_type.value != 'kvm'
+        - data: ethernet
+          description: If selected, both Mellanox Ethernet drivers and Mellanox network
+            acceleration (Neutron) plugin will be installed.
+          label: Install Mellanox drivers and SR-IOV plugin
+          restrictions:
+          - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+            == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+        weight: 60
+      vf_num:
+        description: Note that one virtual function will be reserved for the storage
+          network when iSER is chosen.
+        label: Number of virtual NICs
+        restrictions:
+        - settings:neutron_mellanox.plugin.value != 'ethernet'
+        type: text
+        value: '16'
+        weight: 70
+    nsx_plugin:
+      connector_type:
+        description: Default network transport type to use
+        label: NSX connector type
+        type: select
+        value: stt
+        values:
+        - data: gre
+          label: GRE
+        - data: ipsec_gre
+          label: GRE over IPSec
+        - data: stt
+          label: STT
+        - data: ipsec_stt
+          label: STT over IPSec
+        - data: bridge
+          label: Bridge
+        weight: 80
+      l3_gw_service_uuid:
+        description: UUID for the default L3 gateway service to use with this cluster
+        label: L3 service UUID
+        regex:
+          error: Invalid L3 gateway service UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+        type: text
+        value: ''
+        weight: 50
+      metadata:
+        enabled: false
+        label: VMware NSX
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
+            != 'nsx'
+        weight: 20
+      nsx_controllers:
+        description: One or more IPv4[:port] addresses of NSX controller nodes, separated
+          by commas (e.g. 10.30.30.2,192.168.110.254:443)
+        label: NSX controller endpoint
+        regex:
+          error: Invalid controller endpoints, specify valid IPv4[:port] pair
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
+        type: text
+        value: ''
+        weight: 60
+      nsx_password:
+        description: Password for Administrator
+        label: NSX password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: ''
+        weight: 30
+      nsx_username:
+        description: NSX administrator's username
+        label: NSX username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
+        weight: 20
+      packages_url:
+        description: URL to NSX specific packages
+        label: URL to NSX bits
+        regex:
+          error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
+            http://10.20.0.2/nsx)
+          source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
+        type: text
+        value: ''
+        weight: 70
+      replication_mode:
+        description: ''
+        label: NSX cluster has Service nodes
+        type: checkbox
+        value: true
+        weight: 90
+      transport_zone_uuid:
+        description: UUID of the pre-existing default NSX Transport zone
+        label: Transport zone UUID
+        regex:
+          error: Invalid transport zone UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+        type: text
+        value: ''
+        weight: 40
+    provision:
+      metadata:
+        label: Provision
+        restrictions:
+        - action: hide
+          condition: not ('experimental' in version:feature_groups)
+        weight: 80
+      method:
+        description: Which provision method to use for this cluster.
+        label: Provision method
+        type: radio
+        value: cobbler
+        values:
+        - data: image
+          description: Copying pre-built images onto a disk.
+          label: Image
+        - data: cobbler
+          description: Install from scratch using anaconda or debian-installer.
+          label: Classic (use anaconda or debian-installer)
+    public_network_assignment:
+      assign_to_all_nodes:
+        description: When disabled, the public network will be assigned to controllers
+          and zabbix-server only
+        label: Assign public network to all nodes
+        type: checkbox
+        value: false
+        weight: 10
+      metadata:
+        label: Public network assignment
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'neutron'
+        weight: 50
+    storage:
+      ephemeral_ceph:
+        description: Configures Nova to store ephemeral volumes in RBD. This works
+          best if Ceph is enabled for volumes and images, too. Enables live migration
+          of all types of Ceph backed VMs (without this option, live migration will
+          only work with VMs launched from Cinder volumes).
+        label: Ceph RBD for ephemeral volumes (Nova)
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
+        type: checkbox
+        value: true
+        weight: 75
+      images_ceph:
+        description: Configures Glance to use the Ceph RBD backend to store images.
+          If enabled, this option will prevent Swift from installing.
+        label: Ceph RBD for images (Glance)
+        type: checkbox
+        value: true
+        weight: 30
+      images_vcenter:
+        description: Configures Glance to use the vCenter/ESXi backend to store images.
+          If enabled, this option will prevent Swift from installing.
+        label: VMWare vCenter/ESXi datastore for images (Glance)
+        restrictions:
+        - settings:common.libvirt_type.value != 'vcenter'
+        type: checkbox
+        value: false
+        weight: 35
+      iser:
+        description: 'High performance block storage: Cinder volumes over iSER protocol
+          (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
+          and will use a dedicated virtual function for the storage network.'
+        label: iSER protocol for volumes (Cinder)
+        restrictions:
+        - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+          != 'kvm'
+        type: checkbox
+        value: false
+        weight: 11
+      metadata:
+        label: Storage
+        weight: 60
+      objects_ceph:
+        description: Configures the RadosGW front end for Ceph RBD. This exposes S3
+          and Swift API interfaces. If enabled, this option will prevent Swift from
+          installing.
+        label: Ceph RadosGW for objects (Swift API)
+        restrictions:
+        - settings:storage.images_ceph.value == false
+        type: checkbox
+        value: false
+        weight: 80
+      osd_pool_size:
+        description: Configures the default number of object replicas in Ceph. This
+          number must be equal to or lower than the number of deployed 'Storage -
+          Ceph OSD' nodes.
+        label: Ceph object replication factor
+        regex:
+          error: Invalid number
+          source: ^[1-9]\d*$
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
+        type: text
+        value: '2'
+        weight: 85
+      vc_datacenter:
+        description: Inventory path to a datacenter. If you want to use an ESXi host
+          as the datastore, it should be "ha-datacenter".
+        label: Datacenter name
+        regex:
+          error: Empty datacenter
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 65
+      vc_datastore:
+        description: Datastore associated with the datacenter.
+        label: Datastore name
+        regex:
+          error: Empty datastore
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 60
+      vc_host:
+        description: IP Address of vCenter/ESXi
+        label: vCenter/ESXi IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 45
+      vc_image_dir:
+        description: The name of the directory where the glance images will be stored
+          in the VMware datastore.
+        label: Datastore Images directory
+        regex:
+          error: Empty images directory
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: /openstack_glance
+        weight: 70
+      vc_password:
+        description: vCenter/ESXi admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: password
+        value: ''
+        weight: 55
+      vc_user:
+        description: vCenter/ESXi admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 50
+      volumes_ceph:
+        description: Configures Cinder to store volumes in Ceph RBD images.
+        label: Ceph RBD for volumes (Cinder)
+        restrictions:
+        - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
+          == 'vcenter'
+        type: checkbox
+        value: true
+        weight: 20
+      volumes_lvm:
+        description: Requires at least one Storage - Cinder LVM node.
+        label: Cinder LVM over iSCSI for volumes
+        restrictions:
+        - settings:storage.volumes_ceph.value == true
+        type: checkbox
+        value: false
+        weight: 10
+      volumes_vmdk:
+        description: Configures Cinder to store volumes via VMware vCenter.
+        label: VMware vCenter for volumes (Cinder)
+        restrictions:
+        - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
+          == true
+        type: checkbox
+        value: false
+        weight: 15
+    syslog:
+      metadata:
+        label: Syslog
+        weight: 50
+      syslog_port:
+        description: Remote syslog port
+        label: Port
+        regex:
+          error: Invalid Syslog port
+          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+        type: text
+        value: '514'
+        weight: 20
+      syslog_server:
+        description: Remote syslog hostname
+        label: Hostname
+        type: text
+        value: ''
+        weight: 10
+      syslog_transport:
+        label: Syslog transport protocol
+        type: radio
+        value: tcp
+        values:
+        - data: udp
+          description: ''
+          label: UDP
+        - data: tcp
+          description: ''
+          label: TCP
+        weight: 30
+    vcenter:
+      cluster:
+        description: vCenter cluster name. If you have multiple clusters, use commas
+          to separate the names
+        label: Cluster
+        regex:
+          error: Invalid cluster list
+          source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
+        type: text
+        value: ''
+        weight: 40
+      datastore_regex:
+        description: The Datastore regexp setting specifies the data stores to use
+          with Compute. For example, "nas.*". If you want to use all available datastores,
+          leave this field blank
+        label: Datastore regexp
+        regex:
+          error: Invalid datastore regexp
+          source: ^(\S.*\S|\S|)$
+        type: text
+        value: ''
+        weight: 50
+      host_ip:
+        description: IP Address of vCenter
+        label: vCenter IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        type: text
+        value: ''
+        weight: 10
+      metadata:
+        label: vCenter
+        restrictions:
+        - action: hide
+          condition: settings:common.libvirt_type.value != 'vcenter'
+        weight: 20
+      use_vcenter:
+        description: ''
+        label: ''
+        type: hidden
+        value: true
+        weight: 5
+      vc_password:
+        description: vCenter admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: admin
+        weight: 30
+      vc_user:
+        description: vCenter admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
+        weight: 20
+      vlan_interface:
+        description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
+          vmnic1). If empty, "vmnic0" is used by default
+        label: ESXi VLAN interface
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
+            != 'VlanManager'
+        type: text
+        value: ''
+        weight: 60
+    zabbix:
+      metadata:
+        label: Zabbix Access
+        restrictions:
+        - action: hide
+          condition: not ('experimental' in version:feature_groups)
+        weight: 70
+      password:
+        description: Password for Zabbix Administrator
+        label: password
+        type: password
+        value: zabbix
+        weight: 20
+      username:
+        description: Username for Zabbix Administrator
+        label: username
+        type: text
+        value: admin
+        weight: 10
\ No newline at end of file
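
The osd_pool_size description above states the constraint directly: the Ceph
replication factor must be equal to or lower than the number of deployed
ceph-osd nodes (a factor of 2 against five ceph-osd nodes in this DEA). A
minimal sketch of that check, assuming the DEA is saved as dea.yaml and not
taken from the deploy tool:

    import yaml

    with open('dea.yaml') as f:
        dea = yaml.safe_load(f)

    # Roles are comma-separated strings such as 'ceph-osd,controller'.
    osd_nodes = [n for n in dea['nodes']
                 if 'ceph-osd' in n['role'].split(',')]
    pool_size = int(
        dea['settings']['editable']['storage']['osd_pool_size']['value'])
    assert pool_size <= len(osd_nodes), (
        'osd_pool_size %d exceeds %d ceph-osd nodes'
        % (pool_size, len(osd_nodes)))
    print('replication factor', pool_size, 'with', len(osd_nodes), 'OSD nodes')
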
diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dha.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dha.yaml
new file mode 100644 (file)
index 0000000..5acd389
--- /dev/null
@@ -0,0 +1,49 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Fri May  8 08:03:49 UTC 2015
+comment: Config for LF Pod1
+
+# Adapter to use for this definition
+adapter: ipmi
+
+# Node list.
+# The mandatory property is id; all other properties are adapter-specific.
+
+nodes:
+- id: 1
+  pxeMac: 00:25:b5:b0:00:ef
+  ipmiIp: 172.30.8.69
+  ipmiUser: admin
+  ipmiPass: octopus
+- id: 2
+  pxeMac: 00:25:b5:b0:00:cf
+  ipmiIp: 172.30.8.78
+  ipmiUser: admin
+  ipmiPass: octopus
+- id: 3
+  pxeMac: 00:25:b5:b0:00:8f
+  ipmiIp: 172.30.8.68
+  ipmiUser: admin
+  ipmiPass: octopus
+- id: 4
+  pxeMac: 00:25:b5:b0:00:6f
+  ipmiIp: 172.30.8.77
+  ipmiUser: admin
+  ipmiPass: octopus
+- id: 5
+  pxeMac: 00:25:b5:b0:00:4f
+  ipmiIp: 172.30.8.67
+  ipmiUser: admin
+  ipmiPass: octopus
+# Adding the Fuel node as node id 6, which may not be correct - please
+# adjust as needed.
+- id: 6
+  libvirtName: vFuel
+  libvirtTemplate: baremetal/vms/fuel_lf.xml
+  isFuel: yes
+  username: root
+  password: r00tme
+
+disks:
+  fuel: 30G
\ No newline at end of file
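
Each transformations list in the DEA files above is a declarative recipe for a
node's Open vSwitch layout: add-br creates a bridge, add-port attaches a
physical interface, and add-patch connects two bridges. The sketch below is a
rough, illustrative translation into ovs-vsctl commands, not Fuel's l23network
implementation; the tags/vlan_ids/trunks options are deliberately omitted, and
since add-patch has no single ovs-vsctl verb it is rendered as a pair of OVS
patch ports:

    def to_ovs_vsctl(action):
        """Translate one DEA transformation into ovs-vsctl command lines."""
        if action['action'] == 'add-br':
            return ['ovs-vsctl add-br %s' % action['name']]
        if action['action'] == 'add-port':
            return ['ovs-vsctl add-port %s %s'
                    % (action['bridge'], action['name'])]
        if action['action'] == 'add-patch':
            a, b = action['bridges']
            pa, pb = '%s-%s' % (a, b), '%s-%s' % (b, a)
            return ['ovs-vsctl add-port %s %s -- set interface %s type=patch '
                    'options:peer=%s' % (a, pa, pa, pb),
                    'ovs-vsctl add-port %s %s -- set interface %s type=patch '
                    'options:peer=%s' % (b, pb, pb, pa)]
        raise ValueError('unknown action %r' % action['action'])

    for cmd in to_ovs_vsctl({'action': 'add-patch',
                             'bridges': ['br-eth1', 'br-prv']}):
        print(cmd)
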
diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml
new file mode 100644 (file)
index 0000000..5a93e96
--- /dev/null
@@ -0,0 +1,950 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: Tue May  5 15:33:07 UTC 2015
+comment: Test environment Ericsson Montreal
+environment_name: opnfv
+environment_mode: multinode
+wanted_release: Juno on Ubuntu 12.04.4
+nodes:
+- id: 1
+  interfaces: interfaces_1
+  transformations: transformations_1
+  role: ceph-osd,controller
+- id: 2
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+- id: 3
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+- id: 4
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+- id: 5
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+fuel:
+  ADMIN_NETWORK:
+    ipaddress: 10.20.0.2
+    netmask: 255.255.0.0
+    dhcp_pool_start: 10.20.0.3
+    dhcp_pool_end: 10.20.0.254
+  DNS_UPSTREAM: 8.8.8.8
+  DNS_DOMAIN: domain.tld
+  DNS_SEARCH: domain.tld
+  FUEL_ACCESS:
+    user: admin
+    password: admin
+  HOSTNAME: opnfv
+  NTP1: 0.pool.ntp.org
+  NTP2: 1.pool.ntp.org
+  NTP3: 2.pool.ntp.org
+interfaces:
+  interfaces_1:
+    eth0:
+    - public
+    eth1:
+    - fuelweb_admin
+    - management
+    - storage
+    - private
+transformations:
+  transformations_1:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-ex
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-storage
+      tags:
+      - 301
+      - 0
+      vlan_ids:
+      - 301
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-mgmt
+      tags:
+      - 300
+      - 0
+      vlan_ids:
+      - 300
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-ex
+      trunks:
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-prv
+  transformations_2:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-storage
+      tags:
+      - 301
+      - 0
+      vlan_ids:
+      - 301
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-mgmt
+      tags:
+      - 300
+      - 0
+      vlan_ids:
+      - 300
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-prv
+opnfv:
+  compute: {}
+  controller: {}
+network:
+  networking_parameters:
+    base_mac: fa:16:3e:00:00:00
+    dns_nameservers:
+    - 8.8.4.4
+    - 8.8.8.8
+    floating_ranges:
+    - - 172.30.9.80
+      - 172.30.9.89
+    gre_id_range:
+    - 2
+    - 65535
+    internal_cidr: 192.168.111.0/24
+    internal_gateway: 192.168.111.1
+    net_l23_provider: ovs
+    segmentation_type: vlan
+    vlan_range:
+    - 1000
+    - 1010
+  networks:
+  - cidr: 172.30.9.0/24
+    gateway: 172.30.9.1
+    ip_ranges:
+    - - 172.30.9.70
+      - 172.30.9.79
+    meta:
+      assign_vip: true
+      cidr: 172.16.0.0/24
+      configurable: true
+      floating_range_var: floating_ranges
+      ip_range:
+      - 172.16.0.2
+      - 172.16.0.126
+      map_priority: 1
+      name: public
+      notation: ip_ranges
+      render_addr_mask: public
+      render_type: null
+      use_gateway: true
+      vlan_start: null
+    name: public
+    vlan_start: null
+  - cidr: 192.168.0.0/24
+    gateway: null
+    ip_ranges:
+    - - 192.168.0.2
+      - 192.168.0.254
+    meta:
+      assign_vip: true
+      cidr: 192.168.0.0/24
+      configurable: true
+      map_priority: 2
+      name: management
+      notation: cidr
+      render_addr_mask: internal
+      render_type: cidr
+      use_gateway: false
+      vlan_start: 101
+    name: management
+    vlan_start: 300
+  - cidr: 192.168.1.0/24
+    gateway: null
+    ip_ranges:
+    - - 192.168.1.2
+      - 192.168.1.254
+    meta:
+      assign_vip: false
+      cidr: 192.168.1.0/24
+      configurable: true
+      map_priority: 2
+      name: storage
+      notation: cidr
+      render_addr_mask: storage
+      render_type: cidr
+      use_gateway: false
+      vlan_start: 102
+    name: storage
+    vlan_start: 301
+  - cidr: null
+    gateway: null
+    ip_ranges: []
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 2
+      name: private
+      neutron_vlan_range: true
+      notation: null
+      render_addr_mask: null
+      render_type: null
+      seg_type: vlan
+      use_gateway: false
+      vlan_start: null
+    name: private
+    vlan_start: null
+  - cidr: 10.20.0.0/16
+    gateway: null
+    ip_ranges:
+    - - 10.20.0.3
+      - 10.20.255.254
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 0
+      notation: ip_ranges
+      render_addr_mask: null
+      render_type: null
+      unmovable: true
+      use_gateway: true
+    name: fuelweb_admin
+    vlan_start: null
+settings:
+  editable:
+    access:
+      email:
+        description: Email address for Administrator
+        label: email
+        type: text
+        value: admin@localhost
+        weight: 40
+      metadata:
+        label: Access
+        weight: 10
+      password:
+        description: Password for Administrator
+        label: password
+        type: password
+        value: admin
+        weight: 20
+      tenant:
+        description: Tenant (project) name for Administrator
+        label: tenant
+        regex:
+          error: Invalid tenant name
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+        type: text
+        value: admin
+        weight: 30
+      user:
+        description: Username for Administrator
+        label: username
+        regex:
+          error: Invalid username
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+        type: text
+        value: admin
+        weight: 10
+    additional_components:
+      ceilometer:
+        description: If selected, Ceilometer component will be installed
+        label: Install Ceilometer
+        type: checkbox
+        value: false
+        weight: 40
+      heat:
+        description: ''
+        label: ''
+        type: hidden
+        value: true
+        weight: 30
+      metadata:
+        label: Additional Components
+        weight: 20
+      murano:
+        description: If selected, Murano component will be installed
+        label: Install Murano
+        restrictions:
+        - cluster:net_provider != 'neutron'
+        type: checkbox
+        value: false
+        weight: 20
+      sahara:
+        description: If selected, Sahara component will be installed
+        label: Install Sahara
+        type: checkbox
+        value: false
+        weight: 10
+    common:
+      auth_key:
+        description: Public key(s) to include in authorized_keys on deployed nodes
+        label: Public Key
+        type: text
+        value: ''
+        weight: 70
+      auto_assign_floating_ip:
+        description: If selected, OpenStack will automatically assign a floating IP
+          to a new instance
+        label: Auto assign floating IP
+        restrictions:
+        - cluster:net_provider == 'neutron'
+        type: checkbox
+        value: false
+        weight: 40
+      compute_scheduler_driver:
+        label: Scheduler driver
+        type: radio
+        value: nova.scheduler.filter_scheduler.FilterScheduler
+        values:
+        - data: nova.scheduler.filter_scheduler.FilterScheduler
+          description: Currently the most advanced OpenStack scheduler. See the OpenStack
+            documentation for details.
+          label: Filter scheduler
+        - data: nova.scheduler.simple.SimpleScheduler
+          description: This is a 'naive' scheduler that tries to find the least
+            loaded host
+          label: Simple scheduler
+        weight: 40
+      debug:
+        description: Debug logging mode provides more information, but requires more
+          disk space.
+        label: OpenStack debug logging
+        type: checkbox
+        value: false
+        weight: 20
+      disable_offload:
+        description: If set, generic segmentation offload (gso) and generic receive
+          offload (gro) on physical nics will be disabled. See the ethtool man page.
+        label: Disable generic offload on physical nics
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
+            == 'gre'
+        type: checkbox
+        value: true
+        weight: 80
+      libvirt_type:
+        label: Hypervisor type
+        type: radio
+        value: kvm
+        values:
+        - data: kvm
+          description: Choose this type of hypervisor if you run OpenStack on hardware
+          label: KVM
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
+        - data: qemu
+          description: Choose this type of hypervisor if you run OpenStack on virtual
+            hosts.
+          label: QEMU
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
+        - data: vcenter
+          description: Choose this type of hypervisor if you run OpenStack in a vCenter
+            environment.
+          label: vCenter
+          restrictions:
+          - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
+            == 'neutron'
+        weight: 30
+      metadata:
+        label: Common
+        weight: 30
+      nova_quota:
+        description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+          quotas will increase load on the Nova database.
+        label: Nova quotas
+        type: checkbox
+        value: false
+        weight: 25
+      resume_guests_state_on_host_boot:
+        description: Whether to resume previous guests state when the host reboots.
+          If enabled, this option causes guests assigned to the host to resume their
+          previous state. If the guest was running a restart will be attempted when
+          nova-compute starts. If the guest was not running previously, a restart
+          will not be attempted.
+        label: Resume guests state on host boot
+        type: checkbox
+        value: true
+        weight: 60
+      use_cow_images:
+        description: For most cases you will want qcow format. If it's disabled, raw
+          image format will be used to run VMs. OpenStack with raw format currently
+          does not support snapshotting.
+        label: Use qcow format for images
+        type: checkbox
+        value: true
+        weight: 50
+    corosync:
+      group:
+        description: ''
+        label: Group
+        type: text
+        value: 226.94.1.1
+        weight: 10
+      metadata:
+        label: Corosync
+        restrictions:
+        - action: hide
+          condition: 'true'
+        weight: 50
+      port:
+        description: ''
+        label: Port
+        type: text
+        value: '12000'
+        weight: 20
+      verified:
+        description: Set True only if multicast is configured correctly on the router.
+        label: Need to pass network verification.
+        type: checkbox
+        value: false
+        weight: 10
+    external_dns:
+      dns_list:
+        description: List of upstream DNS servers, separated by comma
+        label: DNS list
+        type: text
+        value: 8.8.8.8, 8.8.4.4
+        weight: 10
+      metadata:
+        label: Upstream DNS
+        weight: 90
+    external_ntp:
+      metadata:
+        label: Upstream NTP
+        weight: 100
+      ntp_list:
+        description: List of upstream NTP servers, separated by comma
+        label: NTP servers list
+        type: text
+        value: 0.pool.ntp.org, 1.pool.ntp.org
+        weight: 10
+    kernel_params:
+      kernel:
+        description: Default kernel parameters
+        label: Initial parameters
+        type: text
+        value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
+        weight: 45
+      metadata:
+        label: Kernel parameters
+        weight: 40
+    neutron_mellanox:
+      metadata:
+        enabled: true
+        label: Mellanox Neutron components
+        toggleable: false
+        weight: 50
+      plugin:
+        label: Mellanox drivers and SR-IOV plugin
+        type: radio
+        value: disabled
+        values:
+        - data: disabled
+          description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+            not be installed.
+          label: Mellanox drivers and plugins disabled
+          restrictions:
+          - settings:storage.iser.value == true
+        - data: drivers_only
+          description: If selected, Mellanox Ethernet drivers will be installed to
+            support networking over Mellanox NIC. Mellanox Neutron plugin will not
+            be installed.
+          label: Install only Mellanox drivers
+          restrictions:
+          - settings:common.libvirt_type.value != 'kvm'
+        - data: ethernet
+          description: If selected, both Mellanox Ethernet drivers and Mellanox network
+            acceleration (Neutron) plugin will be installed.
+          label: Install Mellanox drivers and SR-IOV plugin
+          restrictions:
+          - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+            == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+        weight: 60
+      vf_num:
+        description: Note that one virtual function will be reserved to the storage
+          network, in case of choosing iSER.
+        label: Number of virtual NICs
+        restrictions:
+        - settings:neutron_mellanox.plugin.value != 'ethernet'
+        type: text
+        value: '16'
+        weight: 70
+    nsx_plugin:
+      connector_type:
+        description: Default network transport type to use
+        label: NSX connector type
+        type: select
+        value: stt
+        values:
+        - data: gre
+          label: GRE
+        - data: ipsec_gre
+          label: GRE over IPSec
+        - data: stt
+          label: STT
+        - data: ipsec_stt
+          label: STT over IPSec
+        - data: bridge
+          label: Bridge
+        weight: 80
+      l3_gw_service_uuid:
+        description: UUID for the default L3 gateway service to use with this cluster
+        label: L3 service UUID
+        regex:
+          error: Invalid L3 gateway service UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+        type: text
+        value: ''
+        weight: 50
+      metadata:
+        enabled: false
+        label: VMware NSX
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
+            != 'nsx'
+        weight: 20
+      nsx_controllers:
+        description: One or more IPv4[:port] addresses of NSX controller node, separated
+          by comma (e.g. 10.30.30.2,192.168.110.254:443)
+        label: NSX controller endpoint
+        regex:
+          error: Invalid controller endpoints, specify valid IPv4[:port] pair
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
+        type: text
+        value: ''
+        weight: 60
+      nsx_password:
+        description: Password for Administrator
+        label: NSX password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: ''
+        weight: 30
+      nsx_username:
+        description: NSX administrator's username
+        label: NSX username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
+        weight: 20
+      packages_url:
+        description: URL to NSX specific packages
+        label: URL to NSX bits
+        regex:
+          error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
+            http://10.20.0.2/nsx)
+          source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
+        type: text
+        value: ''
+        weight: 70
+      replication_mode:
+        description: ''
+        label: NSX cluster has Service nodes
+        type: checkbox
+        value: true
+        weight: 90
+      transport_zone_uuid:
+        description: UUID of the pre-existing default NSX Transport zone
+        label: Transport zone UUID
+        regex:
+          error: Invalid transport zone UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+        type: text
+        value: ''
+        weight: 40
+    provision:
+      metadata:
+        label: Provision
+        restrictions:
+        - action: hide
+          condition: not ('experimental' in version:feature_groups)
+        weight: 80
+      method:
+        description: Which provision method to use for this cluster.
+        label: Provision method
+        type: radio
+        value: cobbler
+        values:
+        - data: image
+          description: Copying pre-built images on a disk.
+          label: Image
+        - data: cobbler
+          description: Install from scratch using anaconda or debian-installer.
+          label: Classic (use anaconda or debian-installer)
+    public_network_assignment:
+      assign_to_all_nodes:
+        description: When disabled, public network will be assigned to controllers
+          and zabbix-server only
+        label: Assign public network to all nodes
+        type: checkbox
+        value: false
+        weight: 10
+      metadata:
+        label: Public network assignment
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'neutron'
+        weight: 50
+    storage:
+      ephemeral_ceph:
+        description: Configures Nova to store ephemeral volumes in RBD. This works
+          best if Ceph is enabled for volumes and images, too. Enables live migration
+          of all types of Ceph backed VMs (without this option, live migration will
+          only work with VMs launched from Cinder volumes).
+        label: Ceph RBD for ephemeral volumes (Nova)
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
+        type: checkbox
+        value: true
+        weight: 75
+      images_ceph:
+        description: Configures Glance to use the Ceph RBD backend to store images.
+          If enabled, this option will prevent Swift from installing.
+        label: Ceph RBD for images (Glance)
+        type: checkbox
+        value: true
+        weight: 30
+      images_vcenter:
+        description: Configures Glance to use the vCenter/ESXi backend to store images.
+          If enabled, this option will prevent Swift from installing.
+        label: VMware vCenter/ESXi datastore for images (Glance)
+        restrictions:
+        - settings:common.libvirt_type.value != 'vcenter'
+        type: checkbox
+        value: false
+        weight: 35
+      iser:
+        description: 'High performance block storage: Cinder volumes over iSER protocol
+          (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
+          and will use a dedicated virtual function for the storage network.'
+        label: iSER protocol for volumes (Cinder)
+        restrictions:
+        - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+          != 'kvm'
+        type: checkbox
+        value: false
+        weight: 11
+      metadata:
+        label: Storage
+        weight: 60
+      objects_ceph:
+        description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+          Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+        label: Ceph RadosGW for objects (Swift API)
+        restrictions:
+        - settings:storage.images_ceph.value == false
+        type: checkbox
+        value: false
+        weight: 80
+      osd_pool_size:
+        description: Configures the default number of object replicas in Ceph. This
+          number must be equal to or lower than the number of deployed 'Storage -
+          Ceph OSD' nodes.
+        label: Ceph object replication factor
+        regex:
+          error: Invalid number
+          source: ^[1-9]\d*$
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
+        type: text
+        value: '2'
+        weight: 85
+      vc_datacenter:
+        description: Inventory path to a datacenter. If you want to use ESXi host
+          as datastore, it should be "ha-datacenter".
+        label: Datacenter name
+        regex:
+          error: Empty datacenter
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 65
+      vc_datastore:
+        description: Datastore associated with the datacenter.
+        label: Datastore name
+        regex:
+          error: Empty datastore
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 60
+      vc_host:
+        description: IP Address of vCenter/ESXi
+        label: vCenter/ESXi IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 45
+      vc_image_dir:
+        description: The name of the directory where the glance images will be stored
+          in the VMware datastore.
+        label: Datastore Images directory
+        regex:
+          error: Empty images directory
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: /openstack_glance
+        weight: 70
+      vc_password:
+        description: vCenter/ESXi admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: password
+        value: ''
+        weight: 55
+      vc_user:
+        description: vCenter/ESXi admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 50
+      volumes_ceph:
+        description: Configures Cinder to store volumes in Ceph RBD images.
+        label: Ceph RBD for volumes (Cinder)
+        restrictions:
+        - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
+          == 'vcenter'
+        type: checkbox
+        value: true
+        weight: 20
+      volumes_lvm:
+        description: Requires at least one Storage - Cinder LVM node.
+        label: Cinder LVM over iSCSI for volumes
+        restrictions:
+        - settings:storage.volumes_ceph.value == true
+        type: checkbox
+        value: false
+        weight: 10
+      volumes_vmdk:
+        description: Configures Cinder to store volumes via VMware vCenter.
+        label: VMware vCenter for volumes (Cinder)
+        restrictions:
+        - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
+          == true
+        type: checkbox
+        value: false
+        weight: 15
+    syslog:
+      metadata:
+        label: Syslog
+        weight: 50
+      syslog_port:
+        description: Remote syslog port
+        label: Port
+        regex:
+          error: Invalid Syslog port
+          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+        type: text
+        value: '514'
+        weight: 20
+      syslog_server:
+        description: Remote syslog hostname
+        label: Hostname
+        type: text
+        value: ''
+        weight: 10
+      syslog_transport:
+        label: Syslog transport protocol
+        type: radio
+        value: tcp
+        values:
+        - data: udp
+          description: ''
+          label: UDP
+        - data: tcp
+          description: ''
+          label: TCP
+        weight: 30
+    vcenter:
+      cluster:
+        description: vCenter cluster name. If you have multiple clusters, use comma
+          to separate names
+        label: Cluster
+        regex:
+          error: Invalid cluster list
+          source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
+        type: text
+        value: ''
+        weight: 40
+      datastore_regex:
+        description: The Datastore regexp setting specifies the data stores to use
+          with Compute. For example, "nas.*". If you want to use all available datastores,
+          leave this field blank
+        label: Datastore regexp
+        regex:
+          error: Invalid datastore regexp
+          source: ^(\S.*\S|\S|)$
+        type: text
+        value: ''
+        weight: 50
+      host_ip:
+        description: IP Address of vCenter
+        label: vCenter IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        type: text
+        value: ''
+        weight: 10
+      metadata:
+        label: vCenter
+        restrictions:
+        - action: hide
+          condition: settings:common.libvirt_type.value != 'vcenter'
+        weight: 20
+      use_vcenter:
+        description: ''
+        label: ''
+        type: hidden
+        value: true
+        weight: 5
+      vc_password:
+        description: vCenter admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: admin
+        weight: 30
+      vc_user:
+        description: vCenter admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
+        weight: 20
+      vlan_interface:
+        description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
+          vmnic1). If empty "vmnic0" is used by default
+        label: ESXi VLAN interface
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
+            != 'VlanManager'
+        type: text
+        value: ''
+        weight: 60
+    zabbix:
+      metadata:
+        label: Zabbix Access
+        restrictions:
+        - action: hide
+          condition: not ('experimental' in version:feature_groups)
+        weight: 70
+      password:
+        description: Password for Zabbix Administrator
+        label: password
+        type: password
+        value: zabbix
+        weight: 20
+      username:
+        description: Username for Zabbix Administrator
+        label: username
+        type: text
+        value: admin
+        weight: 10
\ No newline at end of file
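Note: node entries reference the shared definitions by name; interfaces_1 and transformations_1/transformations_2 are resolved against the top-level interfaces: and transformations: maps. A minimal sketch of that lookup (PyYAML assumed, path illustrative):

    import yaml

    with open('dea.yaml') as f:
        dea = yaml.safe_load(f)
    node = dea['nodes'][0]
    ifaces = dea['interfaces'][node['interfaces']]              # -> interfaces_1
    actions = dea['transformations'][node['transformations']]  # -> transformations_1
    roles = node['role'].split(',')                            # ['ceph-osd', 'controller']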
diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dha.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dha.yaml
new file mode 100644 (file)
index 0000000..5acd389
--- /dev/null
@@ -0,0 +1,49 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Fri May  8 08:03:49 UTC 2015
+comment: Config for LF Pod1
+
+# Adapter to use for this definition
+adapter: ipmi
+
+# Node list.
+# Mandatory property is id, all other properties are adapter specific.
+
+nodes:
+- id: 1
+  pxeMac: 00:25:b5:b0:00:ef
+  ipmiIp: 172.30.8.69
+  ipmiUser: admin
+  ipmiPass: octopus
+- id: 2
+  pxeMac: 00:25:b5:b0:00:cf
+  ipmiIp: 172.30.8.78
+  ipmiUser: admin
+  ipmiPass: octopus
+- id: 3
+  pxeMac: 00:25:b5:b0:00:8f
+  ipmiIp: 172.30.8.68
+  ipmiUser: admin
+  ipmiPass: octopus
+- id: 4
+  pxeMac: 00:25:b5:b0:00:6f
+  ipmiIp: 172.30.8.77
+  ipmiUser: admin
+  ipmiPass: octopus
+- id: 5
+  pxeMac: 00:25:b5:b0:00:4f
+  ipmiIp: 172.30.8.67
+  ipmiUser: admin
+  ipmiPass: octopus
+# Adding the Fuel node as node id 6, which may not be correct - please
+# adjust as needed.
+- id: 6
+  libvirtName: vFuel
+  libvirtTemplate: baremetal/vms/fuel_lf.xml
+  isFuel: yes
+  username: root
+  password: r00tme
+
+disks:
+  fuel: 30G
\ No newline at end of file
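Note: node 6 is the only entry carrying isFuel: yes; the deploy code singles it out to obtain login credentials for the Fuel master (compare get_fuel_access in the hardware_adapter.py hunk further down). Sketch of the lookup:

    def get_fuel_access(nodes):
        # Returns (username, password) of the node marked as the Fuel master.
        for node in nodes:
            if node.get('isFuel'):
                return node['username'], node['password']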
diff --git a/fuel/deploy/baremetal/dha.yaml b/fuel/deploy/baremetal/dha.yaml
deleted file mode 100644 (file)
index 6240f07..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-title: Deployment Hardware Adapter (DHA)
-# DHA API version supported
-version: 1.1
-created: Mon May  4 09:03:46 UTC 2015
-comment: Test environment Ericsson Montreal
-
-# Adapter to use for this definition
-adapter: ipmi
-
-# Node list.
-# Mandatory properties are id and role.
-# The MAC address of the PXE boot interface for Fuel is not
-# mandatory to be defined.
-# All other properties are adapter specific.
-
-nodes:
-- id: 1
-  pxeMac: 14:58:D0:54:7A:28
-  ipmiIp: 10.118.32.205
-  ipmiUser: username
-  ipmiPass: password
-- id: 2
-  pxeMac: 14:58:D0:55:E2:E0
-  ipmiIp: 10.118.32.202
-  ipmiUser: username
-  ipmiPass: password
-# Adding the Fuel node as node id 3 which may not be correct - please
-# adjust as needed.
-- id: 3
-  libvirtName: vFuel
-  libvirtTemplate: vFuel
-  isFuel: yes
-  username: root
-  password: r00tme
-
-# Deployment power on strategy
-# all:      Turn on all nodes at once. There will be no correlation
-#           between the DHA and DEA node numbering. MAC addresses
-#           will be used to select the node roles though.
-# sequence: Turn on the nodes in sequence starting with the lowest order
-#           node and wait for the node to be detected by Fuel. Not until
-#           the node has been detected and assigned a role will the next
-#           node be turned on.
-powerOnStrategy: sequence
-
-# If fuelCustomInstall is set to true, Fuel is assumed to be installed by
-# calling the DHA adapter function "dha_fuelCustomInstall()"  with two
-# arguments: node ID and the ISO file name to deploy. The custom install
-# function is then to handle all necessary logic to boot the Fuel master
-# from the ISO and then return.
-# Allowed values: true, false
-fuelCustomInstall: true
-
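Note: for reference, the powerOnStrategy: sequence documented in the deleted file corresponds to a loop of roughly this shape (hypothetical sketch; wait_until_discovered is an assumed helper, the real logic lives in the deploy code):

    for node_id in sorted(node_ids):
        dha.node_power_on(node_id)
        wait_until_discovered(node_id)   # block until Fuel has detected the node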
similarity index 99%
rename from fuel/deploy/baremetal/vm/vFuel
rename to fuel/deploy/baremetal/vms/fuel.xml
index 1b4f4eb..9f1eeac 100644 (file)
@@ -1,5 +1,5 @@
 <domain type='kvm'>
-  <name>vFuel</name>
+  <name>fuel</name>
   <memory unit='KiB'>8290304</memory>
   <currentMemory unit='KiB'>8290304</currentMemory>
   <vcpu placement='static'>2</vcpu>
diff --git a/fuel/deploy/baremetal/vms/fuel_lf.xml b/fuel/deploy/baremetal/vms/fuel_lf.xml
new file mode 100644 (file)
index 0000000..2dd9738
--- /dev/null
@@ -0,0 +1,93 @@
+<domain type='kvm' id='62'>
+  <name>vFuel</name>
+  <memory unit='KiB'>8290304</memory>
+  <currentMemory unit='KiB'>8290304</currentMemory>
+  <vcpu placement='static'>4</vcpu>
+  <resource>
+    <partition>/machine</partition>
+  </resource>
+  <os>
+    <type arch='x86_64' machine='pc-i440fx-rhel7.0.0'>hvm</type>
+    <boot dev='cdrom'/>
+    <boot dev='hd'/>
+    <bootmenu enable='no'/>
+  </os>
+  <features>
+    <acpi/>
+    <apic/>
+    <pae/>
+  </features>
+  <cpu mode='custom' match='exact'>
+    <model fallback='allow'>SandyBridge</model>
+  </cpu>
+  <clock offset='utc'>
+    <timer name='rtc' tickpolicy='catchup'/>
+    <timer name='pit' tickpolicy='delay'/>
+    <timer name='hpet' present='no'/>
+  </clock>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <pm>
+    <suspend-to-mem enabled='no'/>
+    <suspend-to-disk enabled='no'/>
+  </pm>
+  <devices>
+    <emulator>/usr/libexec/qemu-kvm</emulator>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='raw'/>
+      <source file='/home/opnfv/images/vFuel.raw'/>
+      <target dev='vda' bus='virtio'/>
+    </disk>
+    <disk type='block' device='cdrom'>
+      <driver name='qemu' type='raw'/>
+      <target dev='hdb' bus='ide'/>
+      <readonly/>
+    </disk>
+    <controller type='usb' index='0' model='ich9-ehci1'>
+    </controller>
+    <controller type='usb' index='0' model='ich9-uhci1'>
+      <master startport='0'/>
+    </controller>
+    <controller type='usb' index='0' model='ich9-uhci2'>
+      <master startport='2'/>
+    </controller>
+    <controller type='usb' index='0' model='ich9-uhci3'>
+      <master startport='4'/>
+    </controller>
+    <controller type='pci' index='0' model='pci-root'>
+    </controller>
+    <controller type='ide' index='0'>
+    </controller>
+    <controller type='virtio-serial' index='0'>
+    </controller>
+    <interface type='bridge'>
+      <source bridge='pxebr'/>
+      <model type='virtio'/>
+    </interface>
+    <serial type='pty'>
+      <source path='/dev/pts/0'/>
+      <target port='0'/>
+    </serial>
+    <console type='pty' tty='/dev/pts/0'>
+      <source path='/dev/pts/0'/>
+      <target type='serial' port='0'/>
+    </console>
+    <input type='mouse' bus='ps2'/>
+    <input type='keyboard' bus='ps2'/>
+    <graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1' keymap='sv'>
+      <listen type='address' address='127.0.0.1'/>
+    </graphics>
+    <sound model='ich6'>
+    </sound>
+    <video>
+      <model type='cirrus' vram='9216' heads='1'/>
+    </video>
+    <memballoon model='virtio'>
+    </memballoon>
+  </devices>
+  <seclabel type='dynamic' model='selinux' relabel='yes'>
+    <label>system_u:system_r:svirt_t:s0:c52,c932</label>
+    <imagelabel>system_u:object_r:svirt_image_t:s0:c52,c932</imagelabel>
+  </seclabel>
+</domain>
\ No newline at end of file
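Note: this template is not defined verbatim; the deploy tooling (see execution_environment.py below) rewrites the domain <name> and the disk <source> first. Done by hand, the equivalent steps would roughly be (names taken from this template):

    exec_cmd('virsh define fuel_lf.xml')   # register the 'vFuel' domain
    exec_cmd('virsh start vFuel')          # boot it, cdrom first per the <boot> order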
index 4d1315a..a2f2a10 100644 (file)
@@ -26,7 +26,7 @@ class ConfigureNodes(object):
         log('Configure nodes')
         for node_id, roles_blade in self.node_id_roles_dict.iteritems():
             exec_cmd('fuel node set --node-id %s --role %s --env %s'
-                     % (node_id, ','.join(roles_blade[0]), self.env_id))
+                     % (node_id, roles_blade[0], self.env_id))
 
         self.download_deployment_config()
         for node_id, roles_blade in self.node_id_roles_dict.iteritems():
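With roles now kept as a plain comma-separated string, the command rendered above comes out as follows for a combined role (values illustrative):

    >>> 'fuel node set --node-id %s --role %s --env %s' % (1, 'ceph-osd,controller', 1)
    'fuel node set --node-id 1 --role ceph-osd,controller --env 1'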
@@ -37,8 +37,7 @@ class ConfigureNodes(object):
         self.upload_deployment_config()
 
     def modify_node_network_schemes(self, node_id, roles_blade):
-        log('Modify node network transformations in environment %s'
-            % self.env_id)
+        log('Modify network transformations for node %s' % node_id)
         type = self.dea.get_node_property(roles_blade[1], 'transformations')
         transformations = self.dea.get_transformations(type)
 
@@ -53,7 +52,6 @@ class ConfigureNodes(object):
             with io.open(node_file, 'w') as stream:
                yaml.dump(node, stream, default_flow_style=False)
 
-
     def download_deployment_config(self):
         log('Download deployment config for environment %s' % self.env_id)
         exec_cmd('fuel deployment --env %s --default --dir %s'
index c8714f8..c423834 100644 (file)
@@ -19,6 +19,8 @@ parse = common.parse
 err = common.err
 check_file_exists = common.check_file_exists
 log = common.log
+commafy = common.commafy
+ArgParser = common.ArgParser
 
 class Deploy(object):
 
@@ -57,8 +59,8 @@ class Deploy(object):
                 log('Deleting node %s' % node[N['id']])
                 exec_cmd('fuel node --node-id %s --delete-from-db'
                          % node[N['id']])
-                exec_cmd('dockerctl shell cobbler cobbler system remove '
-                         '--name node-%s' % node[N['id']])
+                exec_cmd('cobbler system remove --name node-%s'
+                         % node[N['id']], False)
 
     def check_previous_installation(self):
         log('Check previous installation')
@@ -120,7 +122,7 @@ class Deploy(object):
                     self.node_ids_dict[blade] = node[N['id']]
 
     def discovery_waiting_loop(self, discovered_macs):
-        WAIT_LOOP = 180
+        WAIT_LOOP = 320
         SLEEP_TIME = 10
         all_discovered = False
         for i in range(WAIT_LOOP):
@@ -147,13 +149,8 @@ class Deploy(object):
     def assign_roles_to_cluster_node_ids(self):
         self.node_id_roles_dict = {}
         for blade, node_id in self.node_ids_dict.iteritems():
-            role_list = []
-            role = self.dea.get_node_role(blade)
-            if role == 'controller':
-                role_list.extend(['controller', 'mongo'])
-            elif role == 'compute':
-                role_list.extend(['compute'])
-            self.node_id_roles_dict[node_id] = (role_list, blade)
+            roles = commafy(self.dea.get_node_role(blade))
+            self.node_id_roles_dict[node_id] = (roles, blade)
 
     def configure_environment(self):
         config_env = ConfigureEnvironment(self.dea, YAML_CONF_DIR,
@@ -175,25 +172,16 @@ class Deploy(object):
         self.configure_environment()
         self.deploy_cloud()
 
-def usage():
-    print '''
-    Usage:
-    python deploy.py <dea_file> <macs_file>
-
-    Example:
-            python deploy.py dea.yaml macs.yaml
-    '''
-
 def parse_arguments():
-    if len(sys.argv) != 3:
-        log('Incorrect number of arguments')
-        usage()
-        sys.exit(1)
-    dea_file = sys.argv[-2]
-    macs_file = sys.argv[-1]
-    check_file_exists(dea_file)
-    check_file_exists(macs_file)
-    return dea_file, macs_file
+    parser = ArgParser(prog='python %s' % __file__)
+    parser.add_argument('dea_file', action='store',
+                        help='Deployment Environment Adapter: dea.yaml')
+    parser.add_argument('macs_file', action='store',
+                        help='Blade MAC addresses: macs.yaml')
+    args = parser.parse_args()
+    check_file_exists(args.dea_file)
+    check_file_exists(args.macs_file)
+    return (args.dea_file, args.macs_file)
 
 def main():
 
index cf56c36..0054c5b 100644 (file)
@@ -31,7 +31,7 @@ class Deployment(object):
                          % (self.yaml_config_dir, self.env_id)
         if os.path.exists(deployment_dir):
             shutil.rmtree(deployment_dir)
-        exec_cmd('fuel --env %s deployment --default --dir %s'
+        exec_cmd('fuel deployment --env %s --download --dir %s'
                  % (self.env_id, self.yaml_config_dir))
 
     def upload_deployment_info(self):
@@ -75,7 +75,8 @@ class Deployment(object):
             if env[0][E['status']] == 'operational':
                 ready = True
                 break
-            elif env[0][E['status']] == 'error':
+            elif (env[0][E['status']] == 'error'
+                  or env[0][E['status']] == 'stopped'):
                 break
             else:
                 time.sleep(SLEEP_TIME)
@@ -102,10 +103,9 @@ class Deployment(object):
 
     def health_check(self):
         log('Now running sanity and smoke health checks')
-        exec_cmd('fuel health --env %s --check sanity,smoke --force'
-                 % self.env_id)
-        log('Health checks passed !')
-
+        log(exec_cmd('fuel health --env %s --check sanity,smoke --force'
+                     % self.env_id))
+
     def deploy(self):
         self.config_opnfv()
         self.run_deploy()
index 6dbda67..dc12637 100644 (file)
@@ -2,6 +2,7 @@ import subprocess
 import sys
 import os
 import logging
+import argparse
 
 N = {'id': 0, 'status': 1, 'name': 2, 'cluster': 3, 'ip': 4, 'mac': 5,
      'roles': 6, 'pending_roles': 7, 'online': 8}
@@ -73,6 +74,19 @@ def check_dir_exists(dir_path):
     if not os.path.isdir(dir_path):
         err('ERROR: Directory %s not found\n' % dir_path)
 
+def create_dir_if_not_exists(dir_path):
+    if not os.path.isdir(dir_path):
+        log('Creating directory %s' % dir_path)
+        os.makedirs(dir_path)
+
+def commafy(comma_separated_list):
+    parts = [c.strip() for c in comma_separated_list.split(',')]
+    return ','.join(parts)
+
+def delete_file(file):
+    if os.path.exists(file):
+        os.remove(file)
+
 def check_if_root():
     r = exec_cmd('whoami')
     if r != 'root':
@@ -80,3 +94,10 @@ def check_if_root():
 
 def log(message):
     LOG.debug('%s\n' % message)
+
+class ArgParser(argparse.ArgumentParser):
+    def error(self, message):
+        sys.stderr.write('ERROR: %s\n' % message)
+        self.print_help()
+        sys.exit(2)
+
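The commafy helper added above simply normalizes whitespace around commas, so role strings taken from dea.yaml render cleanly on the fuel CLI:

    >>> commafy('ceph-osd, controller')
    'ceph-osd,controller'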
index 8066b6a..61ebea3 100644 (file)
@@ -48,6 +48,8 @@ class DeploymentEnvironmentAdapter(object):
                 return node[property_name]
 
     def get_node_role(self, node_id):
+        role_list = []
+
         return self.get_node_property(node_id, 'role')
 
     def get_node_ids(self):
index 9d1a3d2..33c6f9f 100644 (file)
@@ -1,33 +1,39 @@
-import sys
 import os
 import shutil
 import io
 import re
+import sys
 import netaddr
+import uuid
+import yaml
 
 from dea import DeploymentEnvironmentAdapter
 from dha import DeploymentHardwareAdapter
 from install_fuel_master import InstallFuelMaster
 from deploy_env import CloudDeploy
+from setup_execution_environment import ExecutionEnvironment
 import common
 
 log = common.log
 exec_cmd = common.exec_cmd
 err = common.err
 check_file_exists = common.check_file_exists
+check_dir_exists = common.check_dir_exists
+create_dir_if_not_exists = common.create_dir_if_not_exists
 check_if_root = common.check_if_root
+ArgParser = common.ArgParser
 
 FUEL_VM = 'fuel'
-TMP_DIR = '%s/fueltmp' % os.getenv('HOME')
 PATCH_DIR = 'fuel_patch'
 WORK_DIR = 'deploy'
+CWD = os.getcwd()
 
 class cd:
     def __init__(self, new_path):
         self.new_path = os.path.expanduser(new_path)
 
     def __enter__(self):
-        self.saved_path = os.getcwd()
+        self.saved_path = CWD
         os.chdir(self.new_path)
 
     def __exit__(self, etype, value, traceback):
@@ -36,8 +42,11 @@ class cd:
 
 class AutoDeploy(object):
 
-    def __init__(self, without_fuel, iso_file, dea_file, dha_file):
+    def __init__(self, without_fuel, storage_dir, pxe_bridge, iso_file,
+                 dea_file, dha_file):
         self.without_fuel = without_fuel
+        self.storage_dir = storage_dir
+        self.pxe_bridge = pxe_bridge
         self.iso_file = iso_file
         self.dea_file = dea_file
         self.dha_file = dha_file
@@ -45,22 +54,8 @@ class AutoDeploy(object):
         self.dha = DeploymentHardwareAdapter(dha_file)
         self.fuel_conf = {}
         self.fuel_node_id = self.dha.get_fuel_node_id()
-        self.fuel_custom = self.dha.use_fuel_custom_install()
         self.fuel_username, self.fuel_password = self.dha.get_fuel_access()
-
-    def setup_dir(self, dir):
-        self.cleanup_dir(dir)
-        os.makedirs(dir)
-
-    def cleanup_dir(self, dir):
-        if os.path.isdir(dir):
-            shutil.rmtree(dir)
-
-    def power_off_blades(self):
-        node_ids = self.dha.get_all_node_ids()
-        node_ids = list(set(node_ids) - set([self.fuel_node_id]))
-        for node_id in node_ids:
-            self.dha.node_power_off(node_id)
+        self.tmp_dir = None
 
     def modify_ip(self, ip_addr, index, val):
         ip_str = str(netaddr.IPAddress(ip_addr))
@@ -77,11 +72,9 @@ class AutoDeploy(object):
         self.fuel_conf['showmenu'] = 'yes'
 
     def install_fuel_master(self):
-        if self.without_fuel:
-            log('Not Installing Fuel Master')
-            return
         log('Install Fuel Master')
-        new_iso = '%s/deploy-%s' % (TMP_DIR, os.path.basename(self.iso_file))
+        new_iso = '%s/deploy-%s' \
+                  % (self.tmp_dir, os.path.basename(self.iso_file))
         self.patch_iso(new_iso)
         self.iso_file = new_iso
         self.install_iso()
@@ -91,23 +84,18 @@ class AutoDeploy(object):
                                  self.fuel_conf['ip'], self.fuel_username,
                                  self.fuel_password, self.fuel_node_id,
                                  self.iso_file, WORK_DIR)
-        if self.fuel_custom:
-            log('Custom Fuel install')
-            fuel.custom_install()
-        else:
-            log('Ordinary Fuel install')
-            fuel.install()
+        fuel.install()
 
     def patch_iso(self, new_iso):
-        tmp_orig_dir = '%s/origiso' % TMP_DIR
-        tmp_new_dir = '%s/newiso' % TMP_DIR
+        tmp_orig_dir = '%s/origiso' % self.tmp_dir
+        tmp_new_dir = '%s/newiso' % self.tmp_dir
         self.copy(tmp_orig_dir, tmp_new_dir)
         self.patch(tmp_new_dir, new_iso)
 
     def copy(self, tmp_orig_dir, tmp_new_dir):
         log('Copying...')
-        self.setup_dir(tmp_orig_dir)
-        self.setup_dir(tmp_new_dir)
+        os.makedirs(tmp_orig_dir)
+        os.makedirs(tmp_new_dir)
         exec_cmd('fuseiso %s %s' % (self.iso_file, tmp_orig_dir))
         with cd(tmp_orig_dir):
             exec_cmd('find . | cpio -pd %s' % tmp_new_dir)
@@ -118,7 +106,7 @@ class AutoDeploy(object):
 
     def patch(self, tmp_new_dir, new_iso):
         log('Patching...')
-        patch_dir = '%s/%s' % (os.getcwd(), PATCH_DIR)
+        patch_dir = '%s/%s' % (CWD, PATCH_DIR)
         ks_path = '%s/ks.cfg.patch' % patch_dir
 
         with cd(tmp_new_dir):
@@ -151,49 +139,83 @@ class AutoDeploy(object):
     def deploy_env(self):
         dep = CloudDeploy(self.dha, self.fuel_conf['ip'], self.fuel_username,
                           self.fuel_password, self.dea_file, WORK_DIR)
-        dep.deploy()
+        return dep.deploy()
+
+    def setup_execution_environment(self):
+        exec_env = ExecutionEnvironment(self.storage_dir, self.pxe_bridge,
+                                        self.dha_file, self.dea)
+        exec_env.setup_environment()
+
+    def create_tmp_dir(self):
+        self.tmp_dir = '%s/fueltmp-%s' % (CWD, str(uuid.uuid1()))
+        os.makedirs(self.tmp_dir)
 
     def deploy(self):
         check_if_root()
-        self.setup_dir(TMP_DIR)
         self.collect_fuel_info()
-        self.power_off_blades()
-        self.install_fuel_master()
-        self.cleanup_dir(TMP_DIR)
-        self.deploy_env()
-
-def usage():
-    print '''
-    Usage:
-    python deploy.py [-nf] <isofile> <deafile> <dhafile>
-
-    Optional arguments:
-      -nf   Do not install Fuel master
-    '''
+        if not self.without_fuel:
+            self.setup_execution_environment()
+            self.create_tmp_dir()
+            self.install_fuel_master()
+            shutil.rmtree(self.tmp_dir)
+        return self.deploy_env()
+
+def check_bridge(pxe_bridge, dha_path):
+    with io.open(dha_path) as yaml_file:
+        dha_struct = yaml.load(yaml_file)
+    if dha_struct['adapter'] != 'libvirt':
+        log('Using Linux Bridge %s for booting up the Fuel Master VM'
+            % pxe_bridge)
+        r = exec_cmd('ip link show %s' % pxe_bridge)
+        if pxe_bridge in r and 'state UP' not in r:
+            err('Linux Bridge {0} is not Active, '
+                'bring it UP first: [ip link set dev {0} up]'.format(pxe_bridge))
 
 def parse_arguments():
-    if (len(sys.argv) < 4 or len(sys.argv) > 5
-        or (len(sys.argv) == 5 and sys.argv[1] != '-nf')):
-        log('Incorrect number of arguments')
-        usage()
-        sys.exit(1)
-    without_fuel = False
-    if len(sys.argv) == 5 and sys.argv[1] == '-nf':
-        without_fuel = True
-    iso_file = sys.argv[-3]
-    dea_file = sys.argv[-2]
-    dha_file = sys.argv[-1]
-    check_file_exists(iso_file)
-    check_file_exists(dea_file)
-    check_file_exists(dha_file)
-    return (without_fuel, iso_file, dea_file, dha_file)
+    parser = ArgParser(prog='python %s' % __file__)
+    parser.add_argument('-nf', dest='without_fuel', action='store_true',
+                        default=False,
+                        help='Do not install Fuel Master (and Node VMs when '
+                             'using libvirt)')
+    parser.add_argument('iso_file', nargs='?', action='store',
+                        default='%s/OPNFV.iso' % CWD,
+                        help='ISO File [default: OPNFV.iso]')
+    parser.add_argument('dea_file', action='store',
+                        help='Deployment Environment Adapter: dea.yaml')
+    parser.add_argument('dha_file', action='store',
+                        help='Deployment Hardware Adapter: dha.yaml')
+    parser.add_argument('-s', dest='storage_dir', action='store',
+                        default='%s/images' % CWD,
+                        help='Storage Directory [default: images]')
+    parser.add_argument('-b', dest='pxe_bridge', action='store',
+                        default='pxebr',
+                        help='Linux Bridge for booting up the Fuel Master VM '
+                             '[default: pxebr]')
+
+    args = parser.parse_args()
+    log(args)
+
+    check_file_exists(args.dea_file)
+    check_file_exists(args.dha_file)
+
+    if not args.without_fuel:
+        log('Using OPNFV ISO file: %s' % args.iso_file)
+        check_file_exists(args.iso_file)
+        log('Using image directory: %s' % args.storage_dir)
+        create_dir_if_not_exists(args.storage_dir)
+        check_bridge(args.pxe_bridge, args.dha_file)
+
+    return (args.without_fuel, args.storage_dir, args.pxe_bridge,
+            args.iso_file, args.dea_file, args.dha_file)
 
-def main():
 
-    without_fuel, iso_file, dea_file, dha_file = parse_arguments()
+def main():
+    without_fuel, storage_dir, pxe_bridge, iso_file, dea_file, dha_file = \
+        parse_arguments()
 
-    d = AutoDeploy(without_fuel, iso_file, dea_file, dha_file)
-    d.deploy()
+    d = AutoDeploy(without_fuel, storage_dir, pxe_bridge, iso_file,
+                   dea_file, dha_file)
+    sys.exit(d.deploy())
 
 if __name__ == '__main__':
     main()
\ No newline at end of file
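With the argparse-based interface above, a typical invocation becomes (paths illustrative; the script must run as root because of check_if_root):

    sudo python deploy.py -s /var/images -b pxebr OPNFV.iso dea.yaml dha.yaml

and -nf skips the Fuel master installation while still deploying the cloud.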
index 9bc8fbb..084f37e 100644 (file)
@@ -53,7 +53,7 @@ class CloudDeploy(object):
 
     def set_boot_order(self, boot_order_list):
         for node_id in self.node_ids:
-            self.dha.node_set_boot_order(node_id, boot_order_list)
+            self.dha.node_set_boot_order(node_id, boot_order_list[:])
 
     def get_mac_addresses(self):
         macs_per_node = {}
@@ -67,8 +67,10 @@ class CloudDeploy(object):
         deploy_app = '%s/%s' % (self.work_dir, deploy_app)
         dea_file = '%s/%s' % (self.work_dir, os.path.basename(self.dea_file))
         macs_file = '%s/%s' % (self.work_dir, os.path.basename(self.macs_file))
-        with self.ssh:
-            self.ssh.run('python %s %s %s' % (deploy_app, dea_file, macs_file))
+        with self.ssh as s:
+            status = s.run('python %s %s %s'
+                           % (deploy_app, dea_file, macs_file))
+        return status
 
     def deploy(self):
 
@@ -84,4 +86,4 @@ class CloudDeploy(object):
 
         self.upload_cloud_deployment_files()
 
-        self.run_cloud_deploy(CLOUD_DEPLOY_FILE)
+        return self.run_cloud_deploy(CLOUD_DEPLOY_FILE)
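The slice in set_boot_order above hands each node a private copy of the list, because IpmiAdapter.node_set_boot_order now reverses its argument in place (see the ipmi_adapter.py hunk below); without the copy the second node would get the order back to front. Illustration (adapter name hypothetical):

    order = ['pxe', 'disk']
    adapter.node_set_boot_order(1, order[:])   # adapter may reverse its copy
    assert order == ['pxe', 'disk']            # caller's list is untouched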
index 884e9ce..a8d0121 100644 (file)
@@ -34,18 +34,15 @@ class HardwareAdapter(object):
         node_ids.sort()
         return node_ids
 
-    def use_fuel_custom_install(self):
-        return self.dha_struct['fuelCustomInstall']
-
     def get_node_property(self, node_id, property_name):
         for node in self.dha_struct['nodes']:
             if node['id'] == node_id and property_name in node:
                 return node[property_name]
 
-    def node_can_zero_mbr(self, node_id):
-        return self.get_node_property(node_id, 'nodeCanZeroMBR')
-
     def get_fuel_access(self):
         for node in self.dha_struct['nodes']:
             if 'isFuel' in node and node['isFuel']:
                 return node['username'], node['password']
+
+    def get_disks(self):
+        return self.dha_struct['disks']
\ No newline at end of file
index 8fc38ad..8cfec34 100644 (file)
@@ -19,7 +19,7 @@ class HpAdapter(IpmiAdapter):
         log('Set boot order %s on Node %s' % (boot_order_list, node_id))
         ip, username, password = self.get_access_info(node_id)
         ssh = SSHClient(ip, username, password)
-        for order, dev in enumerate(boot_order_list):
-            with ssh as s:
+        with ssh as s:
+            for order, dev in enumerate(boot_order_list):
                 s.exec_cmd('set %s/%s bootorder=%s'
                            % (ROOT, DEV[dev], order+1))
index d97fd2d..1bef898 100644 (file)
@@ -1,8 +1,10 @@
 import common
+import time
 from hardware_adapter import HardwareAdapter
 
 log = common.log
 exec_cmd = common.exec_cmd
+err = common.err
 
 class IpmiAdapter(HardwareAdapter):
 
@@ -27,28 +29,72 @@ class IpmiAdapter(HardwareAdapter):
         return mac_list
 
     def node_power_on(self, node_id):
+        WAIT_LOOP = 200
+        SLEEP_TIME = 3
         log('Power ON Node %s' % node_id)
         cmd_prefix = self.ipmi_cmd(node_id)
         state = exec_cmd('%s chassis power status' % cmd_prefix)
         if state == 'Chassis Power is off':
             exec_cmd('%s chassis power on' % cmd_prefix)
+            done = False
+            for i in range(WAIT_LOOP):
+                state, _ = exec_cmd('%s chassis power status' % cmd_prefix,
+                                    False)
+                if state == 'Chassis Power is on':
+                    done = True
+                    break
+                else:
+                    time.sleep(SLEEP_TIME)
+            if not done:
+                err('Could Not Power ON Node %s' % node_id)
 
     def node_power_off(self, node_id):
+        WAIT_LOOP = 200
+        SLEEP_TIME = 3
         log('Power OFF Node %s' % node_id)
         cmd_prefix = self.ipmi_cmd(node_id)
         state = exec_cmd('%s chassis power status' % cmd_prefix)
         if state == 'Chassis Power is on':
+            done = False
             exec_cmd('%s chassis power off' % cmd_prefix)
+            for i in range(WAIT_LOOP):
+                state, _ = exec_cmd('%s chassis power status' % cmd_prefix,
+                                    False)
+                if state == 'Chassis Power is off':
+                    done = True
+                    break
+                else:
+                    time.sleep(SLEEP_TIME)
+            if not done:
+                err('Could Not Power OFF Node %s' % node_id)
 
     def node_reset(self, node_id):
-        log('Reset Node %s' % node_id)
+        WAIT_LOOP = 600
+        log('RESET Node %s' % node_id)
         cmd_prefix = self.ipmi_cmd(node_id)
         state = exec_cmd('%s chassis power status' % cmd_prefix)
         if state == 'Chassis Power is on':
+            was_shut_off = False
+            done = False
             exec_cmd('%s chassis power reset' % cmd_prefix)
+            for i in range(WAIT_LOOP):
+                state, _ = exec_cmd('%s chassis power status' % cmd_prefix,
+                                    False)
+                if state == 'Chassis Power is off':
+                    was_shut_off = True
+                elif state == 'Chassis Power is on' and was_shut_off:
+                    done = True
+                    break
+                time.sleep(1)
+            if not done:
+                err('Could Not RESET Node %s' % node_id)
+        else:
+            err('Cannot RESET Node %s because it\'s not Active, state: %s'
+                % (node_id, state))
 
     def node_set_boot_order(self, node_id, boot_order_list):
         log('Set boot order %s on Node %s' % (boot_order_list, node_id))
+        boot_order_list.reverse()
         cmd_prefix = self.ipmi_cmd(node_id)
         for dev in boot_order_list:
             if dev == 'pxe':
@@ -58,4 +104,4 @@ class IpmiAdapter(HardwareAdapter):
                 exec_cmd('%s chassis bootdev cdrom' % cmd_prefix)
             elif dev == 'disk':
                 exec_cmd('%s chassis bootdev disk options=persistent'
-                         % cmd_prefix)
+                         % cmd_prefix)
\ No newline at end of file
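The three methods above share the same poll-until-state pattern. A hypothetical refactor (not part of this change) could factor it out, using exec_cmd with check=False returning (output, returncode) as in the hunks above:

    import time

    def wait_for_chassis_state(cmd_prefix, wanted, loops=200, sleep_time=3):
        # Poll 'chassis power status' until it reports the wanted state;
        # True on success, False on timeout.
        for _ in range(loops):
            state, _rc = exec_cmd('%s chassis power status' % cmd_prefix, False)
            if state == wanted:
                return True
            time.sleep(sleep_time)
        return False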
index dde4946..1eca548 100644 (file)
@@ -96,12 +96,6 @@ class LibvirtAdapter(HardwareAdapter):
         exec_cmd('virsh change-media %s --insert %s %s'
                  % (vm_name, device, iso_file))
 
-    def get_disks(self):
-        return self.dha_struct['disks']
-
-    def get_node_role(self, node_id):
-        return self.get_node_property(node_id, 'role')
-
     def get_node_pxe_mac(self, node_id):
         mac_list = []
         vm_name = self.get_node_property(node_id, 'libvirtName')
diff --git a/fuel/deploy/environments/__init__.py b/fuel/deploy/environments/__init__.py
new file mode 100644 (file)
index 0000000..c274feb
--- /dev/null
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/environments/execution_environment.py b/fuel/deploy/environments/execution_environment.py
new file mode 100644 (file)
index 0000000..4f612a6
--- /dev/null
@@ -0,0 +1,67 @@
+from lxml import etree
+
+import common
+from dha_adapters.libvirt_adapter import LibvirtAdapter
+
+exec_cmd = common.exec_cmd
+err = common.err
+log = common.log
+check_dir_exists = common.check_dir_exists
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
+class ExecutionEnvironment(object):
+
+    def __init__(self, storage_dir, dha_file, root_dir):
+        self.storage_dir = storage_dir
+        self.dha = LibvirtAdapter(dha_file)
+        self.root_dir = root_dir
+        self.parser = etree.XMLParser(remove_blank_text=True)
+        self.fuel_node_id = self.dha.get_fuel_node_id()
+
+    def delete_vm(self, node_id):
+        vm_name = self.dha.get_node_property(node_id, 'libvirtName')
+        r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
+        if c:
+            return
+        self.undefine_vm_delete_disk(r, vm_name)
+
+    def undefine_vm_delete_disk(self, printout, vm_name):
+        disk_files = []
+        xml_dump = etree.fromstring(printout, self.parser)
+        disks = xml_dump.xpath('/domain/devices/disk')
+        for disk in disks:
+            sources = disk.xpath('source')
+            for source in sources:
+                source_file = source.get('file')
+                if source_file:
+                    disk_files.append(source_file)
+        log('Deleting VM %s with disks %s' % (vm_name, disk_files))
+        exec_cmd('virsh destroy %s' % vm_name, False)
+        exec_cmd('virsh undefine %s' % vm_name, False)
+        for file in disk_files:
+            exec_cmd('rm -f %s' % file)
+
+    def define_vm(self, vm_name, temp_vm_file, disk_path):
+        log('Creating VM %s with disks %s' % (vm_name, disk_path))
+        with open(temp_vm_file) as f:
+            vm_xml = etree.parse(f)
+        names = vm_xml.xpath('/domain/name')
+        for name in names:
+            name.text = vm_name
+        uuids = vm_xml.xpath('/domain/uuid')
+        for uuid in uuids:
+            uuid.getparent().remove(uuid)
+        disks = vm_xml.xpath('/domain/devices/disk')
+        for disk in disks:
+            if (disk.get('type') == 'file'
+                and disk.get('device') == 'disk'):
+                sources = disk.xpath('source')
+                for source in sources:
+                    disk.remove(source)
+                source = etree.Element('source')
+                source.set('file', disk_path)
+                disk.append(source)
+        with open(temp_vm_file, 'w') as f:
+            vm_xml.write(f, pretty_print=True, xml_declaration=True)
+        exec_cmd('virsh define %s' % temp_vm_file)
\ No newline at end of file
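define_vm above edits a copy of the libvirt template in place: it rewrites the domain name, drops the <uuid> element so libvirt generates a fresh one, and repoints every file-backed <disk> at the newly allocated image before running virsh define. The <source> rewrite in isolation; the domain XML and paths below are made up for illustration:

from lxml import etree

xml = ("<domain><name>x</name><devices>"
       "<disk type='file' device='disk'><source file='/old.raw'/></disk>"
       "</devices></domain>")
dom = etree.fromstring(xml)
for disk in dom.xpath('/domain/devices/disk'):
    if disk.get('type') == 'file' and disk.get('device') == 'disk':
        for source in disk.xpath('source'):
            disk.remove(source)            # drop the template's image
        source = etree.Element('source')
        source.set('file', '/var/lib/images/new.raw')
        disk.append(source)                # point the disk at the new image
print(etree.tostring(dom, pretty_print=True).decode())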
diff --git a/fuel/deploy/environments/libvirt_environment.py b/fuel/deploy/environments/libvirt_environment.py
new file mode 100644 (file)
index 0000000..e156fd2
--- /dev/null
@@ -0,0 +1,93 @@
+from lxml import etree
+import glob
+
+import common
+from execution_environment import ExecutionEnvironment
+
+exec_cmd = common.exec_cmd
+err = common.err
+log = common.log
+check_dir_exists = common.check_dir_exists
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
+NET_DIR = 'libvirt/networks'
+
+class LibvirtEnvironment(ExecutionEnvironment):
+
+    def __init__(self, storage_dir, dha_file, dea, root_dir):
+        super(LibvirtEnvironment, self).__init__(
+            storage_dir, dha_file, root_dir)
+        self.dea = dea
+        self.network_dir = '%s/%s' % (self.root_dir, NET_DIR)
+        self.node_ids = self.dha.get_all_node_ids()
+        self.net_names = self.collect_net_names()
+
+    def create_storage(self, node_id, disk_path, disk_sizes):
+        if node_id == self.fuel_node_id:
+            disk_size = disk_sizes['fuel']
+        else:
+            roles = self.dea.get_node_role(node_id)
+            role = 'controller' if 'controller' in roles else 'compute'
+            disk_size = disk_sizes[role]
+        exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
+
+    def create_vms(self):
+        temp_dir = exec_cmd('mktemp -d')
+        disk_sizes = self.dha.get_disks()
+        for node_id in self.node_ids:
+            vm_name = self.dha.get_node_property(node_id, 'libvirtName')
+            vm_template = '%s/%s' % (self.root_dir,
+                                     self.dha.get_node_property(
+                                         node_id, 'libvirtTemplate'))
+            check_file_exists(vm_template)
+            disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
+            self.create_storage(node_id, disk_path, disk_sizes)
+            temp_vm_file = '%s/%s' % (temp_dir, vm_name)
+            exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
+            self.define_vm(vm_name, temp_vm_file, disk_path)
+        exec_cmd('rm -fr %s' % temp_dir)
+
+    def create_networks(self):
+        for net_file in glob.glob('%s/*' % self.network_dir):
+            exec_cmd('virsh net-define %s' % net_file)
+        for net in self.net_names:
+            log('Creating network %s' % net)
+            exec_cmd('virsh net-autostart %s' % net)
+            exec_cmd('virsh net-start %s' % net)
+
+    def delete_networks(self):
+        for net in self.net_names:
+            log('Deleting network %s' % net)
+            exec_cmd('virsh net-destroy %s' % net, False)
+            exec_cmd('virsh net-undefine %s' % net, False)
+
+    def get_net_name(self, net_file):
+        with open(net_file) as f:
+            net_xml = etree.parse(f)
+            name_list = net_xml.xpath('/network/name')
+            for name in name_list:
+                net_name = name.text
+        return net_name
+
+    def collect_net_names(self):
+        net_list = []
+        for net_file in glob.glob('%s/*' % self.network_dir):
+            name = self.get_net_name(net_file)
+            net_list.append(name)
+        return net_list
+
+    def delete_vms(self):
+        for node_id in self.node_ids:
+            self.delete_vm(node_id)
+
+    def setup_environment(self):
+        check_if_root()
+        check_dir_exists(self.network_dir)
+        self.cleanup_environment()
+        self.create_vms()
+        self.create_networks()
+
+    def cleanup_environment(self):
+        self.delete_vms()
+        self.delete_networks()
diff --git a/fuel/deploy/environments/virtual_fuel.py b/fuel/deploy/environments/virtual_fuel.py
new file mode 100644 (file)
index 0000000..1f939f0
--- /dev/null
@@ -0,0 +1,60 @@
+from lxml import etree
+
+import common
+from execution_environment import ExecutionEnvironment
+
+exec_cmd = common.exec_cmd
+log = common.log
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
+class VirtualFuel(ExecutionEnvironment):
+
+    def __init__(self, storage_dir, pxe_bridge, dha_file, root_dir):
+        super(VirtualFuel, self).__init__(
+            storage_dir, dha_file, root_dir)
+        self.pxe_bridge = pxe_bridge
+
+    def set_vm_nic(self, temp_vm_file):
+        with open(temp_vm_file) as f:
+            vm_xml = etree.parse(f)
+        interfaces = vm_xml.xpath('/domain/devices/interface')
+        for interface in interfaces:
+            interface.getparent().remove(interface)
+        interface = etree.Element('interface')
+        interface.set('type', 'bridge')
+        source = etree.SubElement(interface, 'source')
+        source.set('bridge', self.pxe_bridge)
+        model = etree.SubElement(interface, 'model')
+        model.set('type', 'virtio')
+        devices = vm_xml.xpath('/domain/devices')
+        if devices:
+            device = devices[0]
+            device.append(interface)
+        with open(temp_vm_file, 'w') as f:
+            vm_xml.write(f, pretty_print=True, xml_declaration=True)
+
+    def create_vm(self):
+        temp_dir = exec_cmd('mktemp -d')
+        vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
+        vm_template = '%s/%s' % (self.root_dir,
+                                 self.dha.get_node_property(
+                                     self.fuel_node_id, 'libvirtTemplate'))
+        check_file_exists(vm_template)
+        disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
+        disk_sizes = self.dha.get_disks()
+        disk_size = disk_sizes['fuel']
+        exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
+        temp_vm_file = '%s/%s' % (temp_dir, vm_name)
+        exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
+        self.set_vm_nic(temp_vm_file)
+        self.define_vm(vm_name, temp_vm_file, disk_path)
+        exec_cmd('rm -fr %s' % temp_dir)
+
+    def setup_environment(self):
+        check_if_root()
+        self.cleanup_environment()
+        self.create_vm()
+
+    def cleanup_environment(self):
+        self.delete_vm(self.fuel_node_id)
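set_vm_nic above strips every existing <interface> from the Fuel VM template and appends a single bridge-backed virtio NIC, so the master always comes up on the PXE bridge passed to the constructor. The element it builds, shown standalone with a hypothetical bridge name:

from lxml import etree

interface = etree.Element('interface')
interface.set('type', 'bridge')
etree.SubElement(interface, 'source').set('bridge', 'pxebr')  # assumed name
etree.SubElement(interface, 'model').set('type', 'virtio')
print(etree.tostring(interface, pretty_print=True).decode())
# <interface type="bridge">
#   <source bridge="pxebr"/>
#   <model type="virtio"/>
# </interface>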
index bb8e7e1..b9b7809 100644 (file)
@@ -32,15 +32,6 @@ class InstallFuelMaster(object):
 
         self.dha.node_power_off(self.fuel_node_id)
 
-        self.zero_mbr_set_boot_order()
-
-        self.proceed_with_installation()
-
-    def custom_install(self):
-        log('Start Custom Fuel Installation')
-
-        self.dha.node_power_off(self.fuel_node_id)
-
         log('Zero the MBR')
         self.dha.node_zero_mbr(self.fuel_node_id)
 
@@ -68,7 +59,7 @@ class InstallFuelMaster(object):
 
         log('Let the Fuel deployment continue')
         log('Found FUEL menu as PID %s, now killing it' % fuel_menu_pid)
-        self.ssh_exec_cmd('kill %s' % fuel_menu_pid)
+        self.ssh_exec_cmd('kill %s' % fuel_menu_pid, False)
 
         log('Wait until installation complete')
         self.wait_until_installation_completed()
@@ -81,18 +72,6 @@ class InstallFuelMaster(object):
 
         log('Fuel Master installed successfully!')
 
-    def zero_mbr_set_boot_order(self):
-        if self.dha.node_can_zero_mbr(self.fuel_node_id):
-            log('Fuel Node %s capable of zeroing MBR so doing that...'
-                % self.fuel_node_id)
-            self.dha.node_zero_mbr(self.fuel_node_id)
-            self.dha.node_set_boot_order(self.fuel_node_id, ['disk', 'iso'])
-        elif self.dha.node_can_set_boot_order_live(self.fuel_node_id):
-            log('Node %s can change ISO boot order live' % self.fuel_node_id)
-            self.dha.node_set_boot_order(self.fuel_node_id, ['iso', 'disk'])
-        else:
-            err('No way to install Fuel node')
-
     def wait_for_node_up(self):
         WAIT_LOOP = 60
         SLEEP_TIME = 10
@@ -103,8 +82,8 @@ class InstallFuelMaster(object):
                 success = True
                 break
             except Exception as e:
-                log('EXCEPTION [%s] received when SSH-ing into Fuel VM %s ... '
-                    'sleeping %s seconds' % (e, self.fuel_ip, SLEEP_TIME))
+                log('Trying to SSH into Fuel VM %s ... sleeping %s seconds'
+                    % (self.fuel_ip, SLEEP_TIME))
                 time.sleep(SLEEP_TIME)
             finally:
                 self.ssh.close()
@@ -138,9 +117,9 @@ class InstallFuelMaster(object):
                 break
         return fuel_menu_pid
 
-    def ssh_exec_cmd(self, cmd):
+    def ssh_exec_cmd(self, cmd, check=True):
         with self.ssh:
-            ret = self.ssh.exec_cmd(cmd)
+            ret = self.ssh.exec_cmd(cmd, check=check)
         return ret
 
     def inject_own_astute_yaml(self):
@@ -159,7 +138,7 @@ class InstallFuelMaster(object):
                      self.work_dir, os.path.basename(self.dea_file)))
 
     def wait_until_installation_completed(self):
-        WAIT_LOOP = 180
+        WAIT_LOOP = 360
         SLEEP_TIME = 10
         CMD = 'ps -ef | grep %s | grep -v grep' % BOOTSTRAP_ADMIN
 
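Two behavioural changes in this file are worth noting: the fuelmenu kill now runs with check=False, since the menu process can exit on its own and a nonzero status from kill must not abort the installation, and the completion timeout doubles from 180 to 360 polls, i.e. from 30 to 60 minutes at 10 seconds per loop. A sketch of the check convention these calls rely on, reconstructed from how the deploy code uses common.exec_cmd rather than copied from it:

import subprocess

def exec_cmd(cmd, check=True):
    # check=True: return stdout and treat a nonzero exit as fatal.
    # check=False: return (stdout, returncode) and let the caller decide.
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, universal_newlines=True)
    out = p.communicate()[0].strip()
    if check:
        if p.returncode:
            raise RuntimeError('%s failed: %s' % (cmd, out))
        return out
    return out, p.returncode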
diff --git a/fuel/deploy/libvirt/conf/ha/dea.yaml b/fuel/deploy/libvirt/conf/ha/dea.yaml
new file mode 100644 (file)
index 0000000..907bf90
--- /dev/null
@@ -0,0 +1,976 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: Sat Apr 25 16:26:22 UTC 2015
+comment: Small libvirt setup
+environment_name: opnfv_virt
+environment_mode: ha
+wanted_release: Juno on Ubuntu 12.04.4
+nodes:
+- id: 1
+  interfaces: interfaces_1
+  transformations: transformations_1
+  role: ceph-osd,controller
+- id: 2
+  interfaces: interfaces_1
+  transformations: transformations_1
+  role: ceph-osd,controller
+- id: 3
+  interfaces: interfaces_1
+  transformations: transformations_1
+  role: ceph-osd,controller
+- id: 4
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+- id: 5
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+- id: 6
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
+fuel:
+  ADMIN_NETWORK:
+    ipaddress: 10.20.0.2
+    netmask: 255.255.255.0
+    dhcp_pool_start: 10.20.0.3
+    dhcp_pool_end: 10.20.0.254
+  DNS_UPSTREAM: 8.8.8.8
+  DNS_DOMAIN: domain.tld
+  DNS_SEARCH: domain.tld
+  FUEL_ACCESS:
+    user: admin
+    password: admin
+  HOSTNAME: opnfv_virt
+  NTP1: 0.pool.ntp.org
+  NTP2: 1.pool.ntp.org
+  NTP3: 2.pool.ntp.org
+interfaces:
+  interfaces_1:
+    eth0:
+    - fuelweb_admin
+    - management
+    eth1:
+    - storage
+    eth2:
+    - private
+    eth3:
+    - public
+transformations:
+  transformations_1:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-eth2
+    - action: add-port
+      bridge: br-eth2
+      name: eth2
+    - action: add-br
+      name: br-eth3
+    - action: add-port
+      bridge: br-eth3
+      name: eth3
+    - action: add-br
+      name: br-ex
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-storage
+      tags:
+      - 102
+      - 0
+      vlan_ids:
+      - 102
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-mgmt
+      tags:
+      - 101
+      - 0
+      vlan_ids:
+      - 101
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth3
+      - br-ex
+      trunks:
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-prv
+  transformations_2:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-eth2
+    - action: add-port
+      bridge: br-eth2
+      name: eth2
+    - action: add-br
+      name: br-eth3
+    - action: add-port
+      bridge: br-eth3
+      name: eth3
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-storage
+      tags:
+      - 102
+      - 0
+      vlan_ids:
+      - 102
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-mgmt
+      tags:
+      - 101
+      - 0
+      vlan_ids:
+      - 101
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-prv
+opnfv:
+  compute: {}
+  controller: {}
+network:
+  networking_parameters:
+    base_mac: fa:16:3e:00:00:00
+    dns_nameservers:
+    - 8.8.4.4
+    - 8.8.8.8
+    floating_ranges:
+    - - 172.16.0.130
+      - 172.16.0.254
+    gre_id_range:
+    - 2
+    - 65535
+    internal_cidr: 192.168.111.0/24
+    internal_gateway: 192.168.111.1
+    net_l23_provider: ovs
+    segmentation_type: vlan
+    vlan_range:
+    - 1000
+    - 1030
+  networks:
+  - cidr: 172.16.0.0/24
+    gateway: 172.16.0.1
+    ip_ranges:
+    - - 172.16.0.2
+      - 172.16.0.126
+    meta:
+      assign_vip: true
+      cidr: 172.16.0.0/24
+      configurable: true
+      floating_range_var: floating_ranges
+      ip_range:
+      - 172.16.0.2
+      - 172.16.0.126
+      map_priority: 1
+      name: public
+      notation: ip_ranges
+      render_addr_mask: public
+      render_type: null
+      use_gateway: true
+      vlan_start: null
+    name: public
+    vlan_start: null
+  - cidr: 192.168.0.0/24
+    gateway: null
+    ip_ranges:
+    - - 192.168.0.1
+      - 192.168.0.254
+    meta:
+      assign_vip: true
+      cidr: 192.168.0.0/24
+      configurable: true
+      map_priority: 2
+      name: management
+      notation: cidr
+      render_addr_mask: internal
+      render_type: cidr
+      use_gateway: false
+      vlan_start: 101
+    name: management
+    vlan_start: 101
+  - cidr: 192.168.1.0/24
+    gateway: null
+    ip_ranges:
+    - - 192.168.1.1
+      - 192.168.1.254
+    meta:
+      assign_vip: false
+      cidr: 192.168.1.0/24
+      configurable: true
+      map_priority: 2
+      name: storage
+      notation: cidr
+      render_addr_mask: storage
+      render_type: cidr
+      use_gateway: false
+      vlan_start: 102
+    name: storage
+    vlan_start: 102
+  - cidr: null
+    gateway: null
+    ip_ranges: []
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 2
+      name: private
+      neutron_vlan_range: true
+      notation: null
+      render_addr_mask: null
+      render_type: null
+      seg_type: vlan
+      use_gateway: false
+      vlan_start: null
+    name: private
+    vlan_start: null
+  - cidr: 10.20.0.0/24
+    gateway: null
+    ip_ranges:
+    - - 10.20.0.3
+      - 10.20.0.254
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 0
+      notation: ip_ranges
+      render_addr_mask: null
+      render_type: null
+      unmovable: true
+      use_gateway: true
+    name: fuelweb_admin
+    vlan_start: null
+settings:
+  editable:
+    access:
+      email:
+        description: Email address for Administrator
+        label: email
+        type: text
+        value: admin@localhost
+        weight: 40
+      metadata:
+        label: Access
+        weight: 10
+      password:
+        description: Password for Administrator
+        label: password
+        type: password
+        value: admin
+        weight: 20
+      tenant:
+        description: Tenant (project) name for Administrator
+        label: tenant
+        regex:
+          error: Invalid tenant name
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+        type: text
+        value: admin
+        weight: 30
+      user:
+        description: Username for Administrator
+        label: username
+        regex:
+          error: Invalid username
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+        type: text
+        value: admin
+        weight: 10
+    additional_components:
+      ceilometer:
+        description: If selected, Ceilometer component will be installed
+        label: Install Ceilometer
+        type: checkbox
+        value: false
+        weight: 40
+      heat:
+        description: ''
+        label: ''
+        type: hidden
+        value: true
+        weight: 30
+      metadata:
+        label: Additional Components
+        weight: 20
+      murano:
+        description: If selected, Murano component will be installed
+        label: Install Murano
+        restrictions:
+        - cluster:net_provider != 'neutron'
+        type: checkbox
+        value: false
+        weight: 20
+      sahara:
+        description: If selected, Sahara component will be installed
+        label: Install Sahara
+        type: checkbox
+        value: false
+        weight: 10
+    common:
+      auth_key:
+        description: Public key(s) to include in authorized_keys on deployed nodes
+        label: Public Key
+        type: text
+        value: ''
+        weight: 70
+      auto_assign_floating_ip:
+        description: If selected, OpenStack will automatically assign a floating IP
+          to a new instance
+        label: Auto assign floating IP
+        restrictions:
+        - cluster:net_provider == 'neutron'
+        type: checkbox
+        value: false
+        weight: 40
+      compute_scheduler_driver:
+        label: Scheduler driver
+        type: radio
+        value: nova.scheduler.filter_scheduler.FilterScheduler
+        values:
+        - data: nova.scheduler.filter_scheduler.FilterScheduler
+          description: Currently the most advanced OpenStack scheduler. See the OpenStack
+            documentation for details.
+          label: Filter scheduler
+        - data: nova.scheduler.simple.SimpleScheduler
+          description: This is a 'naive' scheduler which tries to find the
+            least loaded host
+          label: Simple scheduler
+        weight: 40
+      debug:
+        description: Debug logging mode provides more information, but requires more
+          disk space.
+        label: OpenStack debug logging
+        type: checkbox
+        value: false
+        weight: 20
+      disable_offload:
+        description: If set, generic segmentation offload (gso) and generic receive
+          offload (gro) on physical nics will be disabled. See ethtool man.
+        label: Disable generic offload on physical nics
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
+            == 'gre'
+        type: checkbox
+        value: true
+        weight: 80
+      libvirt_type:
+        label: Hypervisor type
+        type: radio
+        value: kvm
+        values:
+        - data: kvm
+          description: Choose this type of hypervisor if you run OpenStack on hardware
+          label: KVM
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
+        - data: qemu
+          description: Choose this type of hypervisor if you run OpenStack on virtual
+            hosts.
+          label: QEMU
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
+        - data: vcenter
+          description: Choose this type of hypervisor if you run OpenStack in a vCenter
+            environment.
+          label: vCenter
+          restrictions:
+          - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
+            == 'neutron'
+        weight: 30
+      metadata:
+        label: Common
+        weight: 30
+      nova_quota:
+        description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+          quotas will increase load on the Nova database.
+        label: Nova quotas
+        type: checkbox
+        value: false
+        weight: 25
+      resume_guests_state_on_host_boot:
+        description: Whether to resume the previous guests' state when the host
+          reboots. If enabled, this option causes guests assigned to the host
+          to resume their previous state. If the guest was running, a restart
+          will be attempted when nova-compute starts. If the guest was not
+          running previously, a restart will not be attempted.
+        label: Resume guests state on host boot
+        type: checkbox
+        value: true
+        weight: 60
+      use_cow_images:
+        description: For most cases you will want qcow format. If it's disabled, raw
+          image format will be used to run VMs. OpenStack with raw format currently
+          does not support snapshotting.
+        label: Use qcow format for images
+        type: checkbox
+        value: true
+        weight: 50
+    corosync:
+      group:
+        description: ''
+        label: Group
+        type: text
+        value: 226.94.1.1
+        weight: 10
+      metadata:
+        label: Corosync
+        restrictions:
+        - action: hide
+          condition: 'true'
+        weight: 50
+      port:
+        description: ''
+        label: Port
+        type: text
+        value: '12000'
+        weight: 20
+      verified:
+        description: Set True only if multicast is configured correctly on router.
+        label: Need to pass network verification.
+        type: checkbox
+        value: false
+        weight: 10
+    external_dns:
+      dns_list:
+        description: List of upstream DNS servers, separated by comma
+        label: DNS list
+        type: text
+        value: 8.8.8.8, 8.8.4.4
+        weight: 10
+      metadata:
+        label: Upstream DNS
+        weight: 90
+    external_ntp:
+      metadata:
+        label: Upstream NTP
+        weight: 100
+      ntp_list:
+        description: List of upstream NTP servers, separated by comma
+        label: NTP servers list
+        type: text
+        value: 0.pool.ntp.org, 1.pool.ntp.org
+        weight: 10
+    kernel_params:
+      kernel:
+        description: Default kernel parameters
+        label: Initial parameters
+        type: text
+        value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
+        weight: 45
+      metadata:
+        label: Kernel parameters
+        weight: 40
+    neutron_mellanox:
+      metadata:
+        enabled: true
+        label: Mellanox Neutron components
+        toggleable: false
+        weight: 50
+      plugin:
+        label: Mellanox drivers and SR-IOV plugin
+        type: radio
+        value: disabled
+        values:
+        - data: disabled
+          description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+            not be installed.
+          label: Mellanox drivers and plugins disabled
+          restrictions:
+          - settings:storage.iser.value == true
+        - data: drivers_only
+          description: If selected, Mellanox Ethernet drivers will be installed to
+            support networking over Mellanox NIC. Mellanox Neutron plugin will not
+            be installed.
+          label: Install only Mellanox drivers
+          restrictions:
+          - settings:common.libvirt_type.value != 'kvm'
+        - data: ethernet
+          description: If selected, both Mellanox Ethernet drivers and Mellanox network
+            acceleration (Neutron) plugin will be installed.
+          label: Install Mellanox drivers and SR-IOV plugin
+          restrictions:
+          - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+            == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+        weight: 60
+      vf_num:
+        description: Note that one virtual function will be reserved to the storage
+          network, in case of choosing iSER.
+        label: Number of virtual NICs
+        restrictions:
+        - settings:neutron_mellanox.plugin.value != 'ethernet'
+        type: text
+        value: '16'
+        weight: 70
+    nsx_plugin:
+      connector_type:
+        description: Default network transport type to use
+        label: NSX connector type
+        type: select
+        value: stt
+        values:
+        - data: gre
+          label: GRE
+        - data: ipsec_gre
+          label: GRE over IPSec
+        - data: stt
+          label: STT
+        - data: ipsec_stt
+          label: STT over IPSec
+        - data: bridge
+          label: Bridge
+        weight: 80
+      l3_gw_service_uuid:
+        description: UUID for the default L3 gateway service to use with this cluster
+        label: L3 service UUID
+        regex:
+          error: Invalid L3 gateway service UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+        type: text
+        value: ''
+        weight: 50
+      metadata:
+        enabled: false
+        label: VMware NSX
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
+            != 'nsx'
+        weight: 20
+      nsx_controllers:
+        description: One or more IPv4[:port] addresses of NSX controller node, separated
+          by comma (e.g. 10.30.30.2,192.168.110.254:443)
+        label: NSX controller endpoint
+        regex:
+          error: Invalid controller endpoints, specify valid IPv4[:port] pair
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
+        type: text
+        value: ''
+        weight: 60
+      nsx_password:
+        description: Password for Administrator
+        label: NSX password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: ''
+        weight: 30
+      nsx_username:
+        description: NSX administrator's username
+        label: NSX username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
+        weight: 20
+      packages_url:
+        description: URL to NSX specific packages
+        label: URL to NSX bits
+        regex:
+          error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
+            http://10.20.0.2/nsx)
+          source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
+        type: text
+        value: ''
+        weight: 70
+      replication_mode:
+        description: ''
+        label: NSX cluster has Service nodes
+        type: checkbox
+        value: true
+        weight: 90
+      transport_zone_uuid:
+        description: UUID of the pre-existing default NSX Transport zone
+        label: Transport zone UUID
+        regex:
+          error: Invalid transport zone UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+        type: text
+        value: ''
+        weight: 40
+    provision:
+      metadata:
+        label: Provision
+        restrictions:
+        - action: hide
+          condition: not ('experimental' in version:feature_groups)
+        weight: 80
+      method:
+        description: Which provision method to use for this cluster.
+        label: Provision method
+        type: radio
+        value: cobbler
+        values:
+        - data: image
+          description: Copying pre-built images on a disk.
+          label: Image
+        - data: cobbler
+          description: Install from scratch using anaconda or debian-installer.
+          label: Classic (use anaconda or debian-installer)
+    public_network_assignment:
+      assign_to_all_nodes:
+        description: When disabled, public network will be assigned to controllers
+          and zabbix-server only
+        label: Assign public network to all nodes
+        type: checkbox
+        value: false
+        weight: 10
+      metadata:
+        label: Public network assignment
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'neutron'
+        weight: 50
+    storage:
+      ephemeral_ceph:
+        description: Configures Nova to store ephemeral volumes in RBD. This works
+          best if Ceph is enabled for volumes and images, too. Enables live migration
+          of all types of Ceph backed VMs (without this option, live migration will
+          only work with VMs launched from Cinder volumes).
+        label: Ceph RBD for ephemeral volumes (Nova)
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
+        type: checkbox
+        value: true
+        weight: 75
+      images_ceph:
+        description: Configures Glance to use the Ceph RBD backend to store images.
+          If enabled, this option will prevent Swift from installing.
+        label: Ceph RBD for images (Glance)
+        type: checkbox
+        value: true
+        weight: 30
+      images_vcenter:
+        description: Configures Glance to use the vCenter/ESXi backend to store images.
+          If enabled, this option will prevent Swift from installing.
+        label: VMWare vCenter/ESXi datastore for images (Glance)
+        restrictions:
+        - settings:common.libvirt_type.value != 'vcenter'
+        type: checkbox
+        value: false
+        weight: 35
+      iser:
+        description: 'High performance block storage: Cinder volumes over iSER protocol
+          (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
+          and will use a dedicated virtual function for the storage network.'
+        label: iSER protocol for volumes (Cinder)
+        restrictions:
+        - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+          != 'kvm'
+        type: checkbox
+        value: false
+        weight: 11
+      metadata:
+        label: Storage
+        weight: 60
+      objects_ceph:
+        description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+          Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+        label: Ceph RadosGW for objects (Swift API)
+        restrictions:
+        - settings:storage.images_ceph.value == false
+        type: checkbox
+        value: false
+        weight: 80
+      osd_pool_size:
+        description: Configures the default number of object replicas in Ceph. This
+          number must be equal to or lower than the number of deployed 'Storage -
+          Ceph OSD' nodes.
+        label: Ceph object replication factor
+        regex:
+          error: Invalid number
+          source: ^[1-9]\d*$
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
+        type: text
+        value: '2'
+        weight: 85
+      vc_datacenter:
+        description: Inventory path to a datacenter. If you want to use ESXi host
+          as datastore, it should be "ha-datacenter".
+        label: Datacenter name
+        regex:
+          error: Empty datacenter
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 65
+      vc_datastore:
+        description: Datastore associated with the datacenter.
+        label: Datastore name
+        regex:
+          error: Empty datastore
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 60
+      vc_host:
+        description: IP Address of vCenter/ESXi
+        label: vCenter/ESXi IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 45
+      vc_image_dir:
+        description: The name of the directory where the glance images will be stored
+          in the VMware datastore.
+        label: Datastore Images directory
+        regex:
+          error: Empty images directory
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: /openstack_glance
+        weight: 70
+      vc_password:
+        description: vCenter/ESXi admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: password
+        value: ''
+        weight: 55
+      vc_user:
+        description: vCenter/ESXi admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 50
+      volumes_ceph:
+        description: Configures Cinder to store volumes in Ceph RBD images.
+        label: Ceph RBD for volumes (Cinder)
+        restrictions:
+        - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
+          == 'vcenter'
+        type: checkbox
+        value: true
+        weight: 20
+      volumes_lvm:
+        description: Requires at least one Storage - Cinder LVM node.
+        label: Cinder LVM over iSCSI for volumes
+        restrictions:
+        - settings:storage.volumes_ceph.value == true
+        type: checkbox
+        value: false
+        weight: 10
+      volumes_vmdk:
+        description: Configures Cinder to store volumes via VMware vCenter.
+        label: VMware vCenter for volumes (Cinder)
+        restrictions:
+        - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
+          == true
+        type: checkbox
+        value: false
+        weight: 15
+    syslog:
+      metadata:
+        label: Syslog
+        weight: 50
+      syslog_port:
+        description: Remote syslog port
+        label: Port
+        regex:
+          error: Invalid Syslog port
+          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+        type: text
+        value: '514'
+        weight: 20
+      syslog_server:
+        description: Remote syslog hostname
+        label: Hostname
+        type: text
+        value: ''
+        weight: 10
+      syslog_transport:
+        label: Syslog transport protocol
+        type: radio
+        value: tcp
+        values:
+        - data: udp
+          description: ''
+          label: UDP
+        - data: tcp
+          description: ''
+          label: TCP
+        weight: 30
+    vcenter:
+      cluster:
+        description: vCenter cluster name. If you have multiple clusters, use comma
+          to separate names
+        label: Cluster
+        regex:
+          error: Invalid cluster list
+          source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
+        type: text
+        value: ''
+        weight: 40
+      datastore_regex:
+        description: The Datastore regexp setting specifies the data stores to use
+          with Compute. For example, "nas.*". If you want to use all available datastores,
+          leave this field blank
+        label: Datastore regexp
+        regex:
+          error: Invalid datastore regexp
+          source: ^(\S.*\S|\S|)$
+        type: text
+        value: ''
+        weight: 50
+      host_ip:
+        description: IP Address of vCenter
+        label: vCenter IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        type: text
+        value: ''
+        weight: 10
+      metadata:
+        label: vCenter
+        restrictions:
+        - action: hide
+          condition: settings:common.libvirt_type.value != 'vcenter'
+        weight: 20
+      use_vcenter:
+        description: ''
+        label: ''
+        type: hidden
+        value: true
+        weight: 5
+      vc_password:
+        description: vCenter admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: admin
+        weight: 30
+      vc_user:
+        description: vCenter admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
+        weight: 20
+      vlan_interface:
+        description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
+          vmnic1). If empty "vmnic0" is used by default
+        label: ESXi VLAN interface
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
+            != 'VlanManager'
+        type: text
+        value: ''
+        weight: 60
+    zabbix:
+      metadata:
+        label: Zabbix Access
+        restrictions:
+        - action: hide
+          condition: not ('experimental' in version:feature_groups)
+        weight: 70
+      password:
+        description: Password for Zabbix Administrator
+        label: password
+        type: password
+        value: zabbix
+        weight: 20
+      username:
+        description: Username for Zabbix Administrator
+        label: username
+        type: text
+        value: admin
+        weight: 10
\ No newline at end of file
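The nodes list at the top of this DEA ties each node id to an interfaces profile, a transformations profile and a comma-separated role string; create_storage in libvirt_environment.py sizes a node's disk by checking whether 'controller' appears among those roles. A minimal sketch of reading that mapping; yaml.safe_load and the relative path are choices made for this illustration, the deploy code itself uses yaml.load:

import yaml

with open('fuel/deploy/libvirt/conf/ha/dea.yaml') as f:
    dea = yaml.safe_load(f)

for node in dea['nodes']:
    roles = node['role'].split(',')
    kind = 'controller' if 'controller' in roles else 'compute'
    print('node %s: %s (interfaces=%s, transformations=%s)'
          % (node['id'], kind, node['interfaces'], node['transformations']))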
diff --git a/fuel/deploy/libvirt/conf/ha/dha.yaml b/fuel/deploy/libvirt/conf/ha/dha.yaml
new file mode 100644 (file)
index 0000000..d862f64
--- /dev/null
@@ -0,0 +1,42 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Sat Apr 25 16:26:22 UTC 2015
+comment: Small libvirt setup
+
+# Adapter to use for this definition
+adapter: libvirt
+
+# Node list.
+# Mandatory property is id, all other properties are adapter specific.
+
+nodes:
+- id: 1
+  libvirtName: controller1
+  libvirtTemplate: libvirt/vms/controller.xml
+- id: 2
+  libvirtName: controller2
+  libvirtTemplate: libvirt/vms/controller.xml
+- id: 3
+  libvirtName: controller3
+  libvirtTemplate: libvirt/vms/controller.xml
+- id: 4
+  libvirtName: compute1
+  libvirtTemplate: libvirt/vms/compute.xml
+- id: 5
+  libvirtName: compute2
+  libvirtTemplate: libvirt/vms/compute.xml
+- id: 6
+  libvirtName: compute3
+  libvirtTemplate: libvirt/vms/compute.xml
+- id: 7
+  libvirtName: fuel-master
+  libvirtTemplate: libvirt/vms/fuel.xml
+  isFuel: yes
+  username: root
+  password: r00tme
+
+disks:
+  fuel: 30G
+  controller: 30G
+  compute: 30G
similarity index 97%
rename from fuel/deploy/libvirt/dea.yaml
rename to fuel/deploy/libvirt/conf/multinode/dea.yaml
index 802293f..dfd8382 100644 (file)
@@ -3,34 +3,34 @@ title: Deployment Environment Adapter (DEA)
 version: 1.1
 created: Sat Apr 25 16:26:22 UTC 2015
 comment: Small libvirt setup
-environment_name: opnfv59-b
+environment_name: opnfv_virt
 environment_mode: multinode
 wanted_release: Juno on Ubuntu 12.04.4
 nodes:
 - id: 1
-  interfaces: interface1
-  transformations: controller1
-  role: controller
+  interfaces: interfaces_1
+  transformations: transformations_1
+  role: ceph-osd,controller
 - id: 2
-  interfaces: interface1
-  transformations: controller1
-  role: controller
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
 - id: 3
-  interfaces: interface1
-  transformations: controller1
-  role: controller
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
 - id: 4
-  interfaces: interface1
-  transformations: compute1
-  role: compute
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
 - id: 5
-  interfaces: interface1
-  transformations: compute1
-  role: compute
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
 - id: 6
-  interfaces: interface1
-  transformations: compute1
-  role: compute
+  interfaces: interfaces_1
+  transformations: transformations_2
+  role: ceph-osd,compute
 fuel:
   ADMIN_NETWORK:
     ipaddress: 10.20.0.2
@@ -43,12 +43,12 @@ fuel:
   FUEL_ACCESS:
     user: admin
     password: admin
-  HOSTNAME: opnfv59
+  HOSTNAME: opnfv_virt
   NTP1: 0.pool.ntp.org
   NTP2: 1.pool.ntp.org
   NTP3: 2.pool.ntp.org
 interfaces:
-  interface1:
+  interfaces_1:
     eth0:
     - fuelweb_admin
     - management
@@ -59,7 +59,7 @@ interfaces:
     eth3:
     - public
 transformations:
-  controller1:
+  transformations_1:
     - action: add-br
       name: br-eth0
     - action: add-port
@@ -126,7 +126,7 @@ transformations:
       bridges:
       - br-eth2
       - br-prv
-  compute1:
+  transformations_2:
     - action: add-br
       name: br-eth0
     - action: add-port
@@ -692,14 +692,14 @@ settings:
         restrictions:
         - settings:common.libvirt_type.value == 'vcenter'
         type: checkbox
-        value: false
+        value: true
         weight: 75
       images_ceph:
         description: Configures Glance to use the Ceph RBD backend to store images.
           If enabled, this option will prevent Swift from installing.
         label: Ceph RBD for images (Glance)
         type: checkbox
-        value: false
+        value: true
         weight: 30
       images_vcenter:
         description: Configures Glance to use the vCenter/ESXi backend to store images.
@@ -833,7 +833,7 @@ settings:
         - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
           == 'vcenter'
         type: checkbox
-        value: false
+        value: true
         weight: 20
       volumes_lvm:
         description: Requires at least one Storage - Cinder LVM node.
@@ -841,7 +841,7 @@ settings:
         restrictions:
         - settings:storage.volumes_ceph.value == true
         type: checkbox
-        value: true
+        value: false
         weight: 10
       volumes_vmdk:
         description: Configures Cinder to store volumes via VMware vCenter.
@@ -973,4 +973,4 @@ settings:
         label: username
         type: text
         value: admin
-        weight: 10
+        weight: 10
\ No newline at end of file
diff --git a/fuel/deploy/libvirt/conf/multinode/dha.yaml b/fuel/deploy/libvirt/conf/multinode/dha.yaml
new file mode 100644 (file)
index 0000000..5e560bf
--- /dev/null
@@ -0,0 +1,42 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Sat Apr 25 16:26:22 UTC 2015
+comment: Small libvirt setup
+
+# Adapter to use for this definition
+adapter: libvirt
+
+# Node list.
+# Mandatory property is id, all other properties are adapter specific.
+
+nodes:
+- id: 1
+  libvirtName: controller1
+  libvirtTemplate: libvirt/vms/controller.xml
+- id: 2
+  libvirtName: compute1
+  libvirtTemplate: libvirt/vms/compute.xml
+- id: 3
+  libvirtName: compute2
+  libvirtTemplate: libvirt/vms/compute.xml
+- id: 4
+  libvirtName: compute3
+  libvirtTemplate: libvirt/vms/compute.xml
+- id: 5
+  libvirtName: compute4
+  libvirtTemplate: libvirt/vms/compute.xml
+- id: 6
+  libvirtName: compute5
+  libvirtTemplate: libvirt/vms/compute.xml
+- id: 7
+  libvirtName: fuel-master
+  libvirtTemplate: libvirt/vms/fuel.xml
+  isFuel: yes
+  username: root
+  password: r00tme
+
+disks:
+  fuel: 30G
+  controller: 30G
+  compute: 30G
diff --git a/fuel/deploy/libvirt/dha.yaml b/fuel/deploy/libvirt/dha.yaml
deleted file mode 100644 (file)
index ce61e53..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-title: Deployment Hardware Adapter (DHA)
-# DHA API version supported
-version: 1.1
-created: Sat Apr 25 16:26:22 UTC 2015
-comment: Small libvirt setup
-
-# Adapter to use for this definition
-adapter: libvirt
-
-# Node list.
-# Mandatory fields are id and role.
-# The MAC address of the PXE boot interface is not mandatory
-#   to be set, but the field must be present.
-# All other fields are adapter specific.
-
-nodes:
-- id: 1
-  pxeMac: 52:54:00:aa:dd:84
-  libvirtName: controller1
-  libvirtTemplate: controller
-  role: controller
-- id: 2
-  pxeMac: 52:54:00:aa:dd:84
-  libvirtName: controller2
-  libvirtTemplate: controller
-  role: controller
-- id: 3
-  pxeMac: 52:54:00:aa:dd:84
-  libvirtName: controller3
-  libvirtTemplate: controller
-  role: controller
-- id: 4
-  pxeMac: 52:54:00:41:64:f3
-  libvirtName: compute1
-  libvirtTemplate: compute
-  role: compute
-- id: 5
-  pxeMac: 52:54:00:69:a0:79
-  libvirtName: compute2
-  libvirtTemplate: compute
-  role: compute
-- id: 6
-  pxeMac: 52:54:00:69:a0:79
-  libvirtName: compute3
-  libvirtTemplate: compute
-  role: compute
-- id: 7
-  pxeMac: 52:54:00:f8:b0:75
-  libvirtName: fuel-master
-  libvirtTemplate: fuel-master
-  isFuel: yes
-  nodeCanZeroMBR: yes
-  nodeCanSetBootOrderLive: yes
-  username: root
-  password: r00tme
-
-disks:
-  fuel: 30G
-  controller: 30G
-  compute: 30G
-
-# Deployment power on strategy
-# all:      Turn on all nodes at once. There will be no correlation
-#           between the DHA and DEA node numbering. MAC addresses
-#           will be used to select the node roles though.
-# sequence: Turn on the nodes in sequence starting with the lowest order
-#           node and wait for the node to be detected by Fuel. Not until
-#           the node has been detected and assigned a role will the next
-#           node be turned on.
-powerOnStrategy: all
-
-# If fuelCustomInstall is set to true, Fuel is assumed to be installed by
-# calling the DHA adapter function "dha_fuelCustomInstall()"  with two
-# arguments: node ID and the ISO file name to deploy. The custom install
-# function is then to handle all necessary logic to boot the Fuel master
-# from the ISO and then return.
-# Allowed values: true, false
-
-fuelCustomInstall: false
-
similarity index 99%
rename from fuel/deploy/libvirt/vms/compute
rename to fuel/deploy/libvirt/vms/compute.xml
index 7591509..2ea35ac 100644 (file)
@@ -1,5 +1,5 @@
 <domain type='kvm'>
-  <name>compute4</name>
+  <name>compute</name>
   <memory unit='KiB'>8388608</memory>
   <currentMemory unit='KiB'>8388608</currentMemory>
   <vcpu placement='static'>2</vcpu>
similarity index 99%
rename from fuel/deploy/libvirt/vms/controller
rename to fuel/deploy/libvirt/vms/controller.xml
index a871262..4377879 100644 (file)
@@ -1,5 +1,5 @@
 <domain type='kvm'>
-  <name>controller1</name>
+  <name>controller</name>
   <memory unit='KiB'>2097152</memory>
   <currentMemory unit='KiB'>2097152</currentMemory>
   <vcpu placement='static'>2</vcpu>
similarity index 99%
rename from fuel/deploy/libvirt/vms/fuel-master
rename to fuel/deploy/libvirt/vms/fuel.xml
index f4e652b..1a32860 100644 (file)
@@ -1,5 +1,5 @@
 <domain type='kvm'>
-  <name>fuel-master</name>
+  <name>fuel</name>
   <memory unit='KiB'>2097152</memory>
   <currentMemory unit='KiB'>2097152</currentMemory>
   <vcpu placement='static'>2</vcpu>
diff --git a/fuel/deploy/reap.py b/fuel/deploy/reap.py
new file mode 100644 (file)
index 0000000..8a8681a
--- /dev/null
@@ -0,0 +1,330 @@
+import common
+import time
+import os
+import yaml
+import glob
+import shutil
+
+N = common.N
+E = common.E
+R = common.R
+ArgParser = common.ArgParser
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+log = common.log
+delete_file = common.delete_file
+commafy = common.commafy
+
+DEA_1 = '''
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: {date}
+comment: {comment}
+'''
+
+DHA_1 = '''
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: {date}
+comment: {comment}
+
+# Adapter to use for this definition
+# adapter: [ipmi|libvirt]
+adapter:
+
+# Node list.
+# Mandatory properties are id and role.
+# All other properties are adapter specific.
+# For Non-Fuel nodes controlled by:
+#   - ipmi adapter you need to provide:
+#       pxeMac
+#       ipmiIp
+#       ipmiUser
+#       ipmiPass
+#   - libvirt adapter you need to provide:
+#       libvirtName: <whatever>
+#       libvirtTemplate: [libvirt/vms/controller.xml | libvirt/vms/compute.xml]
+#
+# For the Fuel Node you need to provide:
+#       libvirtName: <whatever>
+#       libvirtTemplate: libvirt/vms/fuel.xml
+#       isFuel: yes
+#       username: root
+#       password: r00tme
+'''
+
+DHA_2 = '''
+# Adding the Fuel node as node id {node_id}
+# which may not be correct - please adjust as needed.
+'''
+
+DISKS = {'fuel': '30G',
+         'controller': '30G',
+         'compute': '30G'}
+
+class Reap(object):
+
+    def __init__(self, dea_file, dha_file, comment):
+        self.dea_file = dea_file
+        self.dha_file = dha_file
+        self.comment = comment
+        self.temp_dir = None
+        self.env = None
+        self.env_id = None
+        self.last_node = None
+
+    def get_env(self):
+        env_list = parse(exec_cmd('fuel env'))
+        if len(env_list) != 1:
+            err('Not exactly one environment')
+        self.env = env_list[0]
+        self.env_id = self.env[E['id']]
+
+    def download_config(self, config_type):
+        log('Download %s config for environment %s'
+            % (config_type, self.env_id))
+        exec_cmd('fuel %s --env %s --download --dir %s'
+                 % (config_type, self.env_id, self.temp_dir))
+
+    def write(self, file, text, newline=True):
+        mode = 'a' if os.path.isfile(file) else 'w'
+        with open(file, mode) as f:
+            f.write('%s%s' % (text, ('\n' if newline else '')))
+
+    def write_yaml(self, file, data, newline=True):
+        self.write(file, yaml.dump(data, default_flow_style=False).strip(),
+                   newline)
+
+    def get_node_by_id(self, node_list, node_id):
+        for node in node_list:
+            if node[N['id']] == node_id:
+                return node
+
+    def reap_interface(self, node_id, interfaces):
+        interface, mac = self.get_interface(node_id)
+        if_name = None
+        if interfaces:
+            if_name = self.check_dict_exists(interfaces, interface)
+        if not if_name:
+            if_name = 'interfaces_%s' % str(len(interfaces) + 1)
+            interfaces[if_name] = interface
+        return if_name, mac
+
+    def reap_transformation(self, node_id, roles, transformations):
+        main_role = 'controller' if 'controller' in roles else 'compute'
+        node_file = glob.glob('%s/deployment_%s/*%s_%s.yaml'
+                              % (self.temp_dir, self.env_id,
+                                 main_role, node_id))
+        tr_name = None
+        with open(node_file[0]) as f:
+            node_config = yaml.load(f)
+        transformation = node_config['network_scheme']['transformations']
+        if transformations:
+            tr_name = self.check_dict_exists(transformations, transformation)
+        if not tr_name:
+            tr_name = 'transformations_%s' % str(len(transformations) + 1)
+            transformations[tr_name] = transformation
+        return tr_name
+
+    def check_dict_exists(self, main_dict, candidate):
+        for key, val in main_dict.iteritems():
+            if cmp(candidate, val) == 0:
+                return key
+
+    def reap_nodes_interfaces_transformations(self):
+        node_list = parse(exec_cmd('fuel node'))
+        real_node_ids = [node[N['id']] for node in node_list]
+        real_node_ids.sort()
+        min_node = real_node_ids[0]
+
+        interfaces = {}
+        transformations = {}
+        dea_nodes = []
+        dha_nodes = []
+
+        for real_node_id in real_node_ids:
+            node_id = int(real_node_id) - int(min_node) + 1
+            self.last_node = node_id
+            node = self.get_node_by_id(node_list, real_node_id)
+            roles = commafy(node[N['roles']])
+            if not roles:
+                err('Fuel Node %s has no role' % real_node_id)
+            dea_node = {'id': node_id,
+                        'role': roles}
+            dha_node = {'id': node_id}
+            if_name, mac = self.reap_interface(real_node_id, interfaces)
+            tr_name = self.reap_transformation(real_node_id, roles,
+                                               transformations)
+            dea_node.update(
+                {'interfaces': if_name,
+                 'transformations': tr_name})
+
+            dha_node.update(
+                {'pxeMac': mac if mac else None,
+                 'ipmiIp': None,
+                 'ipmiUser': None,
+                 'ipmiPass': None,
+                 'libvirtName': None,
+                 'libvirtTemplate': None})
+
+            dea_nodes.append(dea_node)
+            dha_nodes.append(dha_node)
+
+        self.write_yaml(self.dha_file, {'nodes': dha_nodes}, False)
+        self.write_yaml(self.dea_file, {'nodes': dea_nodes})
+        self.write_yaml(self.dea_file, {'interfaces': interfaces})
+        self.write_yaml(self.dea_file, {'transformations': transformations})
+        self.reap_fuel_node_info()
+        self.write_yaml(self.dha_file, {'disks': DISKS})
+
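+    # Append a template entry for the Fuel master itself; the libvirt
+    # details have to be filled in manually.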
+    def reap_fuel_node_info(self):
+        dha_nodes = []
+        dha_node = {
+            'id': self.last_node + 1,
+            'libvirtName': None,
+            'libvirtTemplate': None,
+            'isFuel': True,
+            'username': 'root',
+            'password': 'r00tme'}
+
+        dha_nodes.append(dha_node)
+
+        self.write(self.dha_file, DHA_2.format(node_id=dha_node['id']), False)
+        self.write_yaml(self.dha_file, dha_nodes)
+
+    def reap_environment_info(self):
+        self.write_yaml(self.dea_file,
+                        {'environment_name': self.env[E['name']]})
+        self.write_yaml(self.dea_file,
+                        {'environment_mode': self.env[E['mode']]})
+        wanted_release = None
+        rel_list = parse(exec_cmd('fuel release'))
+        for rel in rel_list:
+            if rel[R['id']] == self.env[E['release_id']]:
+                wanted_release = rel[R['name']]
+        self.write_yaml(self.dea_file, {'wanted_release': wanted_release})
+
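+    # The mac and interface keys are specific to this master and must not
+    # be carried over to a new deployment, so they are dropped.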
+    def reap_fuel_settings(self):
+        data = self.read_yaml('/etc/fuel/astute.yaml')
+        fuel = {}
+        del data['ADMIN_NETWORK']['mac']
+        del data['ADMIN_NETWORK']['interface']
+        for key in ['ADMIN_NETWORK', 'HOSTNAME', 'DNS_DOMAIN', 'DNS_SEARCH',
+                    'DNS_UPSTREAM', 'NTP1', 'NTP2', 'NTP3', 'FUEL_ACCESS']:
+            fuel[key] = data[key]
+        self.write_yaml(self.dea_file, {'fuel': fuel})
+
+    def reap_network_settings(self):
+        network_file = ('%s/network_%s.yaml'
+                        % (self.temp_dir, self.env_id))
+        data = self.read_yaml(network_file)
+        network = {}
+        network['networking_parameters'] = data['networking_parameters']
+        network['networks'] = data['networks']
+        for net in network['networks']:
+            del net['id']
+            del net['group_id']
+        self.write_yaml(self.dea_file, {'network': network})
+
+    def reap_settings(self):
+        settings_file = '%s/settings_%s.yaml' % (self.temp_dir, self.env_id)
+        settings = self.read_yaml(settings_file)
+        self.write_yaml(self.dea_file, {'settings': settings})
+
+    def get_opnfv_astute(self, role):
+        node_files = glob.glob('%s/deployment_%s/*%s*.yaml'
+                               % (self.temp_dir, self.env_id, role))
+        node_config = self.read_yaml(node_files[0])
+        return node_config['opnfv'] if 'opnfv' in node_config else {}
+
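+    # Collect the OPNFV-specific astute settings per role; an empty dict
+    # is used when a deployment file carries no 'opnfv' section.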
+    def reap_opnfv_astute(self):
+        controller_opnfv_astute = self.get_opnfv_astute('controller')
+        compute_opnfv_astute = self.get_opnfv_astute('compute')
+        opnfv = {}
+        opnfv['opnfv'] = {
+            'controller': controller_opnfv_astute,
+            'compute': compute_opnfv_astute}
+        self.write_yaml(self.dea_file, opnfv)
+
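+    # Download the node's network config and derive both the interface ->
+    # network mapping and the PXE MAC (the one on fuelweb_admin).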
+    def get_interface(self, real_node_id):
+        exec_cmd('fuel node --node-id %s --network --download --dir %s'
+                 % (real_node_id, self.temp_dir))
+        interface_file = ('%s/node_%s/interfaces.yaml'
+                          % (self.temp_dir, real_node_id))
+        interfaces = self.read_yaml(interface_file)
+        interface_config = {}
+        pxe_mac = None
+        for interface in interfaces:
+            networks = []
+            for network in interface['assigned_networks']:
+                networks.append(network['name'])
+                if network['name'] == 'fuelweb_admin':
+                    pxe_mac = interface['mac']
+            if networks:
+                interface_config[interface['name']] = networks
+        return interface_config, pxe_mac
+
+    def read_yaml(self, yaml_file):
+        with open(yaml_file) as f:
+            data = yaml.load(f)
+            return data
+
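+    # Start from fresh DEA/DHA files and download the deployment, settings
+    # and network config into a temporary directory.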
+    def intro(self):
+        delete_file(self.dea_file)
+        delete_file(self.dha_file)
+        self.temp_dir = exec_cmd('mktemp -d')
+        date = time.strftime('%c')
+        self.write(self.dea_file,
+                   DEA_1.format(date=date, comment=self.comment), False)
+        self.write(self.dha_file,
+                   DHA_1.format(date=date, comment=self.comment))
+        self.get_env()
+        self.download_config('deployment')
+        self.download_config('settings')
+        self.download_config('network')
+
+    def finale(self):
+        log('DEA file is available at %s' % self.dea_file)
+        log('DHA file is available at %s (this is just a template)'
+            % self.dha_file)
+        shutil.rmtree(self.temp_dir)
+
+    def reap(self):
+        self.intro()
+        self.reap_environment_info()
+        self.reap_nodes_interfaces_transformations()
+        self.reap_fuel_settings()
+        self.reap_opnfv_astute()
+        self.reap_network_settings()
+        self.reap_settings()
+        self.finale()
+
+def usage():
+    print '''
+    Usage:
+    python reap.py <dea_file> <dha_file> <comment>
+    '''
+
+def parse_arguments():
+    parser = ArgParser(prog='python %s' % __file__)
+    parser.add_argument('dea_file', nargs='?', action='store',
+                        default='dea.yaml',
+                        help='Deployment Environment Adapter: dea.yaml')
+    parser.add_argument('dha_file', nargs='?', action='store',
+                        default='dha.yaml',
+                        help='Deployment Hardware Adapter: dha.yaml')
+    parser.add_argument('comment', nargs='?', action='store', help='Comment')
+    args = parser.parse_args()
+    return (args.dea_file, args.dha_file, args.comment)
+
+def main():
+    dea_file, dha_file, comment = parse_arguments()
+
+    r = Reap(dea_file, dha_file, comment)
+    r.reap()
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/fuel/deploy/setup_environment.py b/fuel/deploy/setup_environment.py
deleted file mode 100644 (file)
index 4e0e7ba..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-import sys
-from lxml import etree
-import os
-import glob
-import common
-
-from dha import DeploymentHardwareAdapter
-
-exec_cmd = common.exec_cmd
-err = common.err
-log = common.log
-check_dir_exists = common.check_dir_exists
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
-
-
-class LibvirtEnvironment(object):
-
-    def __init__(self, storage_dir, dha_file):
-        self.dha = DeploymentHardwareAdapter(dha_file)
-        self.storage_dir = storage_dir
-        self.parser = etree.XMLParser(remove_blank_text=True)
-        self.file_dir = os.path.dirname(os.path.realpath(__file__))
-        self.network_dir = '%s/libvirt/networks' % self.file_dir
-        self.vm_dir = '%s/libvirt/vms' % self.file_dir
-        self.node_ids = self.dha.get_all_node_ids()
-        self.fuel_node_id = self.dha.get_fuel_node_id()
-        self.net_names = self.collect_net_names()
-
-    def create_storage(self, node_id, disk_path, disk_sizes):
-        if node_id == self.fuel_node_id:
-           disk_size = disk_sizes['fuel']
-        else:
-           role = self.dha.get_node_role(node_id)
-           disk_size = disk_sizes[role]
-        exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
-
-    def create_vms(self):
-        temp_dir = exec_cmd('mktemp -d')
-        disk_sizes = self.dha.get_disks()
-        for node_id in self.node_ids:
-            vm_name = self.dha.get_node_property(node_id, 'libvirtName')
-            vm_template = self.dha.get_node_property(node_id,
-                                                     'libvirtTemplate')
-            disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
-            self.create_storage(node_id, disk_path, disk_sizes)
-            self.define_vm(vm_name, vm_template, temp_dir, disk_path)
-        exec_cmd('rm -fr %s' % temp_dir)
-
-    def define_vm(self, vm_name, vm_template, temp_dir, disk_path):
-        log('Creating VM %s with disks %s' % (vm_name, disk_path))
-        temp_vm_file = '%s/%s' % (temp_dir, vm_name)
-        exec_cmd('cp %s/%s %s' % (self.vm_dir, vm_template, temp_vm_file))
-        with open(temp_vm_file) as f:
-            vm_xml = etree.parse(f)
-            names = vm_xml.xpath('/domain/name')
-            for name in names:
-                name.text = vm_name
-            uuids = vm_xml.xpath('/domain/uuid')
-            for uuid in uuids:
-                uuid.getparent().remove(uuid)
-            disks = vm_xml.xpath('/domain/devices/disk')
-            for disk in disks:
-                sources = disk.xpath('source')
-                for source in sources:
-                    source.set('file', disk_path)
-        with open(temp_vm_file, 'w') as f:
-            vm_xml.write(f, pretty_print=True, xml_declaration=True)
-        exec_cmd('virsh define %s' % temp_vm_file)
-
-    def create_networks(self):
-        for net_file in glob.glob('%s/*' % self.network_dir):
-            exec_cmd('virsh net-define %s' % net_file)
-        for net in self.net_names:
-            log('Creating network %s' % net)
-            exec_cmd('virsh net-autostart %s' % net)
-            exec_cmd('virsh net-start %s' % net)
-
-    def delete_networks(self):
-        for net in self.net_names:
-            log('Deleting network %s' % net)
-            exec_cmd('virsh net-destroy %s' % net, False)
-            exec_cmd('virsh net-undefine %s' % net, False)
-
-    def get_net_name(self, net_file):
-        with open(net_file) as f:
-            net_xml = etree.parse(f)
-            name_list = net_xml.xpath('/network/name')
-            for name in name_list:
-                net_name = name.text
-        return net_name
-
-    def collect_net_names(self):
-        net_list = []
-        for net_file in glob.glob('%s/*' % self.network_dir):
-            name = self.get_net_name(net_file)
-            net_list.append(name)
-        return net_list
-
-    def delete_vms(self):
-        for node_id in self.node_ids:
-            vm_name = self.dha.get_node_property(node_id, 'libvirtName')
-            r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
-            if c > 0:
-                log(r)
-                continue
-            self.undefine_vm_delete_disk(r, vm_name)
-
-    def undefine_vm_delete_disk(self, printout, vm_name):
-        disk_files = []
-        xml_dump = etree.fromstring(printout, self.parser)
-        disks = xml_dump.xpath('/domain/devices/disk')
-        for disk in disks:
-            sources = disk.xpath('source')
-            for source in sources:
-                source_file = source.get('file')
-                if source_file:
-                    disk_files.append(source_file)
-        log('Deleting VM %s with disks %s' % (vm_name, disk_files))
-        exec_cmd('virsh destroy %s' % vm_name, False)
-        exec_cmd('virsh undefine %s' % vm_name, False)
-        for file in disk_files:
-            exec_cmd('rm -f %s' % file)
-
-    def setup_environment(self):
-        check_if_root()
-        check_dir_exists(self.network_dir)
-        check_dir_exists(self.vm_dir)
-        self.cleanup_environment()
-        self.create_vms()
-        self.create_networks()
-
-    def cleanup_environment(self):
-        self.delete_vms()
-        self.delete_networks()
-
-
-def usage():
-    print '''
-    Usage:
-    python setup_environment.py <storage_directory> <dha_file>
-
-    Example:
-            python setup_environment.py /mnt/images dha.yaml
-    '''
-
-def parse_arguments():
-    if len(sys.argv) != 3:
-        log('Incorrect number of arguments')
-        usage()
-        sys.exit(1)
-    storage_dir = sys.argv[-2]
-    dha_file = sys.argv[-1]
-    check_dir_exists(storage_dir)
-    check_file_exists(dha_file)
-    return storage_dir, dha_file
-
-def main():
-    storage_dir, dha_file = parse_arguments()
-
-    virt = LibvirtEnvironment(storage_dir, dha_file)
-    virt.setup_environment()
-
-if __name__ == '__main__':
-    main()
\ No newline at end of file
diff --git a/fuel/deploy/setup_execution_environment.py b/fuel/deploy/setup_execution_environment.py
new file mode 100644 (file)
index 0000000..d97fcde
--- /dev/null
@@ -0,0 +1,36 @@
+import yaml
+import io
+import sys
+import os
+
+import common
+from environments.libvirt_environment import LibvirtEnvironment
+from environments.virtual_fuel import VirtualFuel
+from dea import DeploymentEnvironmentAdapter
+
+exec_cmd = common.exec_cmd
+err = common.err
+log = common.log
+check_dir_exists = common.check_dir_exists
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+ArgParser = common.ArgParser
+
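+# Factory class: __new__ dispatches on the 'adapter' field in the DHA file
+# and returns an instance of the matching concrete environment.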
+class ExecutionEnvironment(object):
+    def __new__(cls, storage_dir, pxe_bridge, dha_path, dea):
+
+        with io.open(dha_path) as yaml_file:
+            dha_struct = yaml.load(yaml_file)
+
+        adapter_type = dha_struct['adapter']
+
+        root_dir = os.path.dirname(os.path.realpath(__file__))
+
+        if cls is ExecutionEnvironment:
+            if adapter_type == 'libvirt':
+                return LibvirtEnvironment(storage_dir, dha_path, dea, root_dir)
+
+            if adapter_type in ('ipmi', 'hp'):
+                return VirtualFuel(storage_dir, pxe_bridge, dha_path, root_dir)
+
+        return super(ExecutionEnvironment, cls).__new__(cls)
diff --git a/fuel/deploy/setup_vfuel.py b/fuel/deploy/setup_vfuel.py
deleted file mode 100644 (file)
index 65ee013..0000000
+++ /dev/null
@@ -1,143 +0,0 @@
-import sys
-from lxml import etree
-import os
-
-import common
-from dha import DeploymentHardwareAdapter
-
-exec_cmd = common.exec_cmd
-err = common.err
-log = common.log
-check_dir_exists = common.check_dir_exists
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
-
-VFUELNET = '''
-iface vfuelnet inet static
-        bridge_ports em1
-        address 10.40.0.1
-        netmask 255.255.255.0
-        pre-down iptables -t nat -D POSTROUTING --out-interface p1p1.20 -j MASQUERADE  -m comment --comment "vfuelnet"
-        pre-down iptables -D FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
-        post-up iptables -t nat -A POSTROUTING --out-interface p1p1.20 -j MASQUERADE  -m comment --comment "vfuelnet"
-        post-up iptables -A FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
-'''
-VM_DIR = 'baremetal/vm'
-FUEL_DISK_SIZE = '30G'
-IFACE = 'vfuelnet'
-INTERFACE_CONFIG = '/etc/network/interfaces'
-
-class VFuel(object):
-
-    def __init__(self, storage_dir, dha_file):
-        self.dha = DeploymentHardwareAdapter(dha_file)
-        self.storage_dir = storage_dir
-        self.parser = etree.XMLParser(remove_blank_text=True)
-        self.fuel_node_id = self.dha.get_fuel_node_id()
-        self.file_dir = os.path.dirname(os.path.realpath(__file__))
-        self.vm_dir = '%s/%s' % (self.file_dir, VM_DIR)
-
-    def setup_environment(self):
-        check_if_root()
-        check_dir_exists(self.vm_dir)
-        self.setup_networking()
-        self.delete_vm()
-        self.create_vm()
-
-    def setup_networking(self):
-        with open(INTERFACE_CONFIG) as f:
-            data = f.read()
-        if VFUELNET not in data:
-            log('Appending to file %s:\n %s' % (INTERFACE_CONFIG, VFUELNET))
-            with open(INTERFACE_CONFIG, 'a') as f:
-                f.write('\n%s\n' % VFUELNET)
-            if exec_cmd('ip link show | grep %s' % IFACE):
-                log('Bring DOWN interface %s' % IFACE)
-                exec_cmd('ifdown %s' % IFACE, False)
-            log('Bring UP interface %s' % IFACE)
-            exec_cmd('ifup %s' % IFACE, False)
-
-    def delete_vm(self):
-        vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
-        r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
-        if c > 0:
-            log(r)
-            return
-        self.undefine_vm_delete_disk(r, vm_name)
-
-    def undefine_vm_delete_disk(self, printout, vm_name):
-        disk_files = []
-        xml_dump = etree.fromstring(printout, self.parser)
-        disks = xml_dump.xpath('/domain/devices/disk')
-        for disk in disks:
-            sources = disk.xpath('source')
-            for source in sources:
-                source_file = source.get('file')
-                if source_file:
-                    disk_files.append(source_file)
-        log('Deleting VM %s with disks %s' % (vm_name, disk_files))
-        exec_cmd('virsh destroy %s' % vm_name, False)
-        exec_cmd('virsh undefine %s' % vm_name, False)
-        for file in disk_files:
-            exec_cmd('rm -f %s' % file)
-
-    def create_vm(self):
-        temp_dir = exec_cmd('mktemp -d')
-        vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
-        vm_template = self.dha.get_node_property(self.fuel_node_id,
-                                                 'libvirtTemplate')
-        disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
-        exec_cmd('fallocate -l %s %s' % (FUEL_DISK_SIZE, disk_path))
-        self.define_vm(vm_name, vm_template, temp_dir, disk_path)
-        exec_cmd('rm -fr %s' % temp_dir)
-
-    def define_vm(self, vm_name, vm_template, temp_dir, disk_path):
-        log('Creating VM %s with disks %s' % (vm_name, disk_path))
-        temp_vm_file = '%s/%s' % (temp_dir, vm_name)
-        exec_cmd('cp %s/%s %s' % (self.vm_dir, vm_template, temp_vm_file))
-        with open(temp_vm_file) as f:
-            vm_xml = etree.parse(f)
-            names = vm_xml.xpath('/domain/name')
-            for name in names:
-                name.text = vm_name
-            uuids = vm_xml.xpath('/domain/uuid')
-            for uuid in uuids:
-                uuid.getparent().remove(uuid)
-            disks = vm_xml.xpath('/domain/devices/disk')
-            for disk in disks:
-                sources = disk.xpath('source')
-                for source in sources:
-                    source.set('file', disk_path)
-        with open(temp_vm_file, 'w') as f:
-            vm_xml.write(f, pretty_print=True, xml_declaration=True)
-        exec_cmd('virsh define %s' % temp_vm_file)
-
-
-def usage():
-    print '''
-    Usage:
-    python setup_vfuel.py <storage_directory> <dha_file>
-
-    Example:
-            python setup_vfuel.py /mnt/images dha.yaml
-    '''
-
-def parse_arguments():
-    if len(sys.argv) != 3:
-        log('Incorrect number of arguments')
-        usage()
-        sys.exit(1)
-    storage_dir = sys.argv[-2]
-    dha_file = sys.argv[-1]
-    check_dir_exists(storage_dir)
-    check_file_exists(dha_file)
-    return storage_dir, dha_file
-
-def main():
-    storage_dir, dha_file = parse_arguments()
-
-    vfuel = VFuel(storage_dir, dha_file)
-    vfuel.setup_environment()
-
-if __name__ == '__main__':
-    main()
index 9ea227a..0ec2edc 100644 (file)
@@ -6,6 +6,7 @@ TIMEOUT = 600
 log = common.log
 err = common.err
 
+
 class SSHClient(object):
 
     def __init__(self, host, username, password):
@@ -18,7 +19,8 @@ class SSHClient(object):
         self.client = paramiko.SSHClient()
         self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         self.client.connect(self.host, username=self.username,
-                            password=self.password, timeout=timeout)
+                            password=self.password, look_for_keys=False,
+                            timeout=timeout)
 
     def close(self):
         if self.client is not None:
@@ -60,16 +62,15 @@ class SSHClient(object):
             if chan.recv_ready():
                 data = chan.recv(1024)
                 while data:
-                    print data
+                    log(data.strip())
                     data = chan.recv(1024)
 
             if chan.recv_stderr_ready():
                 error_buff = chan.recv_stderr(1024)
                 while error_buff:
-                    print error_buff
+                    log(error_buff.strip())
                     error_buff = chan.recv_stderr(1024)
-        exit_status = chan.recv_exit_status()
-        log('Exit status %s' % exit_status)
+        return chan.recv_exit_status()
 
     def scp_get(self, remote, local='.', dir=False):
         try:
index 9e70427..25de4b9 100644 (file)
@@ -205,7 +205,7 @@ network:
     gateway: 172.30.9.1
     ip_ranges:
     - - 172.30.9.70
-      - 172.30.9.70
+      - 172.30.9.79
     meta:
       assign_vip: true
       cidr: 172.16.0.0/24
index fd0e7b3..3abbdce 100644 (file)
@@ -205,7 +205,7 @@ network:
     gateway: 172.30.9.1
     ip_ranges:
     - - 172.30.9.70
-      - 172.30.9.70
+      - 172.30.9.79
     meta:
       assign_vip: true
       cidr: 172.16.0.0/24