Added option to install neutron-openvswitch without ODL.
Consolidated source files to common as all SDNs share the
same source for openstack and it can be modified during deployment.
Change-Id: I1df7f5cd5d928d170bc6be6b32c3404dbcb5fda3
case "$1" in
'nonha' )
- cp odl/juju-deployer/ovs-odl.yaml ./bundles.yaml
+ cp nosdn/juju-deployer/ovs.yaml ./bundles.yaml
;;
'ha' )
- cp odl/juju-deployer/ovs-odl-ha.yaml ./bundles.yaml
+ cp nosdn/juju-deployer/ovs-ha.yaml ./bundles.yaml
;;
'tip' )
- cp odl/juju-deployer/ovs-odl-tip.yaml ./bundles.yaml
- cp odl/juju-deployer/source/* ./
+ cp nosdn/juju-deployer/ovs-tip.yaml ./bundles.yaml
+ cp common/source/* ./
sed -i -- "s|branch: master|branch: stable/$2|g" ./*.yaml
;;
* )
- cp odl/juju-deployer/ovs-odl.yaml ./bundles.yaml
+ cp nosdn/juju-deployer/ovs.yaml ./bundles.yaml
;;
esac
# As per your lab vip address list; by default it uses 10.4.1.11 - 10.4.1.20
sed -i -- 's/10.4.1.1/192.168.10.1/g' ./bundles.yaml
# Choose the external port to go out from gateway to use.
- sed -i -- 's/# "ext-port": "eth1"/ "ext-port": "juju-br0"/g' ./bundles.yaml
+ sed -i -- 's/# "ext-port": "eth1"/ "ext-port": "eth1"/g' ./bundles.yaml
;;
esac
;;
esac
-echo "... Deployment finished ...."
services:
nodes-api:
charm: "local:trusty/ubuntu-nodes-controller"
- num_units: 3
+ num_units: 1
constraints: tags=control
nodes-compute:
- charm: "local:trusty/ubuntu-nodes-controller"
+ charm: "local:trusty/ubuntu-nodes-compute"
num_units: 1
constraints: tags=compute
"ntp":
openstack-phase2:
inherits: openstack-phase1
services:
- "percona-cluster":
- charm: "cs:trusty/percona-cluster"
- num_units: 3
+ "mysql":
+ charm: "cs:trusty/mysql"
+ num_units: 1
options:
"dataset-size": 2G
- "lp1366997-workaround": true
"max-connections": 10000
- "root-password": 293rhc395m
- "sst-password": 127rc14t51
- vip: 10.4.1.25
to:
- "lxc:nodes-api=0"
- - "lxc:nodes-api=1"
- - "lxc:nodes-api=2"
- "hacluster-keystone":
- charm: "cs:trusty/hacluster"
- options:
- corosync_key: |
- 'QisjNpoKHLkvyc3PEdDHP5VZjgD0kOk9t0ABPWmCELlu5AgWgnW6bJd34BVXO7VhW+3ZwNQM3W+bZ3pl28ftY4ki4N/S0KLFNia5egFgiA3AH2QXLnokS7OYXuu6GMJJ1S5mDe6P5zU+tfJe/Mebq24u7CgR0gAvohSPRdDTwdY='
- "hacluster-horizon":
- charm: "cs:trusty/hacluster"
- options:
- corosync_key: |
- 'aCVPHw6XYne+Hxv0WPiM3+yEfeIxs0Ly0EMKB494Rdzjf5rE52GcXqqxuvIa/nXJ4GCR+UdKk9FEwcASfYkeu3HDWUld9uTE6pOc+ibWYnybNH7VBfEHW8h9YmQKs3HD2T3wlTcS2irU4CUW7/IKNok4etYdM3iFn1K2ReSGXEI='
- "hacluster-nova":
- charm: "cs:trusty/hacluster"
- options:
- corosync_key: |
- 'gel86qmEze8dYKYbfqIgRpqapJpKtdYL2hxC1y5nWYBPq7EMf6V8mF01IjeUkSRs14CUDrsPpT4PWeom7EOY2fleuLx/aIuqQUfEDkhf/gvaz7BaU4hrmTCoDBK7/HvEwY+/wu4qkEeckzSRPsm9MYzqnLRshh8yjZJ70xU/mmk='
- "hacluster-neutron":
- charm: "cs:trusty/hacluster"
+ "ceilometer":
+ branch: "lp:~openstack-charmers/charms/trusty/ceilometer/next"
+ num_units: 1
+ to:
+ - "lxc:nodes-api=0"
+ "ceilometer-agent":
+ branch: "lp:~openstack-charmers/charms/trusty/ceilometer-agent/next"
+ "mongodb":
+ charm: "cs:trusty/mongodb"
+ num_units: 1
+ to:
+ - "lxc:nodes-api=0"
+ heat:
+ branch: "lp:~openstack-charmers/charms/trusty/heat/next"
+ to:
+ - "lxc:nodes-api=0"
+ ceph:
+ charm: cs:trusty/ceph
+ num_units: 1
options:
- corosync_key: |
- 'KNhb4++3jlllbnscS5D3qdzOJDsQPEeZ7zOLZJHbkKrRjX9gRCijVVOiv2JCvq03HqQ7LIufQzWGl9Za8qh0f6QmQ3XhFh/Cb/3WaYFj+tEf0zArWv+8miswmM1z4eyTSrTWBq0dTgx1z96wjBxP5HV0+1LWW+3Ei4oZWyRGeR0='
- "hacluster-glance":
- charm: "cs:trusty/hacluster"
+ fsid: 5a791d94-980b-11e4-b6f6-3c970e8b1cf7
+ monitor-secret: AQAi5a9UeJXUExAA+By9u+GPhl8/XiUQ4nwI3A==
+ osd-devices: /srv
+ monitor-count: 1
+# osd-reformat: 'yes'
+# ceph-cluster-network: 192.168.0.0/24
+# ceph-public-network: 192.168.0.0/24
+ to:
+ - "nodes-compute=0"
+# - "nodes-compute=1"
+# ceph-osd:
+# charm: cs:trusty/ceph-osd
+# num_units: 1
+# options:
+# osd-devices: /srv
+# osd-reformat: 'yes'
+# to:
+# - "nodes-compute=0"
+# ceph-radosgw:
+# charm: cs:trusty/ceph-radosgw
+# num_units: 1
+# options:
+# use-embedded-webserver: true
+# to:
+# - "lxc:nodes-api=0"
+ cinder:
+ charm: cs:trusty/cinder
+ num_units: 1
options:
- corosync_key: |
- 'el1dd8107J5mwQDPS7tEJPZrr0XFfL95+Tku/QG90W5Q5f5SP4W8TRfKvddGmZWZl2lVd1neG5WqaHa1mq/aScJpoflzVAJCvshN7Gd2AjHhLNNugeI8S90j/7wrKUhqiCAlkKaeri2xs5bB5PZ7Z9AHuNZL7SW1al8lxrKhUFI='
+ block-device: None
+ glance-api-version: 2
+# ha-mcastport: 5401
+ to:
+ - "lxc:nodes-api=0"
+ cinder-ceph:
+ charm: cs:trusty/cinder-ceph
"rabbitmq-server":
- charm: "cs:trusty/rabbitmq-server"
- num_units: 3
- options:
- management_plugin: true
+ branch: lp:charms/trusty/rabbitmq-server
+ num_units: 1
to:
- "lxc:nodes-api=0"
- - "lxc:nodes-api=1"
- - "lxc:nodes-api=2"
"keystone":
- charm: "cs:trusty/keystone"
- num_units: 3
+ branch: lp:charms/trusty/keystone
+ num_units: 1
options:
+# ha-mcastport: 5402
"admin-password": openstack
"admin-token": admin
- vip: 10.4.1.23
to:
- "lxc:nodes-api=0"
- - "lxc:nodes-api=1"
- - "lxc:nodes-api=2"
"openstack-dashboard":
- charm: "cs:trusty/openstack-dashboard"
- num_units: 3
+ branch: lp:charms/trusty/openstack-dashboard
+ num_units: 1
options:
secret: admin
- vip: 10.4.1.21
webroot: /
to:
- "lxc:nodes-api=0"
- - "lxc:nodes-api=1"
- - "lxc:nodes-api=2"
"nova-compute":
- branch: "lp:~openstack-charmers/charms/trusty/nova-compute/next"
+ branch: lp:~openstack-charmers/charms/trusty/nova-compute/next
num_units: 1
options:
- "enable-live-migration": true
- "enable-resize": true
- "migration-auth-type": ssh
+ "enable-live-migration": False
+ "manage-neutron-plugin-legacy-mode": False
to:
- "nodes-compute=0"
# - "nodes-compute=1"
# - "nodes-api=1"
# - "nodes-api=2"
"nova-cloud-controller":
- charm: "cs:trusty/nova-cloud-controller"
- num_units: 3
+ branch: lp:charms/trusty/nova-cloud-controller
+ num_units: 1
options:
"console-access-protocol": novnc
"network-manager": Neutron
"quantum-security-groups": "yes"
- "service-guard": true
- vip: 10.4.1.26
to:
- "lxc:nodes-api=0"
- - "lxc:nodes-api=1"
- - "lxc:nodes-api=2"
"neutron-api":
branch: lp:~openstack-charmers/charms/trusty/neutron-api/next
- num_units: 3
+ num_units: 1
options:
- neutron-security-groups: False
+ neutron-security-groups: True
manage-neutron-plugin-legacy-mode: False
- vip: 10.4.1.27
to:
- "lxc:nodes-api=0"
- - "lxc:nodes-api=1"
- - "lxc:nodes-api=2"
neutron-gateway:
- branch: lp:~narindergupta/charms/trusty/neutron-gateway/ovs-odl
-# num_units: 3
-# constraints: tags=gateway
+ branch: lp:charms/trusty/neutron-gateway
options:
- "ext-port": "00:1e:67:cf:bb:53"
- plugin: ovs-odl
-# instance-mtu: 1400
+# "ext-port": "eth1"
to:
- "nodes-api=0"
- odl-controller:
- branch: lp:~narindergupta/charms/trusty/odl-controller/liberty
- options:
- install-url: "https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.4-Helium-SR4/distribution-karaf-0.2.4-Helium-SR4.tar.gz"
-# install-url: "https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distribution-karaf/0.3.2-Lithium-SR2/distribution-karaf-0.3.2-Lithium-SR2.tar.gz"
- to: lxc:nodes-api=1
"glance":
- charm: "cs:trusty/glance"
- num_units: 3
- options:
- vip: 10.4.1.22
+ branch: lp:charms/trusty/glance
+ num_units: 1
to:
- "lxc:nodes-api=0"
- - "lxc:nodes-api=1"
- - "lxc:nodes-api=2"
- neutron-api-odl:
- branch: lp:~narindergupta/charms/trusty/neutron-api-odl/liberty
- options:
- overlay-network-type: 'vxlan gre'
- security-groups: true
- openvswitch-odl:
- branch: lp:~openstack-charmers/charms/trusty/openvswitch-odl/trunk
+ neutron-openvswitch:
+ branch: lp:~openstack-charmers/charms/trusty/neutron-openvswitch/next
relations:
- - - "glance:ha"
- - "hacluster-glance:ha"
- - - "keystone:ha"
- - "hacluster-keystone:ha"
- - - "neutron-api:ha"
- - "hacluster-neutron:ha"
- - - "nova-cloud-controller:ha"
- - "hacluster-nova:ha"
- - - "openstack-dashboard:ha"
- - "hacluster-horizon:ha"
- - - "nova-compute:amqp"
- - "rabbitmq-server:amqp"
- - - "neutron-gateway:amqp"
+ - - neutron-openvswitch:neutron-plugin-api
+ - neutron-api:neutron-plugin-api
+ - - nova-compute:neutron-plugin
+ - neutron-openvswitch:neutron-plugin
+ - - neutron-openvswitch:amqp
+ - rabbitmq-server:amqp
+ - - "keystone:shared-db"
+ - "mysql:shared-db"
+ - - "nova-cloud-controller:shared-db"
+ - "mysql:shared-db"
+ - - "nova-cloud-controller:amqp"
- "rabbitmq-server:amqp"
+ - - "nova-cloud-controller:image-service"
+ - "glance:image-service"
- - "nova-cloud-controller:identity-service"
- "keystone:identity-service"
+ - - "nova-cloud-controller:cloud-compute"
+ - "nova-compute:cloud-compute"
+ - - "nova-compute:amqp"
+ - "rabbitmq-server:amqp"
+ - - "nova-compute:image-service"
+ - "glance:image-service"
+ - - "glance:shared-db"
+ - "mysql:shared-db"
- - "glance:identity-service"
- "keystone:identity-service"
- - - "neutron-api:identity-service"
+ - - "glance:amqp"
+ - "rabbitmq-server:amqp"
+ - - "openstack-dashboard:identity-service"
- "keystone:identity-service"
+ - - "neutron-api:shared-db"
+ - "mysql:shared-db"
- - "neutron-api:amqp"
- "rabbitmq-server:amqp"
+ - - "nova-cloud-controller:neutron-api"
+ - "neutron-api:neutron-api"
+ - - "neutron-api:identity-service"
+ - "keystone:identity-service"
+ - - "neutron-gateway:amqp"
+ - "rabbitmq-server:amqp"
- - "neutron-gateway:neutron-plugin-api"
- "neutron-api:neutron-plugin-api"
- - - "glance:amqp"
+ - - "nova-cloud-controller:quantum-network-service"
+ - "neutron-gateway:quantum-network-service"
+ - - "ceilometer:amqp"
- "rabbitmq-server:amqp"
- - - "nova-cloud-controller:image-service"
- - "glance:image-service"
- - - "nova-compute:image-service"
+ - - "ceilometer-agent:ceilometer-service"
+ - "ceilometer:ceilometer-service"
+ - - "ceilometer:identity-service"
+ - "keystone:identity-service"
+ - - "ceilometer:identity-notifications"
+ - "keystone:identity-notifications"
+ - - "ceilometer-agent:nova-ceilometer"
+ - "nova-compute:nova-ceilometer"
+ - - "ceilometer:shared-db"
+ - "mongodb:database"
+ - - "heat:shared-db"
+ - "mysql:shared-db"
+ - - "heat:identity-service"
+ - "keystone:identity-service"
+ - - "heat:amqp"
+ - "rabbitmq-server:amqp"
+ - - "cinder:image-service"
- "glance:image-service"
- - - "nova-cloud-controller:cloud-compute"
- - "nova-compute:cloud-compute"
- - - "nova-cloud-controller:amqp"
+ - - "cinder:amqp"
- "rabbitmq-server:amqp"
- - - "nova-cloud-controller:quantum-network-service"
- - "neutron-gateway:quantum-network-service"
- - - "openstack-dashboard:identity-service"
+ - - "cinder:identity-service"
- "keystone:identity-service"
- - - "nova-cloud-controller:neutron-api"
- - "neutron-api:neutron-api"
- - - "neutron-gateway:shared-db"
- - "percona-cluster:shared-db"
- - - "glance:shared-db"
- - "percona-cluster:shared-db"
- - - "keystone:shared-db"
- - "percona-cluster:shared-db"
- - - "nova-cloud-controller:shared-db"
- - "percona-cluster:shared-db"
- - - "neutron-api:shared-db"
- - "percona-cluster:shared-db"
- - - "neutron-api:neutron-plugin-api-subordinate"
- - "neutron-api-odl:neutron-plugin-api-subordinate"
- - - "nova-compute:neutron-plugin"
- - "openvswitch-odl:neutron-plugin"
- - - "neutron-gateway"
- - "openvswitch-odl"
- - - "openvswitch-odl:ovsdb-manager"
- - "odl-controller:ovsdb-manager"
- - - "neutron-api-odl:odl-controller"
- - "odl-controller:controller-api"
+ - - "cinder:cinder-volume-service"
+ - "nova-cloud-controller:cinder-volume-service"
+ - - "cinder-ceph:storage-backend"
+ - "cinder:storage-backend"
+ - - "ceph:client"
+ - "nova-compute:ceph"
+ - - "cinder:shared-db"
+ - "mysql:shared-db"
+ - - "ceph:client"
+ - "cinder-ceph:ceph"
+ - - "ceph:client"
+ - "glance:ceph"
+# - - ceph-osd:mon
+# - ceph:osd
+# - - ceph-radosgw:mon
+# - ceph:radosgw
+# - - ceph-radosgw:identity-service
+# - keystone:identity-service
trusty-liberty-nodes:
inherits: openstack-phase1
overrides:
trusty-liberty:
inherits: openstack-phase2
overrides:
- cluster_count: 3
- os-data-network: 10.4.9.0/24
+ #os-data-network: 10.4.8.0/21
+ #prefer-ipv6: true
region: Canonical
source: "cloud:trusty-liberty"
"openstack-origin": "cloud:trusty-liberty"
series: trusty
+ ceph-osd-replication-count: 1
trusty-kilo-nodes:
inherits: openstack-phase1
trusty-kilo:
inherits: openstack-phase2
overrides:
- cluster_count: 3
- os-data-network: 10.4.9.0/24
+ #os-data-network: 10.4.8.0/21
+ #prefer-ipv6: true
region: Canonical
source: "cloud:trusty-kilo"
"openstack-origin": "cloud:trusty-kilo"
series: trusty
+ ceph-osd-replication-count: 1
trusty-juno-nodes:
trusty-juno:
inherits: openstack-phase2
overrides:
- cluster_count: 3
- os-data-network: 10.4.9.0/24
+ #os-data-network: 10.4.8.0/21
+ #prefer-ipv6: true
region: Canonical
source: "cloud:trusty-juno"
"openstack-origin": "cloud:trusty-juno"
series: trusty
+ ceph-osd-replication-count: 1
# Need to put multiple cases here to decide which bundle to deploy; by default use the odl bundle.
# Below parameters are the defaults and can be changed according to the release
-opnfvsdn=odl
+opnfvsdn=nosdn
opnfvtype=nonha
openstack=liberty
opnfvlab=default
opnfvsdn=`grep sdn: deploy.yaml | cut -d ":" -f2`
}
-usage() { echo "Usage: $0 [-s <odl|opencontrail>]
+usage() { echo "Usage: $0 [-s <nosdn|odl|opencontrail>]
[-t <nonha|ha|tip>]
- [-o <juno|kilo|liberty>]
+ [-o <juno|liberty>]
[-l <default|intelpod5>]
[-r <a|b>]" 1>&2 exit 1; }
deploy() {
    #copy the script which needs to get deployed as part of opnfv release
- echo "deploying now"
+ echo "...... deploying now ......"
echo " " >> environments.yaml
echo " enable-os-refresh-update: false" >> environments.yaml
echo " enable-os-upgrade: false" >> environments.yaml
./01-deploybundle.sh $opnfvtype $openstack $opnfvlab
}
+check_status() {
+ while [ $? -eq 0 ]; do
+ sleep 60
+ echo " still executing the reltionship within charms ..."
+ juju status | grep executing > /dev/null
+ done
+ echo "...... deployment finishing ......."
+}
+
if [ "$#" -eq 0 ]; then
echo "This installtion will use deploy.yaml"
read_config
fi
-echo "deploying started"
+echo "...... deployment started ......"
#deploy_dep
deploy
-echo "deploying finished"
+check_status
+
+echo "...... deployment finished ......."
+
# Defines the sdn plugin to be get installed.
sdn:odl
# Defines the ha type to be get installed.
- type:nonha
+ type:ha
# Defines the lab where release to be get installed.
lab:default
# This file defines the deployment for the MAAS environment which is to be
# deployed and automated.
demo-maas:
+ # Contains the virtual machine parameters for creating the Juju bootstrap
+ # node virtual machine
+ juju-bootstrap:
+ name: bootstrap
+ interfaces: ['bridge=virbr0,model=virtio']
+ memory: 2048
+ vcpus: 2
+ arch: amd64
+ pool: default
+ disk_size: 20G
+ sticky_ip_address:
+ requested_address: 192.168.122.5
+
maas:
# Defines the general setup for the MAAS environment, including the
# username and password for the host as well as the MAAS server.
# Contains the virtual machine parameters for creating the MAAS virtual
# server. Here you can configure the name of the virsh domain, the
# parameters for how the network is attached.
- name: opnfv-maas-intel
- interfaces: ['bridge=brAdm,model=virtio','bridge=brData,model=virtio','bridge=brPublic,model=virtio']
+ name: opnfv-maas
+ interfaces: ['bridge=virbr0,model=virtio']
memory: 4096
- vcpus: 4
+ vcpus: 2
arch: amd64
pool: default
- disk_size: 160G
+ disk_size: 60G
# Apt http proxy setting(s)
- apt_http_proxy:
+ #apt_http_proxy:
+ # Package sources. These will be used on the MAAS controller.
apt_sources:
- ppa:maas/stable
- ppa:juju/stable
virsh:
rsa_priv_key: /home/ubuntu/.ssh/id_rsa
rsa_pub_key: /home/ubuntu/.ssh/id_rsa.pub
- uri: qemu+ssh://ubuntu@10.4.1.1/system
+ uri: qemu+ssh://ubuntu@192.168.122.1/system
# Defines the IP Address that the configuration script will use to
# to access the MAAS controller via SSH.
- ip_address: 10.4.1.2
+ ip_address: 192.168.122.2
# This section allows the user to set a series of options on the
# MAAS server itself. The list of config options can be found in
# - http://maas.ubuntu.com/docs/api.html#maas-server
settings:
main_archive: http://us.archive.ubuntu.com/ubuntu
- upstream_dns: 10.4.0.2
- maas_name: intelpod5
+ upstream_dns: 192.168.122.1
+ maas_name: automaas
# kernel_opts: "console=tty0 console=ttyS1,115200n8"
- # ntp_server: ntp.ubuntu.com
+ ntp_server: ntp.ubuntu.com
# This section is used to define the networking parameters for when
# the node first comes up. It is fed into the meta-data cloud-init
auto eth0
iface eth0 inet static
- address 10.4.1.2
- netmask 255.255.248.0
- network 10.4.0.0
- broadcast 10.4.7.255
- gateway 10.4.0.1
- dns-nameservers 10.4.0.2 127.0.0.1
-
- auto eth1
- iface eth1 inet static
- address 10.4.9.2
- netmask 255.255.248.0
- network 10.4.8.0
- broadcast 10.4.15.255
-
- #auto lo
- #iface lo inet loopback
-
- #auto eth0
- #iface eth0 inet static
- # address 192.168.122.2
- # netmask 255.255.248.0
- # network 192.168.122.0
- # broadcast 192.168.122.255
- # gateway 192.168.122.1
- # dns-nameservers 192.168.122.1 127.0.0.1
+ address 192.168.122.2
+ netmask 255.255.255.0
+ network 192.168.122.0
+ broadcast 192.168.122.255
+ gateway 192.168.122.1
+ dns-nameservers 192.168.122.1 127.0.0.1
+
+ # See https://maas.ubuntu.com/docs/maascli.html#node-groups for
+ # description and full list of supported options.
+ # NOTE: interfaces are added using the node_group_interfaces section
+ # and only one node_group can be created by this bundle.
+ # Additional node groups can be added post deployment.
+ #node_group:
+ # # This is the cluster DNS name.
+ # name: maas
# The node-group-interfaces section is used to configure the MAAS
# network interfaces. Basic configuration is supported, such as which
# ${maas_ip} - the ip address of the MAAS controller
node_group_ifaces:
- device: eth0
- ip: 10.4.1.2
- subnet_mask: 255.255.248.0
- broadcast_ip: 10.4.7.255
- router_ip: 10.4.0.1
+ ip: 192.168.122.2
+ subnet_mask: 255.255.255.0
+ broadcast_ip: 192.168.122.255
+ router_ip: 192.168.122.1
static_range:
- low: 10.4.2.20
- high: 10.4.2.254
+ low: 192.168.122.51
+ high: 192.168.122.60
dynamic_range:
- low: 10.4.1.50
- high: 10.4.1.254
-
- # Defines the physical nodes which are added to the MAAS cluster
- # controller upon startup of the node.
- nodes:
- - name: node5-compute
- tags: compute
- architecture: amd64/generic
- mac_addresses:
- - "00:1e:67:e0:0a:4a"
- - "00:1e:67:e0:0a:4b"
- - "00:1e:67:d0:9a:10"
- - "00:1e:67:d0:9a:11"
- power:
- type: ipmi
- address: 10.4.7.5
- user: root
- pass: root
- driver: LAN_2_0
-# sticky_ip_address:
-# mac_address: "38:63:bb:43:b8:9c"
-# requested_address: 192.168.122.5
-
- - name: node4-control
- tags: control
- architecture: amd64/generic
- mac_addresses:
- - "00:1e:67:e0:08:b0"
- - "00:1e:67:e0:08:b1"
- - "00:1e:67:d0:99:ee"
- - "00:1e:67:d0:99:ef"
- power:
- type: ipmi
- address: 10.4.7.4
- user: root
- pass: root
- driver: LAN_2_0
-# sticky_ip_address:
-# mac_address: "38:63:bb:43:b8:9c"
-# requested_address: 192.168.122.5
+ low: 192.168.122.5
+ high: 192.168.122.50
+
+ # Physical nodes to be added to the MAAS cluster. Nodes will be
+ # configured, commissioned and put into the Ready state so
+ # they are ready to be deployed to.
+ #nodes:
+ # - name: node1
+ # tags: api
+ # architecture: amd64/generic
+ # mac_addresses:
+ # - "38:63:bb:43:b8:9c"
+ # power:
+ # type: ipmi
+ # address: 10.0.1.1
+ # user: maas
+ # pass: passw0rd
+ # driver: LAN_2_0
+ # sticky_ip_address:
+ # mac_address: "38:63:bb:43:b8:9c"
+ # requested_address: 192.168.122.6
- - name: node3-control
- tags: control
- architecture: amd64/generic
- mac_addresses:
- - "00:1e:67:e0:08:7e"
- - "00:1e:67:e0:08:7f"
- - "00:1e:67:c2:23:d8"
- - "00:1e:67:c2:23:d9"
- power:
- type: ipmi
- address: 10.4.7.3
- user: root
- pass: root
- driver: LAN_2_0
-# sticky_ip_address:
-# mac_address: "38:63:bb:43:b8:9c"
-# requested_address: 192.168.122.5
-
- - name: node2-control
- tags: control
- architecture: amd64/generic
- mac_addresses:
- - "00:1e:67:cf:b8:92"
- - "00:1e:67:cf:b8:93"
- - "00:1e:67:d0:9b:0c"
- - "00:1e:67:d0:9b:0d"
- power:
- type: ipmi
- address: 10.4.7.2
- user: root
- pass: root
- driver: LAN_2_0
-# sticky_ip_address:
-# mac_address: "38:63:bb:43:b8:9c"
-# requested_address: 192.168.122.5
-
-# - name: jenkins-slave
-# tags: jenkins-slave
-# architecture: amd64/generic
-# mac_addresses:
-# - "52:54:00:f0:5c:53"
-# power:
-# type: virsh
-# address: qemu+ssh://ubuntu@10.4.1.1/system
-
- # Contains the virtual machine parameters for creating the Juju bootstrap
- # node virtual machine
- juju-bootstrap:
- name: bootstrap
- interfaces: ['bridge=brAdm,model=virtio']
- memory: 4096
- vcpus: 4
- arch: amd64
- pool: default
- disk_size: 120G
--- /dev/null
+#!/bin/bash
+#placeholder for deployment script.
+set -ex
+
+case "$1" in
+ 'nonha' )
+ cp nosdn/juju-deployer/ovs.yaml ./bundles.yaml
+ ;;
+ 'ha' )
+ cp nosdn/juju-deployer/ovs-ha.yaml ./bundles.yaml
+ ;;
+ 'tip' )
+ cp nosdn/juju-deployer/ovs-tip.yaml ./bundles.yaml
+ cp common/source/* ./
+ sed -i -- "s|branch: master|branch: stable/$2|g" ./*.yaml
+ ;;
+ * )
+ cp nosdn/juju-deployer/ovs.yaml ./bundles.yaml
+ ;;
+esac
+
+case "$3" in
+ 'orangepod2' )
+ cp maas/orange/pod2/control-interfaces.host trusty/ubuntu-nodes-controller/network/interfaces.host
+ cp maas/orange/pod2/lxc-add-more-interfaces trusty/ubuntu-nodes-controller/lxc/add-more-interfaces
+ cp maas/orange/pod2/compute-interfaces.host trusty/ubuntu-nodes-compute/network/interfaces.host
+ cp maas/orange/pod2/lxc-add-more-interfaces trusty/ubuntu-nodes-compute/lxc/add-more-interfaces
+        # As per your lab vip address list; by default it uses 10.4.1.11 - 10.4.1.20
+ sed -i -- 's/10.4.1.1/192.168.2.2/g' ./bundles.yaml
+ # choose the correct interface to use for data network
+ sed -i -- 's/#os-data-network: 10.4.8.0\/21/os-data-network: 192.168.12.0\/24/g' ./bundles.yaml
+ # Choose the external port to go out from gateway to use.
+ sed -i -- 's/# "ext-port": "eth1"/ "ext-port": "eth1"/g' ./bundles.yaml
+ ;;
+ 'intelpod6' )
+ cp maas/intel/pod6/interfaces.host trusty/ubuntu-nodes-controller/network/interfaces.host
+ cp maas/intel/pod6/lxc-add-more-interfaces trusty/ubuntu-nodes-controller/lxc/add-more-interfaces
+ cp maas/intel/pod6/interfaces.host trusty/ubuntu-nodes-compute/network/interfaces.host
+ cp maas/intel/pod6/lxc-add-more-interfaces trusty/ubuntu-nodes-compute/lxc/add-more-interfaces
+        # As per your lab vip address list; by default it uses 10.4.1.11 - 10.4.1.20
+ sed -i -- 's/10.4.1.1/10.4.1.2/g' ./bundles.yaml
+ # choose the correct interface to use for data network
+ sed -i -- 's/#os-data-network: 10.4.8.0\/21/os-data-network: 10.4.9.0\/24/g' ./bundles.yaml
+ # Choose the external port to go out from gateway to use.
+ sed -i -- 's/# "ext-port": "eth1"/ "ext-port": "brPublic"/g' ./bundles.yaml
+ ;;
+ 'intelpod5' )
+ cp maas/intel/pod5/interfaces.host trusty/ubuntu-nodes-controller/network/interfaces.host
+ cp maas/intel/pod5/lxc-add-more-interfaces trusty/ubuntu-nodes-controller/lxc/add-more-interfaces
+ cp maas/intel/pod5/interfaces.host trusty/ubuntu-nodes-compute/network/interfaces.host
+ cp maas/intel/pod5/lxc-add-more-interfaces trusty/ubuntu-nodes-compute/lxc/add-more-interfaces
+        # As per your lab vip address list; by default it uses 10.4.1.11 - 10.4.1.20
+ sed -i -- 's/10.4.1.1/10.4.1.2/g' ./bundles.yaml
+ # choose the correct interface to use for data network
+ sed -i -- 's/#os-data-network: 10.4.8.0\/21/os-data-network: 10.4.9.0\/24/g' ./bundles.yaml
+ # Choose the external port to go out from gateway to use.
+ sed -i -- 's/# "ext-port": "eth1"/ "ext-port": "brPublic"/g' ./bundles.yaml
+ ;;
+ 'attvirpod1' )
+ cp maas/att/virpod1/interfaces.host trusty/ubuntu-nodes-controller/network/interfaces.host
+ cp maas/att/virpod1/interfaces.host trusty/ubuntu-nodes-compute/network/interfaces.host
+ cp maas/att/virpod1/lxc-add-more-interfaces trusty/ubuntu-nodes-controller/lxc/add-more-interfaces
+ cp maas/att/virpod1/lxc-add-more-interfaces trusty/ubuntu-nodes-compute/lxc/add-more-interfaces
+        # As per your lab vip address list; by default it uses 10.4.1.11 - 10.4.1.20
+ sed -i -- 's/10.4.1.1/192.168.10.1/g' ./bundles.yaml
+ # Choose the external port to go out from gateway to use.
+ sed -i -- 's/# "ext-port": "eth1"/ "ext-port": "eth1"/g' ./bundles.yaml
+ ;;
+esac
+
+echo "... Deployment Started ...."
+case "$1" in
+ 'nonha' )
+ juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
+ juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ ;;
+ 'ha' )
+ juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
+ juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ ;;
+ 'tip' )
+ juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
+ juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ ;;
+ * )
+ juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
+ juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ ;;
+esac
+
--- /dev/null
+OpenDaylight Deployment with Juju
+=================================
+
+This readme contains instructions for checking out and deploying Juju charms for
+OpenDaylight.
+
+The charms are targeted at Trusty.
+
+
+Checkout charms
+---------------
+
+Charms are hosted on Launchpad.
+You need to 'sudo apt-get install bzr' first.
+
+Follow these steps to checkout code:
+
+cd <deployer dir>
+./fetch-charms.sh
+
+This will checkout the relevant charms into 'src' and create any Juju symlinks
+in 'charms'.
+
+
+Deploy with cloud-sh-odl
+------------------------
+
+cloud-sh-odl is a collection of development shell scripts to deploy
+and setup OpenStack with OpenDaylight using Juju's local provider. This will
+create 3 KVMs as follows:
+
+*KVM #1 - Keystone, Glance, Neutron Server, Nova Cloud Controller, Horizon,
+ MySQL, RabbitMQ, OpenDaylight Controller
+
+*KVM #2 - Neutron agents
+
+*KVM #3 - Nova Compute
+
+You'll require approx. 13GB RAM with 40GB+ disk space.
+Deployment can take anywhere between 20 mins to 1 hour.
+
+You need to 'sudo apt-get install juju juju-local uvtool', and
+logout/login in order to pick up libvirt group permissions before
+proceeding. See https://bugs.launchpad.net/juju-core/+bug/1308088.
+
+Follow these steps:
+
+ssh-keygen
+ (if you don't already have a key at ~/.ssh/id_rsa).
+
+cp cloud-sh-odl/environments.yaml ~/.juju
+ (or create your own default local environment in your existing
+ environments.yaml file)
+
+cd cloud-sh-odl
+
+./deploy.sh
+
+This will log to 'out.log'.
+
+This will deploy OpenStack and import Trusty's daily image into Glance.
+
+Horizon will be located on the machine 'juju status openstack-dashboard' -
+http://<ip>/horizon.
+Admin credentials will be written to cloud/admin-openrc.
+
+The deployment can be destroyed with:
+
+juju destroy-environment local
+
+
+Deploy with Juju Deployer
+-------------------------
+
+Juju Deployer can deploy a preset configuration of charms given a yaml
+configuration file. There is a configuration file in
+'juju-deployer/odl.yaml'.
+
+You need to 'sudo apt-get install juju-deployer' first.
+
+Then:
+
+cd juju-deployer
+
+juju-deployer -c odl.yaml -d trusty-icehouse-odl
+
+Juju Deployer will branch its own copy of the remote charms.
--- /dev/null
+#!/bin/sh -e
+
+. ~/admin-openrc
+
+# adjust tiny image
+nova flavor-delete m1.tiny
+nova flavor-create m1.tiny 1 512 8 1
+
+# import key pair
+nova keypair-add --pub-key id_rsa.pub ubuntu-keypair
--- /dev/null
+#!/bin/sh -e
+
+modprobe kvm_intel
+printf "\n%s\n" kvm_intel >> /etc/modules
+service libvirt-bin restart
+
+sed -e 's/KSM_ENABLED=1/KSM_ENABLED=0/' -i /etc/default/qemu-kvm
+service qemu-kvm restart
--- /dev/null
+export JUJU_REPOSITORY=../charms
+
+DEFAULT_SERIES=trusty
+
+CHARM_GLANCE=local:trusty/glance
+
+CHARM_KEYSTONE=local:trusty/keystone
+CHARM_KEYSTONE_DEPLOY_OPTS="--config config.yaml"
+
+CHARM_MYSQL=trusty/mysql
+CHARM_MYSQL_DEPLOY_OPTS="--config config.yaml"
+
+CHARM_NEUTRON_API=local:trusty/neutron-api
+CHARM_NEUTRON_API_DEPLOY_OPTS="--config config.yaml"
+
+CHARM_NEUTRON_GATEWAY=local:trusty/quantum-gateway
+CHARM_NEUTRON_GATEWAY_DEPLOY_OPTS="--config config.yaml"
+
+CHARM_NEUTRON_ODL=local:trusty/neutron-odl
+
+CHARM_NOVA_CLOUD_CONTROLLER=local:trusty/nova-cloud-controller
+CHARM_NOVA_CLOUD_CONTROLLER_DEPLOY_OPTS="--config config.yaml"
+
+CHARM_NOVA_COMPUTE=local:trusty/nova-compute
+
+CHARM_ODL_CONTROLLER=local:trusty/odl-controller
+
+CHARM_OPENSTACK_DASHBOARD=local:trusty/openstack-dashboard
+
+CHARM_RABBITMQ_SERVER=trusty/rabbitmq-server
--- /dev/null
+keystone:
+ admin-password: password
+
+mysql:
+ dataset-size: 10%
+ max-connections: 1000
+
+neutron-api:
+ neutron-plugin: odl
+ neutron-security-groups: True
+
+neutron-gateway:
+ instance-mtu: 1400
+ plugin: odl
+
+nova-cloud-controller:
+ network-manager: Neutron
+ quantum-security-groups: "yes"
--- /dev/null
+#!/bin/sh -e
+exec ./openstack.sh ./config.sh 2>&1 | tee out.log
--- /dev/null
+default: local
+
+environments:
+ local:
+ type: local
+ container: kvm
+ default-series: trusty
+ lxc-clone: true
--- /dev/null
+#!/bin/sh -e
+
+. ~/admin-openrc
+
+wget http://cloud-images.ubuntu.com/trusty/current/MD5SUMS
+
+wget http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+glance image-create --name ubuntu-trusty-daily --disk-format qcow2 --container-format bare --owner admin --file trusty-server-cloudimg-amd64-disk1.img --checksum $(grep trusty-server-cloudimg-amd64-disk1.img MD5SUMS | cut -d " " -f 1) --is-public True
--- /dev/null
+#!/bin/sh -e
+
+DEBIAN_FRONTEND=noninteractive apt-get -qy -o Dpkg::Options::=--force-confdef -o Dpkg::Options::=--force-confold install lxc < /dev/null
+
+sed -e 's/^USE_LXC_BRIDGE="true"/USE_LXC_BRIDGE="false"/' -i /etc/default/lxc
+service lxc-net restart
+
+ifdown eth0
+mv /etc/network/interfaces.d/eth0.cfg /etc/network/interfaces.d/eth0.cfg.bak
+cat <<-"EOF" > /etc/network/interfaces.d/bridge.cfg
+ auto eth0
+ iface eth0 inet manual
+
+ auto lxcbr0
+ iface lxcbr0 inet dhcp
+ bridge_ports eth0
+ EOF
+ifup eth0 lxcbr0
--- /dev/null
+#!/bin/sh -ex
+
+agentState()
+{
+ juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"machines\"][\"$1\"][\"agent-state\"]" 2> /dev/null
+}
+
+agentStateUnit()
+{
+ juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"agent-state\"]" 2> /dev/null
+}
+
+configOpenrc()
+{
+ cat <<-EOF
+ export OS_USERNAME=$1
+ export OS_PASSWORD=$2
+ export OS_TENANT_NAME=$3
+ export OS_AUTH_URL=$4
+ export OS_REGION_NAME=$5
+ EOF
+}
+
+unitAddress()
+{
+ juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"public-address\"]" 2> /dev/null
+}
+
+unitMachine()
+{
+ juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"machine\"]" 2> /dev/null
+}
+
+waitForMachine()
+{
+ for machine; do
+ while [ "$(agentState $machine)" != started ]; do
+ sleep 5
+ done
+ done
+}
+
+waitForService()
+{
+ for service; do
+ while [ "$(agentStateUnit "$service" 0)" != started ]; do
+ sleep 5
+ done
+ done
+}
+
+if [ $# -ne 0 ]; then
+ . "$1"
+fi
+
+juju bootstrap
+waitForMachine 0
+
+spare_cpus=$(($(grep processor /proc/cpuinfo | wc -l) - 5))
+if [ $spare_cpus -gt 0 ]; then
+ spare_cpus=$(((spare_cpus * 3) / 4))
+else
+ spare_cpus=0
+fi
+
+extra_cpus=0
+[ $spare_cpus -ne 0 ] && extra_cpus=$((1 + (((spare_cpus - 1) * 3) / 4))) && spare_cpus=$((spare_cpus - extra_cpus))
+juju add-machine --constraints "cpu-cores=$((1 + extra_cpus)) mem=8G root-disk=20G" --series $DEFAULT_SERIES
+
+juju deploy --constraints mem=1G $CHARM_NEUTRON_GATEWAY_DEPLOY_OPTS "${CHARM_NEUTRON_GATEWAY:-quantum-gateway}" neutron-gateway
+
+juju deploy --constraints "cpu-cores=$((1 + spare_cpus)) mem=4G root-disk=20G" $CHARM_NOVA_COMPUTE_DEPLOY_OPTS "${CHARM_NOVA_COMPUTE:-nova-compute}"
+
+waitForMachine 1
+juju scp lxc-network.sh 1:
+juju run --machine 1 "sudo ./lxc-network.sh"
+juju deploy --to lxc:1 $CHARM_MYSQL_DEPLOY_OPTS "${CHARM_MYSQL:-mysql}"
+juju deploy --to lxc:1 $CHARM_RABBITMQ_SERVER_DEPLOY_OPTS "${CHARM_RABBITMQ_SERVER:-rabbitmq-server}"
+juju deploy --to lxc:1 $CHARM_KEYSTONE_DEPLOY_OPTS "${CHARM_KEYSTONE:-keystone}"
+juju deploy --to lxc:1 $CHARM_NOVA_CLOUD_CONTROLLER_DEPLOY_OPTS "${CHARM_NOVA_CLOUD_CONTROLLER:-nova-cloud-controller}"
+juju deploy --to lxc:1 $CHARM_NEUTRON_API_DEPLOY_OPTS "${CHARM_NEUTRON_API:-neutron-api}"
+juju deploy --to lxc:1 $CHARM_GLANCE_DEPLOY_OPTS "${CHARM_GLANCE:-glance}"
+juju deploy --to lxc:1 $CHARM_OPENSTACK_DASHBOARD_DEPLOY_OPTS "${CHARM_OPENSTACK_DASHBOARD:-openstack-dashboard}"
+# opendaylight
+juju deploy --to lxc:1 $CHARM_ODL_CONTROLLER_DEPLOY_OPTS "${CHARM_ODL_CONTROLLER:-odl-controller}"
+juju deploy $CHARM_NEUTRON_ODL_DEPLOY_OPTS "${CHARM_NEUTRON_ODL:-neutron-odl}"
+
+# relation must be set first
+# no official way of knowing when this relation hook will fire
+waitForService mysql keystone
+juju add-relation keystone mysql
+sleep 60
+
+waitForService rabbitmq-server nova-cloud-controller glance
+juju add-relation nova-cloud-controller mysql
+juju add-relation nova-cloud-controller rabbitmq-server
+juju add-relation nova-cloud-controller glance
+juju add-relation nova-cloud-controller keystone
+sleep 60
+
+waitForService neutron-api
+juju add-relation neutron-api mysql
+juju add-relation neutron-api rabbitmq-server
+juju add-relation neutron-api keystone
+juju add-relation neutron-api nova-cloud-controller
+sleep 60
+
+waitForService openstack-dashboard neutron-gateway nova-compute
+juju add-relation neutron-gateway mysql
+juju add-relation neutron-gateway:amqp rabbitmq-server:amqp
+juju add-relation neutron-gateway nova-cloud-controller
+juju add-relation neutron-gateway neutron-api
+juju add-relation nova-compute:shared-db mysql:shared-db
+juju add-relation nova-compute:amqp rabbitmq-server:amqp
+juju add-relation nova-compute glance
+juju add-relation nova-compute nova-cloud-controller
+juju add-relation glance mysql
+juju add-relation glance keystone
+juju add-relation openstack-dashboard keystone
+sleep 60
+
+# opendaylight
+waitForService odl-controller
+juju add-relation neutron-api odl-controller
+juju add-relation neutron-gateway odl-controller
+juju add-relation nova-compute neutron-odl
+juju add-relation neutron-odl odl-controller
+sleep 60
+
+# enable kvm on compute
+machine=$(unitMachine nova-compute 0)
+juju scp compute.sh $machine:
+juju run --machine $machine "sudo ./compute.sh"
+
+mkdir -m 0700 -p cloud
+controller_address=$(unitAddress keystone 0)
+configOpenrc admin password Admin http://$controller_address:5000/v2.0 RegionOne > cloud/admin-openrc
+chmod 0600 cloud/admin-openrc
+
+machine=$(unitMachine nova-cloud-controller 0)
+juju scp cloud-setup.sh cloud/admin-openrc ~/.ssh/id_rsa.pub $machine:
+juju run --machine $machine ./cloud-setup.sh
+
+machine=$(unitMachine glance 0)
+juju scp glance.sh cloud/admin-openrc $machine:
+juju run --machine $machine ./glance.sh
--- /dev/null
+#!/bin/sh -ex
+
+mkdir -p src/charms/trusty
+
+# openstack
+bzr branch lp:~openstack-charmers/charms/trusty/glance/next src/charms/trusty/glance-next
+bzr branch lp:~openstack-charmers/charms/trusty/keystone/next src/charms/trusty/keystone-next
+bzr branch lp:~sdn-charmers/charms/trusty/neutron-api/odl src/charms/trusty/neutron-api-odl
+bzr branch lp:~openstack-charmers/charms/trusty/nova-cloud-controller/next src/charms/trusty/nova-cloud-controller-next
+bzr branch lp:~sdn-charmers/charms/trusty/nova-compute/odl src/charms/trusty/nova-compute-odl
+bzr branch lp:~openstack-charmers/charms/trusty/openstack-dashboard/next src/charms/trusty/openstack-dashboard-next
+bzr branch lp:~sdn-charmers/charms/trusty/quantum-gateway/odl src/charms/trusty/quantum-gateway-odl
+
+# opendaylight
+bzr branch lp:~sdn-charmers/charms/trusty/odl-controller/trunk src/charms/trusty/odl-controller
+bzr branch lp:~sdn-charmers/charms/trusty/neutron-odl/trunk src/charms/trusty/neutron-odl
+
+mkdir -p charms/trusty
+(cd charms/trusty; ln -s ../../src/charms/trusty/* .)
--- /dev/null
+# vim: set ts=2 et:
+openstack-phase1:
+ series: trusty
+ services:
+ nodes-api:
+ charm: "local:trusty/ubuntu-nodes-controller"
+ num_units: 3
+ constraints: tags=control
+ nodes-compute:
+ charm: "local:trusty/ubuntu-nodes-compute"
+ num_units: 1
+ constraints: tags=compute
+ "ntp":
+ charm: "cs:trusty/ntp"
+ relations:
+ - - "ntp:juju-info"
+ - "nodes-api:juju-info"
+ - - "ntp:juju-info"
+ - "nodes-compute:juju-info"
+openstack-phase2:
+ inherits: openstack-phase1
+ services:
+ "percona-cluster":
+ charm: "cs:trusty/percona-cluster"
+ num_units: 3
+ options:
+ "dataset-size": 2G
+ "lp1366997-workaround": true
+ "max-connections": 10000
+ "root-password": 293rhc395m
+ "sst-password": 127rc14t51
+ vip: 10.4.1.15
+ to:
+ - "lxc:nodes-api=0"
+ - "lxc:nodes-api=1"
+ - "lxc:nodes-api=2"
+ "hacluster-keystone":
+ charm: "cs:trusty/hacluster"
+ options:
+ corosync_key: |
+ 'QisjNpoKHLkvyc3PEdDHP5VZjgD0kOk9t0ABPWmCELlu5AgWgnW6bJd34BVXO7VhW+3ZwNQM3W+bZ3pl28ftY4ki4N/S0KLFNia5egFgiA3AH2QXLnokS7OYXuu6GMJJ1S5mDe6P5zU+tfJe/Mebq24u7CgR0gAvohSPRdDTwdY='
+ "hacluster-horizon":
+ charm: "cs:trusty/hacluster"
+ options:
+ corosync_key: |
+ 'aCVPHw6XYne+Hxv0WPiM3+yEfeIxs0Ly0EMKB494Rdzjf5rE52GcXqqxuvIa/nXJ4GCR+UdKk9FEwcASfYkeu3HDWUld9uTE6pOc+ibWYnybNH7VBfEHW8h9YmQKs3HD2T3wlTcS2irU4CUW7/IKNok4etYdM3iFn1K2ReSGXEI='
+ "hacluster-nova":
+ charm: "cs:trusty/hacluster"
+ options:
+ corosync_key: |
+ 'gel86qmEze8dYKYbfqIgRpqapJpKtdYL2hxC1y5nWYBPq7EMf6V8mF01IjeUkSRs14CUDrsPpT4PWeom7EOY2fleuLx/aIuqQUfEDkhf/gvaz7BaU4hrmTCoDBK7/HvEwY+/wu4qkEeckzSRPsm9MYzqnLRshh8yjZJ70xU/mmk='
+ "hacluster-neutron":
+ charm: "cs:trusty/hacluster"
+ options:
+ corosync_key: |
+ 'KNhb4++3jlllbnscS5D3qdzOJDsQPEeZ7zOLZJHbkKrRjX9gRCijVVOiv2JCvq03HqQ7LIufQzWGl9Za8qh0f6QmQ3XhFh/Cb/3WaYFj+tEf0zArWv+8miswmM1z4eyTSrTWBq0dTgx1z96wjBxP5HV0+1LWW+3Ei4oZWyRGeR0='
+ "hacluster-glance":
+ charm: "cs:trusty/hacluster"
+ options:
+ corosync_key: |
+ 'el1dd8107J5mwQDPS7tEJPZrr0XFfL95+Tku/QG90W5Q5f5SP4W8TRfKvddGmZWZl2lVd1neG5WqaHa1mq/aScJpoflzVAJCvshN7Gd2AjHhLNNugeI8S90j/7wrKUhqiCAlkKaeri2xs5bB5PZ7Z9AHuNZL7SW1al8lxrKhUFI='
+ "hacluster-ceilometer":
+ charm: "cs:trusty/hacluster"
+ options:
+ corosync_key: |
+ 'cHkgKGJpdHMgPSA5NjApLgpQcmVzcyBrZXlzIG9uIHlvdXIga2V5Ym9hcmQgdG8gZ2VuZXJhdGUgZW50cm9weSAoYml0cyA9IDEwMDgpLgpXcml0aW5nIGNvcm9zeW5jIGtleSB0byAvZXRjL2Nvcm9zeW5jL2F1dGhrZXkuCg=='
+ "ceilometer":
+ branch: "lp:~openstack-charmers/charms/trusty/ceilometer/next"
+ num_units: 3
+ options:
+ vip: 10.4.1.14
+ to:
+ - "lxc:nodes-api=0"
+ - "lxc:nodes-api=1"
+ - "lxc:nodes-api=2"
+ "ceilometer-agent":
+ branch: "lp:~openstack-charmers/charms/trusty/ceilometer-agent/next"
+ "mongodb":
+ charm: "cs:trusty/mongodb"
+ num_units: 1
+ to:
+ - "lxc:nodes-api=0"
+ heat:
+ branch: "lp:~openstack-charmers/charms/trusty/heat/next"
+ to:
+ - "lxc:nodes-api=0"
+ ceph:
+ charm: cs:trusty/ceph
+ num_units: 1
+ options:
+ fsid: 5a791d94-980b-11e4-b6f6-3c970e8b1cf7
+ monitor-secret: AQAi5a9UeJXUExAA+By9u+GPhl8/XiUQ4nwI3A==
+ osd-devices: /srv
+ monitor-count: 1
+# osd-reformat: 'yes'
+# ceph-cluster-network: 192.168.0.0/24
+# ceph-public-network: 192.168.0.0/24
+ to:
+ - "nodes-compute=0"
+# - "nodes-compute=1"
+# ceph-osd:
+# charm: cs:trusty/ceph-osd
+# num_units: 1
+# options:
+# osd-devices: /dev/sdb
+# osd-reformat: 'yes'
+# to:
+# - "lxc:nodes-api=0"
+# ceph-radosgw:
+# charm: cs:trusty/ceph-radosgw
+# num_units: 1
+# options:
+# use-embedded-webserver: true
+# to:
+# - "lxc:nodes-api=0"
+ cinder:
+ charm: cs:trusty/cinder
+ num_units: 3
+ options:
+ block-device: None
+ glance-api-version: 2
+# ha-mcastport: 5401
+ to:
+ - "lxc:nodes-api=0"
+ - "lxc:nodes-api=1"
+ - "lxc:nodes-api=2"
+ cinder-ceph:
+ charm: cs:trusty/cinder-ceph
+ "rabbitmq-server":
+ branch: lp:charms/trusty/rabbitmq-server
+ num_units: 3
+ to:
+ - "lxc:nodes-api=0"
+ - "lxc:nodes-api=1"
+ - "lxc:nodes-api=2"
+ "keystone":
+ branch: lp:charms/trusty/keystone
+ num_units: 3
+ options:
+# ha-mcastport: 5402
+ "admin-password": openstack
+ "admin-token": admin
+ vip: 10.4.1.13
+ to:
+ - "lxc:nodes-api=0"
+ - "lxc:nodes-api=1"
+ - "lxc:nodes-api=2"
+ "openstack-dashboard":
+ branch: lp:charms/trusty/openstack-dashboard
+ num_units: 3
+ options:
+ secret: admin
+ vip: 10.4.1.11
+ webroot: /
+ to:
+ - "lxc:nodes-api=0"
+ - "lxc:nodes-api=1"
+ - "lxc:nodes-api=2"
+ "nova-compute":
+ branch: lp:~openstack-charmers/charms/trusty/nova-compute/next
+ num_units: 1
+ options:
+ "enable-live-migration": False
+ "manage-neutron-plugin-legacy-mode": False
+ to:
+ - "nodes-compute=0"
+# - "nodes-compute=1"
+# - "nodes-api=0"
+# - "nodes-api=1"
+# - "nodes-api=2"
+ "nova-cloud-controller":
+ branch: lp:charms/trusty/nova-cloud-controller
+ num_units: 3
+ options:
+ "console-access-protocol": novnc
+ "network-manager": Neutron
+ "quantum-security-groups": "yes"
+ "service-guard": true
+ vip: 10.4.1.16
+ to:
+ - "lxc:nodes-api=0"
+ - "lxc:nodes-api=1"
+ - "lxc:nodes-api=2"
+ "neutron-api":
+ branch: lp:~openstack-charmers/charms/trusty/neutron-api/next
+ num_units: 3
+ options:
+ neutron-security-groups: True
+ manage-neutron-plugin-legacy-mode: False
+ vip: 10.4.1.17
+ to:
+ - "lxc:nodes-api=0"
+ - "lxc:nodes-api=1"
+ - "lxc:nodes-api=2"
+ neutron-gateway:
+ branch: lp:charms/trusty/neutron-gateway
+ options:
+# "ext-port": "eth1"
+ to:
+ - "nodes-api=0"
+ "glance":
+ branch: lp:charms/trusty/glance
+ num_units: 3
+ options:
+ vip: 10.4.1.12
+# ha-mcastport: 5402
+ to:
+ - "lxc:nodes-api=0"
+ - "lxc:nodes-api=1"
+ - "lxc:nodes-api=2"
+ neutron-openvswitch:
+ branch: lp:~openstack-charmers/charms/trusty/neutron-openvswitch/trunk
+ relations:
+ - - "glance:ha"
+ - "hacluster-glance:ha"
+ - - "keystone:ha"
+ - "hacluster-keystone:ha"
+ - - "neutron-api:ha"
+ - "hacluster-neutron:ha"
+ - - "nova-cloud-controller:ha"
+ - "hacluster-nova:ha"
+ - - "openstack-dashboard:ha"
+ - "hacluster-horizon:ha"
+ - - "ceilometer:ha"
+ - "hacluster-ceilometer:ha"
+ - - neutron-openvswitch:neutron-plugin-api
+ - neutron-api:neutron-plugin-api
+ - - nova-compute:neutron-plugin
+ - neutron-openvswitch:neutron-plugin
+ - - neutron-openvswitch:amqp
+ - rabbitmq-server:amqp
+ - - "keystone:shared-db"
+ - "percona-cluster:shared-db"
+ - - "nova-cloud-controller:shared-db"
+ - "percona-cluster:shared-db"
+ - - "nova-cloud-controller:amqp"
+ - "rabbitmq-server:amqp"
+ - - "nova-cloud-controller:image-service"
+ - "glance:image-service"
+ - - "nova-cloud-controller:identity-service"
+ - "keystone:identity-service"
+ - - "nova-cloud-controller:cloud-compute"
+ - "nova-compute:cloud-compute"
+ - - "nova-compute:amqp"
+ - "rabbitmq-server:amqp"
+ - - "nova-compute:image-service"
+ - "glance:image-service"
+ - - "glance:shared-db"
+ - "percona-cluster:shared-db"
+ - - "glance:identity-service"
+ - "keystone:identity-service"
+ - - "glance:amqp"
+ - "rabbitmq-server:amqp"
+ - - "openstack-dashboard:identity-service"
+ - "keystone:identity-service"
+ - - "neutron-api:shared-db"
+ - "percona-cluster:shared-db"
+ - - "neutron-api:amqp"
+ - "rabbitmq-server:amqp"
+ - - "nova-cloud-controller:neutron-api"
+ - "neutron-api:neutron-api"
+ - - "neutron-api:identity-service"
+ - "keystone:identity-service"
+ - - "neutron-gateway:amqp"
+ - "rabbitmq-server:amqp"
+ - - "neutron-gateway:neutron-plugin-api"
+ - "neutron-api:neutron-plugin-api"
+ - - "nova-cloud-controller:quantum-network-service"
+ - "neutron-gateway:quantum-network-service"
+ - - "ceilometer:amqp"
+ - "rabbitmq-server:amqp"
+ - - "ceilometer-agent:ceilometer-service"
+ - "ceilometer:ceilometer-service"
+ - - "ceilometer:identity-service"
+ - "keystone:identity-service"
+ - - "ceilometer:identity-notifications"
+ - "keystone:identity-notifications"
+ - - "ceilometer-agent:nova-ceilometer"
+ - "nova-compute:nova-ceilometer"
+ - - "ceilometer:shared-db"
+ - "mongodb:database"
+ - - "heat:shared-db"
+ - "percona-cluster:shared-db"
+ - - "heat:identity-service"
+ - "keystone:identity-service"
+ - - "heat:amqp"
+ - "rabbitmq-server:amqp"
+ - - "cinder:image-service"
+ - "glance:image-service"
+ - - "cinder:amqp"
+ - "rabbitmq-server:amqp"
+ - - "cinder:identity-service"
+ - "keystone:identity-service"
+ - - "cinder:cinder-volume-service"
+ - "nova-cloud-controller:cinder-volume-service"
+ - - "cinder-ceph:storage-backend"
+ - "cinder:storage-backend"
+ - - "ceph:client"
+ - "nova-compute:ceph"
+ - - "cinder:shared-db"
+ - "percona-cluster:shared-db"
+ - - "ceph:client"
+ - "cinder-ceph:ceph"
+ - - "ceph:client"
+ - "glance:ceph"
+trusty-liberty-nodes:
+ inherits: openstack-phase1
+ overrides:
+ series: trusty
+
+trusty-liberty:
+ inherits: openstack-phase2
+ overrides:
+ cluster_count: 3
+ #os-data-network: 10.4.8.0/21
+ #prefer-ipv6: true
+ region: Canonical
+ source: "cloud:trusty-liberty"
+ "openstack-origin": "cloud:trusty-liberty"
+ series: trusty
+ ceph-osd-replication-count: 1
+
+trusty-kilo-nodes:
+ inherits: openstack-phase1
+ overrides:
+ series: trusty
+
+trusty-kilo:
+ inherits: openstack-phase2
+ overrides:
+ cluster_count: 3
+ #os-data-network: 10.4.8.0/21
+ #prefer-ipv6: true
+ region: Canonical
+ source: "cloud:trusty-kilo"
+ "openstack-origin": "cloud:trusty-kilo"
+ series: trusty
+ ceph-osd-replication-count: 1
+
+
+trusty-juno-nodes:
+ inherits: openstack-phase1
+ overrides:
+ series: trusty
+
+trusty-juno:
+ inherits: openstack-phase2
+ overrides:
+ cluster_count: 3
+ #os-data-network: 10.4.8.0/21
+ #prefer-ipv6: true
+ region: Canonical
+ source: "cloud:trusty-juno"
+ "openstack-origin": "cloud:trusty-juno"
+ series: trusty
+ ceph-osd-replication-count: 1
+
+
--- /dev/null
+# vim: set ts=2 et:
+openstack-phase1:
+ series: trusty
+ services:
+ nodes-api:
+ charm: "local:trusty/ubuntu-nodes-controller"
+ num_units: 1
+ constraints: tags=control
+ nodes-compute:
+ charm: "local:trusty/ubuntu-nodes-compute"
+ num_units: 1
+ constraints: tags=compute
+ "ntp":
+ charm: "cs:trusty/ntp"
+ relations:
+ - - "ntp:juju-info"
+ - "nodes-api:juju-info"
+ - - "ntp:juju-info"
+ - "nodes-compute:juju-info"
+openstack-phase2:
+ inherits: openstack-phase1
+ services:
+ "mysql":
+ charm: "cs:trusty/mysql"
+ num_units: 1
+ options:
+ "dataset-size": 2G
+ "max-connections": 10000
+ to:
+ - "lxc:nodes-api=0"
+ "ceilometer":
+ branch: "lp:~openstack-charmers/charms/trusty/ceilometer/next"
+ num_units: 1
+ to:
+ - "lxc:nodes-api=0"
+ "ceilometer-agent":
+ branch: "lp:~openstack-charmers/charms/trusty/ceilometer-agent/next"
+ "mongodb":
+ charm: "cs:trusty/mongodb"
+ num_units: 1
+ to:
+ - "lxc:nodes-api=0"
+ "heat":
+ branch: "lp:~openstack-charmers/charms/trusty/heat/next"
+ to:
+ - "lxc:nodes-api=0"
+ ceph:
+ charm: cs:trusty/ceph
+ num_units: 1
+ options:
+ fsid: 5a791d94-980b-11e4-b6f6-3c970e8b1cf7
+ monitor-secret: AQAi5a9UeJXUExAA+By9u+GPhl8/XiUQ4nwI3A==
+ osd-devices: /srv
+ monitor-count: 1
+# osd-reformat: 'yes'
+# ceph-cluster-network: 192.168.0.0/24
+# ceph-public-network: 192.168.0.0/24
+ to:
+ - "nodes-compute=0"
+# - "nodes-compute=1"
+# ceph-osd:
+# charm: cs:trusty/ceph-osd
+# num_units: 1
+# options:
+# osd-devices: /dev/sdb
+# osd-reformat: 'yes'
+# to:
+# - "lxc:nodes-api=0"
+# ceph-radosgw:
+# charm: cs:trusty/ceph-radosgw
+# num_units: 1
+# options:
+# use-embedded-webserver: true
+# to:
+
+# - "lxc:nodes-api=0"
+ cinder:
+ charm: cs:trusty/cinder
+ num_units: 1
+ options:
+ block-device: None
+ glance-api-version: 2
+# ha-mcastport: 5401
+# openstack-origin-git: include-file://cinder-master.yaml
+ to:
+ - "lxc:nodes-api=0"
+ cinder-ceph:
+ charm: cs:trusty/cinder-ceph
+ "rabbitmq-server":
+ branch: lp:charms/trusty/rabbitmq-server
+ num_units: 1
+ to:
+ - "lxc:nodes-api=0"
+ "keystone":
+ branch: lp:charms/trusty/keystone
+ num_units: 1
+ options:
+ openstack-origin-git: include-file://keystone-master.yaml
+ "admin-password": openstack
+ "admin-token": admin
+ to:
+ - "lxc:nodes-api=0"
+ "openstack-dashboard":
+ branch: lp:charms/trusty/openstack-dashboard
+ num_units: 1
+ options:
+ openstack-origin-git: include-file://horizon-master.yaml
+ secret: admin
+ to:
+ - "lxc:nodes-api=0"
+ "nova-compute":
+ branch: lp:~openstack-charmers/charms/trusty/nova-compute/next
+ num_units: 1
+ options:
+ openstack-origin-git: include-file://nova-master.yaml
+ "enable-live-migration": False
+ "manage-neutron-plugin-legacy-mode": False
+ to:
+ - "nodes-compute=0"
+# - "nodes-compute=1"
+ "nova-cloud-controller":
+ branch: lp:charms/trusty/nova-cloud-controller
+ num_units: 1
+ options:
+ openstack-origin-git: include-file://nova-master.yaml
+ "console-access-protocol": novnc
+ "network-manager": Neutron
+ "quantum-security-groups": "yes"
+ to:
+ - "lxc:nodes-api=0"
+ "neutron-api":
+ branch: lp:~openstack-charmers/charms/trusty/neutron-api/next
+ num_units: 1
+ options:
+ neutron-security-groups: True
+ manage-neutron-plugin-legacy-mode: False
+ to:
+ - "lxc:nodes-api=0"
+ neutron-gateway:
+ branch: lp:charms/trusty/neutron-gateway
+ options:
+ openstack-origin-git: include-file://neutron-master.yaml
+# "ext-port": "eth1"
+ to:
+ - "nodes-api=0"
+ "glance":
+ branch: lp:charms/trusty/glance
+ num_units: 1
+ options:
+ openstack-origin-git: include-file://glance-master.yaml
+ to:
+ - "lxc:nodes-api=0"
+ neutron-openvswitch:
+ branch: lp:~openstack-charmers/charms/trusty/neutron-openvswitch/trunk
+ options:
+ openstack-origin-git: include-file://neutron-master.yaml
+ relations:
+ - - neutron-openvswitch:neutron-plugin-api
+ - neutron-api:neutron-plugin-api
+ - - nova-compute:neutron-plugin
+ - neutron-openvswitch:neutron-plugin
+ - - neutron-openvswitch:amqp
+ - rabbitmq-server:amqp
+ - - "keystone:shared-db"
+ - "mysql:shared-db"
+ - - "nova-cloud-controller:shared-db"
+ - "mysql:shared-db"
+ - - "nova-cloud-controller:amqp"
+ - "rabbitmq-server:amqp"
+ - - "nova-cloud-controller:image-service"
+ - "glance:image-service"
+ - - "nova-cloud-controller:identity-service"
+ - "keystone:identity-service"
+ - - "nova-cloud-controller:cloud-compute"
+ - "nova-compute:cloud-compute"
+ - - "nova-compute:amqp"
+ - "rabbitmq-server:amqp"
+ - - "nova-compute:image-service"
+ - "glance:image-service"
+ - - "glance:shared-db"
+ - "mysql:shared-db"
+ - - "glance:identity-service"
+ - "keystone:identity-service"
+ - - "glance:amqp"
+ - "rabbitmq-server:amqp"
+ - - "openstack-dashboard:identity-service"
+ - "keystone:identity-service"
+ - - "neutron-api:shared-db"
+ - "mysql:shared-db"
+ - - "neutron-api:amqp"
+ - "rabbitmq-server:amqp"
+ - - "nova-cloud-controller:neutron-api"
+ - "neutron-api:neutron-api"
+ - - "neutron-api:identity-service"
+ - "keystone:identity-service"
+ - - "neutron-gateway:amqp"
+ - "rabbitmq-server:amqp"
+ - - "neutron-gateway:neutron-plugin-api"
+ - "neutron-api:neutron-plugin-api"
+ - - "nova-cloud-controller:quantum-network-service"
+ - "neutron-gateway:quantum-network-service"
+ - - "ceilometer:amqp"
+ - "rabbitmq-server:amqp"
+ - - "ceilometer-agent:ceilometer-service"
+ - "ceilometer:ceilometer-service"
+ - - "ceilometer:identity-service"
+ - "keystone:identity-service"
+ - - "ceilometer:identity-notifications"
+ - "keystone:identity-notifications"
+ - - "ceilometer-agent:nova-ceilometer"
+ - "nova-compute:nova-ceilometer"
+ - - "ceilometer:shared-db"
+ - "mongodb:database"
+ - - "heat:shared-db"
+ - "mysql:shared-db"
+ - - "heat:identity-service"
+ - "keystone:identity-service"
+ - - "heat:amqp"
+ - "rabbitmq-server:amqp"
+ - - "cinder:image-service"
+ - "glance:image-service"
+ - - "cinder:amqp"
+ - "rabbitmq-server:amqp"
+ - - "cinder:identity-service"
+ - "keystone:identity-service"
+ - - "cinder:cinder-volume-service"
+ - "nova-cloud-controller:cinder-volume-service"
+ - - "cinder-ceph:storage-backend"
+ - "cinder:storage-backend"
+ - - "ceph:client"
+ - "nova-compute:ceph"
+ - - "cinder:shared-db"
+ - "mysql:shared-db"
+ - - "ceph:client"
+ - "cinder-ceph:ceph"
+ - - "ceph:client"
+ - "glance:ceph"
+trusty-liberty-nodes:
+ inherits: openstack-phase1
+ overrides:
+ series: trusty
+
+trusty-liberty:
+ inherits: openstack-phase2
+ overrides:
+ #os-data-network: 10.4.8.0/21
+ #prefer-ipv6: true
+ region: Canonical
+ source: "cloud:trusty-liberty"
+ "openstack-origin": "cloud:trusty-liberty"
+ series: trusty
+ ceph-osd-replication-count: 1
+
+trusty-kilo-nodes:
+ inherits: openstack-phase1
+ overrides:
+ series: trusty
+
+# master
+trusty-kilo:
+ inherits: openstack-phase2
+ series: trusty
+ overrides:
+ region: Canonical
+ #os-data-network: 10.4.8.0/21
+ #prefer-ipv6: true
+ openstack-origin: cloud:trusty-kilo
+ source: cloud:trusty-kilo
+ series: trusty
+ ceph-osd-replication-count: 1
+
+
--- /dev/null
+# vim: set ts=2 et:
+openstack-phase1:
+ series: trusty
+ services:
+ nodes-api:
+ charm: "local:trusty/ubuntu-nodes-controller"
+ num_units: 1
+ constraints: tags=control
+ nodes-compute:
+ charm: "local:trusty/ubuntu-nodes-compute"
+ num_units: 1
+ constraints: tags=compute
+ "ntp":
+ charm: "cs:trusty/ntp"
+ relations:
+ - - "ntp:juju-info"
+ - "nodes-api:juju-info"
+ - - "ntp:juju-info"
+ - "nodes-compute:juju-info"
+openstack-phase2:
+ inherits: openstack-phase1
+ services:
+ "mysql":
+ charm: "cs:trusty/mysql"
+ num_units: 1
+ options:
+ "dataset-size": 2G
+ "max-connections": 10000
+ to:
+ - "lxc:nodes-api=0"
+ "ceilometer":
+ branch: "lp:~openstack-charmers/charms/trusty/ceilometer/next"
+ num_units: 1
+ to:
+ - "lxc:nodes-api=0"
+ "ceilometer-agent":
+ branch: "lp:~openstack-charmers/charms/trusty/ceilometer-agent/next"
+ "mongodb":
+ charm: "cs:trusty/mongodb"
+ num_units: 1
+ to:
+ - "lxc:nodes-api=0"
+ heat:
+ branch: "lp:~openstack-charmers/charms/trusty/heat/next"
+ to:
+ - "lxc:nodes-api=0"
+ ceph:
+ charm: cs:trusty/ceph
+ num_units: 1
+ options:
+ fsid: 5a791d94-980b-11e4-b6f6-3c970e8b1cf7
+ monitor-secret: AQAi5a9UeJXUExAA+By9u+GPhl8/XiUQ4nwI3A==
+ osd-devices: /srv
+ monitor-count: 1
+# osd-reformat: 'yes'
+# ceph-cluster-network: 192.168.0.0/24
+# ceph-public-network: 192.168.0.0/24
+ to:
+ - "nodes-compute=0"
+# - "nodes-compute=1"
+# ceph-osd:
+# charm: cs:trusty/ceph-osd
+# num_units: 1
+# options:
+# osd-devices: /srv
+# osd-reformat: 'yes'
+# to:
+# - "nodes-compute=0"
+# ceph-radosgw:
+# charm: cs:trusty/ceph-radosgw
+# num_units: 1
+# options:
+# use-embedded-webserver: true
+# to:
+# - "lxc:nodes-api=0"
+ cinder:
+ charm: cs:trusty/cinder
+ num_units: 1
+ options:
+ block-device: None
+ glance-api-version: 2
+# ha-mcastport: 5401
+ to:
+ - "lxc:nodes-api=0"
+ cinder-ceph:
+ charm: cs:trusty/cinder-ceph
+ "rabbitmq-server":
+ branch: lp:charms/trusty/rabbitmq-server
+ num_units: 1
+ to:
+ - "lxc:nodes-api=0"
+ "keystone":
+ branch: lp:charms/trusty/keystone
+ num_units: 1
+ options:
+# ha-mcastport: 5402
+ "admin-password": openstack
+ "admin-token": admin
+ to:
+ - "lxc:nodes-api=0"
+ "openstack-dashboard":
+ branch: lp:charms/trusty/openstack-dashboard
+ num_units: 1
+ options:
+ secret: admin
+ webroot: /
+ to:
+ - "lxc:nodes-api=0"
+ "nova-compute":
+ branch: lp:~openstack-charmers/charms/trusty/nova-compute/next
+ num_units: 1
+ options:
+ "enable-live-migration": False
+ "manage-neutron-plugin-legacy-mode": False
+ to:
+ - "nodes-compute=0"
+# - "nodes-compute=1"
+# - "nodes-api=0"
+# - "nodes-api=1"
+# - "nodes-api=2"
+ "nova-cloud-controller":
+ branch: lp:charms/trusty/nova-cloud-controller
+ num_units: 1
+ options:
+ "console-access-protocol": novnc
+ "network-manager": Neutron
+ "quantum-security-groups": "yes"
+ to:
+ - "lxc:nodes-api=0"
+ "neutron-api":
+ branch: lp:~openstack-charmers/charms/trusty/neutron-api/next
+ num_units: 1
+ options:
+ neutron-security-groups: True
+ manage-neutron-plugin-legacy-mode: False
+ to:
+ - "lxc:nodes-api=0"
+ neutron-gateway:
+ branch: lp:charms/trusty/neutron-gateway
+ options:
+# "ext-port": "eth1"
+ to:
+ - "nodes-api=0"
+ "glance":
+ branch: lp:charms/trusty/glance
+ num_units: 1
+ to:
+ - "lxc:nodes-api=0"
+ neutron-openvswitch:
+ branch: lp:~openstack-charmers/charms/trusty/neutron-openvswitch/next
+ relations:
+ - - neutron-openvswitch:neutron-plugin-api
+ - neutron-api:neutron-plugin-api
+ - - nova-compute:neutron-plugin
+ - neutron-openvswitch:neutron-plugin
+ - - neutron-openvswitch:amqp
+ - rabbitmq-server:amqp
+ - - "keystone:shared-db"
+ - "mysql:shared-db"
+ - - "nova-cloud-controller:shared-db"
+ - "mysql:shared-db"
+ - - "nova-cloud-controller:amqp"
+ - "rabbitmq-server:amqp"
+ - - "nova-cloud-controller:image-service"
+ - "glance:image-service"
+ - - "nova-cloud-controller:identity-service"
+ - "keystone:identity-service"
+ - - "nova-cloud-controller:cloud-compute"
+ - "nova-compute:cloud-compute"
+ - - "nova-compute:amqp"
+ - "rabbitmq-server:amqp"
+ - - "nova-compute:image-service"
+ - "glance:image-service"
+ - - "glance:shared-db"
+ - "mysql:shared-db"
+ - - "glance:identity-service"
+ - "keystone:identity-service"
+ - - "glance:amqp"
+ - "rabbitmq-server:amqp"
+ - - "openstack-dashboard:identity-service"
+ - "keystone:identity-service"
+ - - "neutron-api:shared-db"
+ - "mysql:shared-db"
+ - - "neutron-api:amqp"
+ - "rabbitmq-server:amqp"
+ - - "nova-cloud-controller:neutron-api"
+ - "neutron-api:neutron-api"
+ - - "neutron-api:identity-service"
+ - "keystone:identity-service"
+ - - "neutron-gateway:amqp"
+ - "rabbitmq-server:amqp"
+ - - "neutron-gateway:neutron-plugin-api"
+ - "neutron-api:neutron-plugin-api"
+ - - "nova-cloud-controller:quantum-network-service"
+ - "neutron-gateway:quantum-network-service"
+ - - "ceilometer:amqp"
+ - "rabbitmq-server:amqp"
+ - - "ceilometer-agent:ceilometer-service"
+ - "ceilometer:ceilometer-service"
+ - - "ceilometer:identity-service"
+ - "keystone:identity-service"
+ - - "ceilometer:identity-notifications"
+ - "keystone:identity-notifications"
+ - - "ceilometer-agent:nova-ceilometer"
+ - "nova-compute:nova-ceilometer"
+ - - "ceilometer:shared-db"
+ - "mongodb:database"
+ - - "heat:shared-db"
+ - "mysql:shared-db"
+ - - "heat:identity-service"
+ - "keystone:identity-service"
+ - - "heat:amqp"
+ - "rabbitmq-server:amqp"
+ - - "cinder:image-service"
+ - "glance:image-service"
+ - - "cinder:amqp"
+ - "rabbitmq-server:amqp"
+ - - "cinder:identity-service"
+ - "keystone:identity-service"
+ - - "cinder:cinder-volume-service"
+ - "nova-cloud-controller:cinder-volume-service"
+ - - "cinder-ceph:storage-backend"
+ - "cinder:storage-backend"
+ - - "ceph:client"
+ - "nova-compute:ceph"
+ - - "cinder:shared-db"
+ - "mysql:shared-db"
+ - - "ceph:client"
+ - "cinder-ceph:ceph"
+ - - "ceph:client"
+ - "glance:ceph"
+# - - ceph-osd:mon
+# - ceph:osd
+# - - ceph-radosgw:mon
+# - ceph:radosgw
+# - - ceph-radosgw:identity-service
+# - keystone:identity-service
+trusty-liberty-nodes:
+ inherits: openstack-phase1
+ overrides:
+ series: trusty
+
+trusty-liberty:
+ inherits: openstack-phase2
+ overrides:
+ #os-data-network: 10.4.8.0/21
+ #prefer-ipv6: true
+ region: Canonical
+ source: "cloud:trusty-liberty"
+ "openstack-origin": "cloud:trusty-liberty"
+ series: trusty
+ ceph-osd-replication-count: 1
+
+trusty-kilo-nodes:
+ inherits: openstack-phase1
+ overrides:
+ series: trusty
+
+trusty-kilo:
+ inherits: openstack-phase2
+ overrides:
+ #os-data-network: 10.4.8.0/21
+ #prefer-ipv6: true
+ region: Canonical
+ source: "cloud:trusty-kilo"
+ "openstack-origin": "cloud:trusty-kilo"
+ series: trusty
+ ceph-osd-replication-count: 1
+
+
+trusty-juno-nodes:
+ inherits: openstack-phase1
+ overrides:
+ series: trusty
+
+trusty-juno:
+ inherits: openstack-phase2
+ overrides:
+ #os-data-network: 10.4.8.0/21
+ #prefer-ipv6: true
+ region: Canonical
+ source: "cloud:trusty-juno"
+ "openstack-origin": "cloud:trusty-juno"
+ series: trusty
+ ceph-osd-replication-count: 1
+
+
--- /dev/null
+#!/bin/sh -e
+
+. ~/admin-openrc
+
+# adjust tiny image
+nova flavor-delete m1.tiny
+nova flavor-create m1.tiny 1 512 8 1
+
+# configure security groups
+neutron security-group-rule-create --direction ingress --ethertype IPv4 --protocol icmp --remote-ip-prefix 0.0.0.0/0 default
+neutron security-group-rule-create --direction ingress --ethertype IPv4 --protocol tcp --port-range-min 22 --port-range-max 22 --remote-ip-prefix 0.0.0.0/0 default
+
+# import key pair
+keystone tenant-create --name demo --description "Demo Tenant"
+keystone user-create --name demo --tenant demo --pass demo --email demo@demo.demo
+
+nova keypair-add --pub-key id_rsa.pub ubuntu-keypair
+
+# configure external network
+neutron net-create ext-net --router:external --provider:physical_network external --provider:network_type flat
+neutron subnet-create ext-net --name ext-subnet --allocation-pool start=10.5.8.5,end=10.5.8.254 --disable-dhcp --gateway 10.5.8.1 10.5.8.0/24
+
+# create vm network
+neutron net-create demo-net
+neutron subnet-create --name demo-subnet --gateway 10.20.5.1 demo-net 10.20.5.0/24
+
+neutron router-create demo-router
+
+neutron router-interface-add demo-router demo-subnet
+
+neutron router-gateway-set demo-router ext-net
+
+# create pool of floating ips
+i=0
+while [ $i -ne 10 ]; do
+ neutron floatingip-create ext-net
+ i=$((i + 1))
+done
+
--- /dev/null
+#!/bin/sh -e
+
+. ~/admin-openrc
+
+wget -P /tmp/images http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+wget -P /tmp/images http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+glance image-create --name "cirros-0.3.3-x86_64" --file /tmp/images/cirros-0.3.3-x86_64-disk.img --disk-format qcow2 --container-format bare --progress
+glance image-create --name "ubuntu-trusty-daily" --file /tmp/images/trusty-server-cloudimg-amd64-disk1.img --disk-format qcow2 --container-format bare --progress
+rm -rf /tmp/images
--- /dev/null
+#!/bin/sh -ex
+
+configOpenrc()
+{
+ cat <<-EOF
+ export OS_USERNAME=$1
+ export OS_PASSWORD=$2
+ export OS_TENANT_NAME=$3
+ export OS_AUTH_URL=$4
+ export OS_REGION_NAME=$5
+ EOF
+}
+
+unitAddress()
+{
+ juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"public-address\"]" 2> /dev/null
+}
+
+unitMachine()
+{
+ juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"machine\"]" 2> /dev/null
+}
+
+# Pool size overrides are handled by the bundle's ceph-osd-replication-count
+# override (set to 1), so the manual pool resize below is not needed.
+#juju run --service ceph 'sudo ceph osd pool set cinder-ceph size 1'
+#juju run --service ceph 'sudo ceph osd pool set cinder-ceph min_size 1'
+
+mkdir -m 0700 -p cloud
+controller_address=$(unitAddress keystone 0)
+configOpenrc admin openstack admin http://$controller_address:5000/v2.0 Canonical > cloud/admin-openrc
+chmod 0600 cloud/admin-openrc
+
+machine=$(unitMachine glance 0)
+juju scp glance.sh cloud/admin-openrc $machine:
+juju run --machine $machine ./glance.sh
+
+machine=$(unitMachine nova-cloud-controller 0)
+juju scp cloud-setup.sh cloud/admin-openrc ~/.ssh/id_rsa.pub $machine:
+juju run --machine $machine ./cloud-setup.sh
+
;;
'tip' )
cp odl/juju-deployer/ovs-odl-tip.yaml ./bundles.yaml
- cp odl/juju-deployer/source/* ./
+ cp common/source/* ./
sed -i -- "s|branch: master|branch: stable/$2|g" ./*.yaml
;;
* )
cp maas/att/virpod1/lxc-add-more-interfaces trusty/ubuntu-nodes-controller/lxc/add-more-interfaces
cp maas/att/virpod1/lxc-add-more-interfaces trusty/ubuntu-nodes-compute/lxc/add-more-interfaces
 # As per your lab VIP address list; by default it uses 10.4.1.11 - 10.4.1.20
- sed -i -- 's/10.4.1.1/192.168.1.1/g' ./bundles.yaml
+ sed -i -- 's/10.4.1.1/192.168.10.1/g' ./bundles.yaml
 # Choose the external port the gateway uses to reach the outside network.
- sed -i -- 's/# "ext-port": "eth1"/ "ext-port": "juju-br0"/g' ./bundles.yaml
+ sed -i -- 's/# "ext-port": "eth1"/ "ext-port": "eth1"/g' ./bundles.yaml
;;
esac
;;
esac
-echo "... Deployment finished ...."
;;
'tip' )
cp opencontrail/juju-deployer/contrail-tip.yaml ./bundles.yaml
- cp opencontrail/juju-deployer/source/* ./
+ cp common/source/* ./
sed -i -- "s|branch: master|branch: stable/$2|g" ./*.yaml
;;
* )
cp maas/att/virpod1/lxc-add-more-interfaces trusty/ubuntu-nodes-controller/lxc/add-more-interfaces
cp maas/att/virpod1/lxc-add-more-interfaces trusty/ubuntu-nodes-compute/lxc/add-more-interfaces
 # As per your lab VIP address list; by default it uses 10.4.1.11 - 10.4.1.20
- sed -i -- 's/10.4.1.1/192.168.1.1/g' ./bundles.yaml
+ sed -i -- 's/10.4.1.1/192.168.10.1/g' ./bundles.yaml
# Choose the external port to go out from gateway to use.
- sed -i -- 's/# "ext-port": "eth1"/ "ext-port": "juju-br0"/g' ./bundles.yaml
+ sed -i -- 's/# "ext-port": "eth1"/ "ext-port": "eth1"/g' ./bundles.yaml
;;
esac
;;
esac
-echo "... Deployment finished ...."
+++ /dev/null
-repositories:
- - {name: requirements,
- repository: 'git://github.com/openstack/requirements',
- branch: master}
- - {name: cinder,
- repository: 'git://github.com/openstack/cinder',
- branch: master}
-directory: /mnt/openstack-git
-#http_proxy: http://squid.internal:3128
-#https_proxy: http://squid.internal:3128
+++ /dev/null
-repositories:
- - {name: requirements,
- repository: 'git://github.com/openstack/requirements',
- branch: master}
- - {name: glance,
- repository: 'git://github.com/openstack/glance',
- branch: master}
-directory: /mnt/openstack-git
-#http_proxy: http://squid.internal:3128
-#https_proxy: http://squid.internal:3128
+++ /dev/null
-repositories:
- - {name: requirements,
- repository: 'git://github.com/openstack/requirements',
- branch: master}
- - {name: horizon,
- repository: 'git://github.com/openstack/horizon',
- branch: master}
-directory: /mnt/openstack-git
-#http_proxy: http://squid.internal:3128
-#https_proxy: http://squid.internal:3128
+++ /dev/null
-repositories:
- - {name: requirements,
- repository: 'git://github.com/openstack/requirements',
- branch: master}
- - {name: keystone,
- repository: 'git://github.com/openstack/keystone',
- branch: master}
-directory: /mnt/openstack-git
-#http_proxy: http://squid.internal:3128
-#https_proxy: http://squid.internal:3128
+++ /dev/null
-repositories:
- - {name: requirements,
- repository: 'git://github.com/openstack/requirements',
- branch: master}
- - {name: neutron-fwaas,
- repository: 'git://github.com/openstack/neutron-fwaas',
- branch: master}
- - {name: neutron-lbaas,
- repository: 'git://github.com/openstack/neutron-lbaas',
- branch: master}
- - {name: neutron-vpnaas,
- repository: 'git://github.com/openstack/neutron-vpnaas',
- branch: master}
- - {name: neutron,
- repository: 'git://github.com/openstack/neutron',
- branch: master}
-directory: /mnt/openstack-git
-#http_proxy: http://squid.internal:3128
-#https_proxy: http://squid.internal:3128
+++ /dev/null
-repositories:
- - {name: requirements,
- repository: 'git://github.com/openstack/requirements',
- branch: master}
- - {name: neutron,
- repository: 'git://github.com/openstack/neutron',
- branch: master}
- - {name: nova,
- repository: 'git://github.com/openstack/nova',
- branch: master}
-directory: /mnt/openstack-git
-#http_proxy: http://squid.internal:3128
-#https_proxy: http://squid.internal:3128