# In the case of a virtual deployment, get deployment.yaml, labconfig.yaml and deployconfig.yaml
if [ "$virtinstall" -eq 1 ]; then
labname="default"
- ./cleanvm.sh
+ ./cleanvm.sh || true
cp ../labconfig/default/deployment.yaml ./
cp ../labconfig/default/labconfig.yaml ./
cp ../labconfig/default/deployconfig.yaml ./
sudo pip uninstall google-api-python-client
fi
+#create backup directories
+mkdir ~/joid_config/ || true
+mkdir ~/.juju/ || true
+
# Init Juju
-juju init -f
+juju init -f || true
#
# MAAS deploy
# Backup deployment.yaml and deployconfig.yaml in the .juju and joid_config folders
cp ./environments.yaml ~/.juju/
+cp ./environments.yaml ~/joid_config/
if [ -e ./deployconfig.yaml ]; then
cp ./deployconfig.yaml ~/.juju/
cp ./labconfig.yaml ~/.juju/
+ cp ./deployconfig.yaml ~/joid_config/
+ cp ./labconfig.yaml ~/joid_config/
fi
if [ -e ./deployment.yaml ]; then
cp ./deployment.yaml ~/.juju/
+ cp ./deployment.yaml ~/joid_config/
fi
#
done
}
+#functions for the Juju environment (register the MAAS cloud and its credentials)
+
+addcredential() {
+ controllername=`awk 'NR==1{print $2}' environments.yaml`
+ cloudname=`awk 'NR==1{print $2}' environments.yaml`
+
+ echo "credentials:" > credential.yaml
+ echo " $controllername:" >> credential.yaml
+ echo " opnfv-credentials:" >> credential.yaml
+ echo " auth-type: oauth1" >> credential.yaml
+ echo " maas-oauth: $apikey" >> credential.yaml
+
+ juju add-credential $controllername -f credential.yaml --replace
+}
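+# Illustrative only: with a hypothetical controller name "mymaas" and the MAAS
+# API key read into $apikey, the generated credential.yaml would look like:
+#   credentials:
+#     mymaas:
+#       opnfv-credentials:
+#         auth-type: oauth1
+#         maas-oauth: <MAAS API key>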
+
+addcloud() {
+ controllername=`awk 'NR==1{print $2}' environments.yaml`
+ cloudname=`awk 'NR==1{print $2}' environments.yaml`
+
+ echo "clouds:" > maas-cloud.yaml
+ echo " $cloudname:" >> maas-cloud.yaml
+ echo " type: maas" >> maas-cloud.yaml
+ echo " auth-types: [oauth1]" >> maas-cloud.yaml
+ echo " endpoint: http://$maas_ip/MAAS" >> maas-cloud.yaml
+
+ juju add-cloud $cloudname maas-cloud.yaml --replace
+}
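+# Illustrative only: with the same hypothetical cloud name "mymaas" and a MAAS
+# reachable at $maas_ip, the generated maas-cloud.yaml would look like:
+#   clouds:
+#     mymaas:
+#       type: maas
+#       auth-types: [oauth1]
+#       endpoint: http://<maas_ip>/MAAS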
+
+
#
# VLAN customization
#
#
#read the interface needed in Auto mode and enable it. Will be removed once auto enablement is implemented in the maas-deployer.
-if [ -e ~/.juju/deployconfig.yaml ]; then
+if [ -e ~/joid_config/deployconfig.yaml ]; then
+ cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml
+elif [ -e ~/.juju/deployconfig.yaml ]; then
cp ~/.juju/deployconfig.yaml ./deployconfig.yaml
+fi
enableiflist=`grep "interface-enable" deployconfig.yaml | cut -d ' ' -f 4 `
datanet=`grep "dataNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
fi
fi
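+# Note: the greps above expect two-space indented entries in deployconfig.yaml,
+# e.g. "  interface-enable: eth1" and "  dataNetwork: 10.4.9.0/24" (values are
+# examples only), so that cut picks the value as the fourth space-separated field.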
+
+# Add the cloud and controller credentials for MAAS for that lab.
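+# "juju --version" prints a string such as "2.0.0-xenial-amd64"; keep only the
+# major version so the numeric comparisons below work.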
+jujuver=`juju --version | cut -d "." -f 1`
+
+if [ "$jujuver" -ge "2" ]; then
+ addcloud
+ addcredential
+fi
+
#
# End of scripts
#
#export JUJU_DEV_FEATURE_FLAGS=address-allocation
-juju bootstrap --debug --to bootstrap.maas
-sleep 5
-#disable juju gui until xenial charms are in charm store.
-juju deploy cs:juju-gui-130 --to 0
+jujuver=`juju --version | cut -d "." -f 1`
-JUJU_REPOSITORY=
-juju set-constraints tags=
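+# Juju 1.x bootstraps from environments.yaml; Juju 2.x bootstraps a named
+# controller onto the MAAS cloud registered above.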
+if [ "$jujuver" -lt "2" ]; then
+ juju bootstrap --debug --to bootstrap.maas
+ sleep 5
+    #disable juju gui until xenial charms are in the charm store.
+ juju deploy cs:juju-gui-130 --to 0
+ JUJU_REPOSITORY=
+ juju set-constraints tags=
+
+else
+ controllername=`awk 'NR==1{print $2}' environments.yaml`
+ cloudname=`awk 'NR==1{print $2}' environments.yaml`
+ juju bootstrap $controllername $cloudname --debug --to bootstrap.maas
+fi
#copy and download charms
cp $opnfvsdn/fetch-charms.sh ./fetch-charms.sh
+jujuver=`juju --version | cut -d "." -f 1`
+
#modify the ubuntu series we want to deploy
sed -i -- "s|distro=trusty|distro=$opnfvdistro|g" ./fetch-charms.sh
while [ $retval -eq 0 ]; do
sleep 30
juju status > status.txt
- if [ "$(grep -c "executing" status.txt )" -ge 2 ]; then
- echo " still executing the reltionship within charms ..."
- if [ $timeoutiter -ge 60 ]; then
+ if [ "$(grep -c "waiting" status.txt )" -ge 4 ]; then
+ echo " still waiting for machines ..."
+ if [ $timeoutiter -ge 240 ]; then
retval=1
fi
timeoutiter=$((timeoutiter+1))
var=$var"_"publicapi
fi
-#lets generate the bundle for all target using genBundle.py
-python genBundle.py -l deployconfig.yaml -s $var > bundles.yaml
-
-#keep the back in cloud for later debugging.
-pastebinit bundles.yaml || true
-
-echo "... Deployment Started ...."
-juju-deployer -vW -d -t 7200 -r 5 -c bundles.yaml $opnfvdistro-"$openstack"
+if [ "$jujuver" -lt "2" ]; then
+    #let's generate the bundle for all targets using genBundle.py
+ python genBundle.py -j 1 -l deployconfig.yaml -s $var > bundles.yaml
+    #keep the backup in the cloud for later debugging.
+ pastebinit bundles.yaml || true
+ echo "... Deployment Started ...."
+ juju-deployer -vW -d -t 7200 -r 5 -c bundles.yaml $opnfvdistro-"$openstack"
+else
+    #let's generate the bundle for all targets using genBundle.py
+ python genBundle.py -j 2 -l deployconfig.yaml -s $var > bundles.yaml
+    #keep the backup in the cloud for later debugging.
+ pastebinit bundles.yaml || true
+    # with Juju 2.0, the bundle has to be deployed only once.
+ juju deploy bundles.yaml --debug
+ sleep 120
+ check_status
+fi
#let's gather the status of the deployment once juju-deployer has completed.
juju status --format=tabular
# seeing an issue related to the number of open files.
-# juju run --service nodes 'echo 2048 | sudo tee /proc/sys/fs/inotify/max_user_instances'
-
count=`juju status nodes --format=short | grep nodes | wc -l`
-
c=0
while [ $c -lt $count ]; do
juju ssh nodes/$c 'echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p' || true
set -ex
-if [ -d $HOME/.juju/environments ]; then
+if [ ! -f environments.yaml ]; then
+ cp ~/joid_config/environments.yaml ./environments.yaml
+fi
+
+jujuver=`juju --version | cut -d "." -f 1`
+
+if [ "$jujuver" -ge "2" ]; then
+ controllername=`awk 'NR==1{print $2}' environments.yaml`
+ cloudname=`awk 'NR==1{print $2}' environments.yaml`
+ juju kill-controller $controllername --timeout 10s -y || true
+ rm -rf precise
+ rm -rf trusty
+ rm -rf xenial
+elif [ -d $HOME/.juju/environments ]; then
echo " " > status.txt
juju status &>>status.txt || true
if [ "$(grep -c "environment is not bootstrapped" status.txt )" -ge 1 ]; then
else
echo " environment is bootstrapped ..."
jujuenv=`juju status | grep environment | cut -d ":" -f 2`
- juju destroy-environment $jujuenv -y
+ juju destroy-environment $jujuenv -y || true
fi
rm -rf precise
rm -rf trusty
rm -rf $HOME/.juju/environments
rm -rf $HOME/.juju/ssh
fi
+
# also clean up along with the environment destroy.
echo " Cleanup Started ..."
-./clean.sh
+./clean.sh || true
sudo virsh destroy node1-control || true
sudo virsh destroy node3-control || true
--- /dev/null
+ aodh:
+ charm: "./{{ ubuntu.release }}/aodh"
+ num_units: 1
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+ to:
+ - "lxd:nodes/0"
+{# Empty block to avoid bad block trim #}
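+{# Illustrative rendering only, assuming ubuntu.release=xenial and
+   os.release=newton:
+     aodh:
+       charm: "./xenial/aodh"
+       num_units: 1
+       options:
+         openstack-origin: "cloud:xenial-newton"
+       to:
+         - "lxd:nodes/0"
+#}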
--- /dev/null
+# vim: set ts=2 et:
+ series: {{ ubuntu.release }}
+ services:
+{% if os.hyperconverged %}
+ nodes:
+ charm: "cs:{{ ubuntu.release }}/ubuntu"
+ num_units: {{ opnfv.units }}
+{% else %}
+ nodes:
+ charm: "cs:{{ ubuntu.release }}/ubuntu"
+{% if os.ha.mode == 'ha' %}
+ num_units: 3
+{% else %}
+ num_units: 1
+{% endif %}
+ constraints: tags=control
+ nodes-compute:
+ charm: "cs:{{ ubuntu.release }}/ubuntu"
+{% if os.ha.mode == 'ha' %}
+ num_units: {{ opnfv.units - 3 }}
+{% else %}
+ num_units: {{ opnfv.units - 1 }}
+{% endif %}
+{% endif %}
+ ntp:
+ charm: "./{{ ubuntu.release }}/ntp"
+{% if os.network.controller == 'ocl' %}
+ options:
+ source: "0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org"
+ auto_peers: false
+{% endif %}
+{% include 'mysql.yaml' %}
+{% include 'ceilometer.yaml' %}
+{% if opnfv.storage_dict.scaleio is defined %}
+{% include 'scaleio.yaml' %}
+{% else %}
+{% include 'ceph.yaml' %}
+{% endif %}
+{% include 'cinder.yaml' %}
+{% include 'glance.yaml' %}
+{% if opnfv.storage_dict.ceph is defined %}
+{% include 'opnfv-promise.yaml' %}
+{% include 'congress.yaml' %}
+{% endif %}
+{% include 'keystone.yaml' %}
+{% include 'nova-cloud-controller.yaml' %}
+{% include 'nova-compute.yaml' %}
+{% include 'openstack-dashboard.yaml' %}
+{% include 'rabbitmq.yaml' %}
+{% include 'heat.yaml' %}
+{% include 'neutron-api.yaml' %}
+{% include 'neutron-gateway.yaml' %}
+{% include 'aodh.yaml' %}
+{% if os.network.controller == 'odl' %}
+{% include 'odl.yaml' %}
+{% elif os.network.controller == 'onos' %}
+{% include 'onos.yaml' %}
+{% elif os.network.controller == 'ocl' %}
+{% include 'oclphase1.yaml' %}
+{% endif %}
+{% if os.ha.mode == 'ha' %}
+{% include 'haclusters.yaml' %}
+{% endif %}
+{% include 'subordinate.yaml' %}
+
+{% if os.hyperconverged %}
+ relations:
+ - [ 'ntp:juju-info', 'nodes:juju-info' ]
+{% else %}
+ relations:
+ - [ 'ntp:juju-info', 'nodes:juju-info' ]
+ - [ 'ntp:juju-info', 'nodes-compute:juju-info' ]
+{% endif %}
+
+{% if os.ha.mode == 'ha' %}
+{% include 'harelations.yaml' %}
+{% endif %}
+{% include 'relations.yaml' %}
--- /dev/null
+ mongodb:
+ charm: ./{{ ubuntu.release }}/mongodb
+ num_units: 1
+ to:
+ - "lxd:nodes/0"
+ ceilometer:
+ charm: "./{{ ubuntu.release }}/ceilometer"
+ num_units: {{ unit_qty() }}
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+ region: {{ os.region }}
+{% if os.ha.mode == 'ha' %}
+ vip: {{ opnfv.vip.ceilometer }}
+{% endif %}
+{% if os.beta.public_api %}
+ os-public-hostname: api.{{ opnfv.domain }}
+{% endif %}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
--- /dev/null
+{% if opnfv.storage_dict.ceph is defined %}
+ ceph:
+ charm: "./{{ ubuntu.release }}/ceph"
+ num_units: {{ unit_ceph_qty() }}
+ options:
+ monitor-count: {{ unit_ceph_qty() }}
+ fsid: 5a791d94-980b-11e4-b6f6-3c970e8b1cf7
+ monitor-secret: AQAi5a9UeJXUExAA+By9u+GPhl8/XiUQ4nwI3A==
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if opnfv.spaces_dict.storage is defined %}
+ #ceph-cluster-network: {{ opnfv.spaces_dict.storage.cidr }}
+{% endif %}
+ to:
+{% if os.hyperconverged %}
+{% for unit_id in to_select(unit_ceph_qty()) %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+{% else %}
+{% if os.ha.mode == 'ha' %}
+{% for unit_id in range(0, 3) %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+{% else %}
+ - "lxd:nodes/0"
+ - "lxd:nodes/0"
+ - "lxd:nodes/0"
+{% endif %}
+{% endif %}
+ ceph-osd:
+ charm: "./{{ ubuntu.release }}/ceph-osd"
+{% if os.hyperconverged %}
+ num_units: {{ opnfv.units }}
+{% else %}
+ num_units: 3
+{% endif %}
+ options:
+ osd-devices: {{ opnfv.storage_dict.ceph.disk }}
+ osd-reformat: 'yes'
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+ to:
+{% if os.hyperconverged %}
+{% for unit_id in range(0, opnfv.units) %}
+ - "nodes/{{ unit_id }}"
+{% endfor %}
+{% else %}
+{% if os.ha.mode == 'ha' %}
+{% for unit_id in range(0, 3) %}
+ - "nodes/{{ unit_id }}"
+{% endfor %}
+{% else %}
+ - "nodes/0"
+ - "nodes-compute/0"
+ - "nodes-compute/1"
+{% endif %}
+{% endif %}
+ ceph-radosgw:
+ charm: "./{{ ubuntu.release }}/ceph-radosgw"
+ num_units: {{ unit_qty() if os.beta.hacluster_ceph_radosgw else 1 }}
+ options:
+ region: {{ os.region }}
+ use-embedded-webserver: true
+ operator-roles: "Member,admin"
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if opnfv.storage_dict.ceph is defined %}
+ ceph-osd-replication-count: {{ unit_ceph_qty() }}
+{% endif %}
+ to:
+{% for unit_id in to_select(unit_qty() if os.beta.hacluster_ceph_radosgw else 1) %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+{% endif %}
--- /dev/null
+ cinder:
+ charm: "./{{ ubuntu.release }}/cinder"
+{% if opnfv.storage_dict.scaleio is defined %}
+ num_units: 1
+{% else %}
+ num_units: {{ unit_qty() }}
+{% endif %}
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+ region: {{ os.region }}
+ block-device: None
+ glance-api-version: 2
+{% if os.beta.public_api %}
+ use-internal-endpoints: true
+{% endif %}
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if os.api.worker_multiplier %}
+ worker-multiplier: {{ os.api.worker_multiplier }}
+{% endif %}
+{% if opnfv.storage_dict.ceph is defined %}
+ ceph-osd-replication-count: {{ unit_ceph_qty() }}
+{% endif %}
+{% if os.ha.mode == 'ha' %}
+ vip: {{ opnfv.vip.cinder }}
+{% endif %}
+{% if os.beta.public_api %}
+ os-public-hostname: api.{{ opnfv.domain }}
+{% endif %}
+ to:
+{% if opnfv.storage_dict.scaleio is defined %}
+ - "nodes/0"
+{% else %}
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+{% endif %}
--- /dev/null
+ congress:
+ charm: "./{{ ubuntu.release }}/congress"
+ num_units: 1
+ options:
+ region: {{ os.region }}
+{% if ubuntu.release == 'trusty' %}
+ source-branch: "stable/{{ os.release }}"
+{% endif %}
+ to:
+ - "lxd:nodes/0"
+{# Empty block to avoid bad block trim #}
--- /dev/null
+ glance:
+ charm: "./{{ ubuntu.release }}/glance"
+ num_units: {{ unit_qty() }}
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+ region: {{ os.region }}
+{% if os.beta.public_api %}
+ use-internal-endpoints: true
+{% endif %}
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if opnfv.storage_dict.ceph is defined %}
+ ceph-osd-replication-count: {{ unit_ceph_qty() }}
+{% endif %}
+{% if os.api.worker_multiplier %}
+ worker-multiplier: {{ os.api.worker_multiplier }}
+{% endif %}
+{% if os.ha.mode == 'ha' %}
+ vip: {{ opnfv.vip.glance }}
+{% endif %}
+{% if os.beta.public_api %}
+ os-public-hostname: api.{{ opnfv.domain }}
+{% endif %}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
--- /dev/null
+
+ hacluster-keystone:
+ charm: "./{{ ubuntu.release }}/hacluster"
+ options:
+ corosync_transport: unicast
+ cluster_count: 3
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+ hacluster-cinder:
+ charm: "./{{ ubuntu.release }}/hacluster"
+ options:
+ corosync_transport: unicast
+ cluster_count: 3
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+# hacluster-heat:
+# charm: "./{{ ubuntu.release }}/hacluster"
+# options:
+# corosync_transport: unicast
+# cluster_count: 3
+{% if os.network.ipv6 %}
+# prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+ hacluster-horizon:
+ charm: "./{{ ubuntu.release }}/hacluster"
+ options:
+ corosync_transport: unicast
+ cluster_count: 3
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+ hacluster-nova:
+ charm: "./{{ ubuntu.release }}/hacluster"
+ options:
+ corosync_transport: unicast
+ cluster_count: 3
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+ hacluster-neutron:
+ charm: "./{{ ubuntu.release }}/hacluster"
+ options:
+ corosync_transport: unicast
+ cluster_count: 3
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+ hacluster-glance:
+ charm: "./{{ ubuntu.release }}/hacluster"
+ options:
+ corosync_transport: unicast
+ cluster_count: 3
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+ hacluster-ceilometer:
+ charm: "./{{ ubuntu.release }}/hacluster"
+ options:
+ corosync_transport: unicast
+ cluster_count: 3
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+ hacluster-mysql:
+ charm: "./{{ ubuntu.release }}/hacluster"
+ options:
+ corosync_transport: unicast
+ cluster_count: 3
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if os.beta.hacluster_ceph_radosgw %}
+ hacluster-ceph-radosgw:
+ charm: "./{{ ubuntu.release }}/hacluster"
+ options:
+ corosync_transport: unicast
+ cluster_count: 3
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% endif %}
--- /dev/null
+{% if os.ha.mode == 'ha' %}
+ - [ 'mysql:ha', 'hacluster-mysql:ha' ]
+ - [ 'cinder:ha', 'hacluster-cinder:ha' ]
+# - [ 'heat:ha', 'hacluster-heat:ha' ]
+ - [ 'glance:ha', 'hacluster-glance:ha' ]
+ - [ 'keystone:ha', 'hacluster-keystone:ha' ]
+ - [ 'neutron-api:ha', 'hacluster-neutron:ha' ]
+ - [ 'nova-cloud-controller:ha', 'hacluster-nova:ha' ]
+ - [ 'openstack-dashboard:ha', 'hacluster-horizon:ha' ]
+ - [ 'ceilometer:ha', 'hacluster-ceilometer:ha' ]
+{% if os.beta.hacluster_ceph_radosgw %}
+ - [ 'ceph-radosgw:ha', 'hacluster-ceph-radosgw:ha' ]
+{% endif %}
+{% endif %}
--- /dev/null
+ heat:
+ charm: "./{{ ubuntu.release }}/heat"
+ num_units: 1
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+ region: {{ os.region }}
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if os.api.worker_multiplier %}
+ worker-multiplier: {{ os.api.worker_multiplier }}
+{% endif %}
+{% if os.ha.mode == 'ha' %}
+# vip: {{ opnfv.vip.heat }}
+{% endif %}
+{% if os.beta.public_api %}
+# os-public-hostname: api.{{ opnfv.domain }}
+{% endif %}
+ to:
+ - "lxd:nodes/0"
+{# Empty block to avoid bad block trim #}
--- /dev/null
+ keystone:
+ charm: "./{{ ubuntu.release }}/keystone"
+ num_units: {{ unit_qty() }}
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+ region: {{ os.region }}
+ admin-role: {{ os.admin.role }}
+ keystone-admin-role: {{ os.admin.role }}
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if os.api.worker_multiplier %}
+ worker-multiplier: {{ os.api.worker_multiplier }}
+{% endif %}
+ admin-password: {{ opnfv.admin_password | default(os.admin.password) }}
+ admin-token: {{ os.admin.name }}
+{% if os.ha.mode == 'ha' %}
+ vip: {{ opnfv.vip.keystone }}
+{% endif %}
+{% if os.beta.public_api %}
+ os-public-hostname: api.{{ opnfv.domain }}
+{% endif %}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
--- /dev/null
+ mysql:
+ charm: "./{{ ubuntu.release }}/percona-cluster"
+ num_units: {{ unit_qty() }}
+ options:
+{% if os.ha.mode == 'ha' %}
+ innodb-buffer-pool-size: 10G
+{% else %}
+ innodb-buffer-pool-size: 1G
+{% endif %}
+ max-connections: 20000
+ root-password: {{ get_password('mysql_root') }}
+ sst-password: {{ get_password('mysql_sst') }}
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if os.ha.mode == 'ha' %}
+ vip: {{ opnfv.vip.mysql }}
+{% endif %}
+ min-cluster-size: {{ unit_qty() }}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
--- /dev/null
+ neutron-api:
+ charm: "./{{ ubuntu.release }}/neutron-api"
+ num_units: {{ unit_qty() }}
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+ region: {{ os.region }}
+ neutron-security-groups: true
+{% if os.beta.public_api %}
+ use-internal-endpoints: true
+{% endif %}
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if os.api.worker_multiplier %}
+ worker-multiplier: {{ os.api.worker_multiplier }}
+{% endif %}
+{% if os.ha.mode == 'ha' %}
+ vip: {{ opnfv.vip.neutron }}
+{% endif %}
+{% if os.network.controller == 'nosdn' %}
+ flat-network-providers: "*"
+{% if os.network.dvr %}
+ overlay-network-type: vxlan
+{% endif %}
+{% elif os.network.controller == 'odl' %}
+ manage-neutron-plugin-legacy-mode: False
+{% elif os.network.controller == 'onos' %}
+ flat-network-providers: "*"
+ manage-neutron-plugin-legacy-mode: False
+{% endif %}
+{% if os.beta.public_api %}
+ os-public-hostname: api.{{ opnfv.domain }}
+{% endif %}
+{% if os.network.dvr %}
+ enable-dvr: true
+{% endif %}
+{% if os.network.l2_population %}
+ l2-population: true
+{% endif %}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
--- /dev/null
+ neutron-gateway:
+ charm: "./{{ ubuntu.release }}/neutron-gateway"
+ num_units: 1
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+{% if os.api.worker_multiplier %}
+ worker-multiplier: {{ os.api.worker_multiplier }}
+{% endif %}
+{% if opnfv.spaces_dict.data is defined %}
+ os-data-network: {{ opnfv.spaces_dict.data.cidr }}
+{% endif %}
+{% if os.network.controller == 'nosdn' %}
+{% if opnfv.ext_port is defined %}
+{% if opnfv.data_port is defined %}
+ bridge-mappings: physnet1:br-data external:br-ex
+ data-port: br-data:{{ opnfv.data_port }} br-ex:{{ opnfv.ext_port }}
+{% else %}
+ bridge-mappings: external:br-ex
+ data-port: br-ex:{{ opnfv.ext_port }}
+{% endif %}
+{% endif %}
+{% elif os.network.controller == 'onos' %}
+ plugin: onos
+{% if opnfv.ext_port is defined %}
+{% if opnfv.data_port is defined %}
+ bridge-mappings: physnet1:br-data external:br-ex
+ data-port: br-data:{{ opnfv.data_port }} br-ex:{{ opnfv.ext_port }}
+{% else %}
+ bridge-mappings: external:br-ex
+ data-port: br-ex:{{ opnfv.ext_port }}
+{% endif %}
+{% endif %}
+{% if os.network.sfc %}
+ profile: onos-sfc
+{% endif %}
+{% elif os.network.controller == 'odl' %}
+ plugin: ovs-odl
+{% if opnfv.ext_port is defined %}
+ ext-port: {{ opnfv.ext_port }}
+{% endif %}
+{% else %}
+{% if opnfv.ext_port is defined %}
+ ext-port: {{ opnfv.ext_port }}
+{% endif %}
+{% endif %}
+ instance-mtu: 1400
+ to:
+ - "nodes/0"
+{# Empty block to avoid bad block trim #}
--- /dev/null
+ neutron-openvswitch:
+ charm: ./{{ ubuntu.release }}/neutron-openvswitch
+ options:
+{% if opnfv.spaces_dict.data is defined %}
+ os-data-network: {{ opnfv.spaces_dict.data.cidr }}
+{% endif %}
+{% if os.network.dpdk %}
+ enable-dpdk: true
+ #dpdk-driver: uio_pci_generic
+ #data-port: ""
+ #default-socket-memory: 1024
+ #default-socket-cores: 1
+{% endif %}
+{% if opnfv.ext_port is defined %}
+{% if os.network.dvr %}
+{% if os.network.controller == 'nosdn' %}
+{% if opnfv.data_port is defined %}
+ bridge-mappings: physnet1:br-data external:br-ex
+ data-port: br-data:{{ opnfv.data_port }} br-ex:{{ opnfv.ext_port }}
+{% else %}
+ bridge-mappings: external:br-ex
+ data-port: br-ex:{{ opnfv.ext_port }}
+{% endif %}
+{% else %}
+ ext-port: {{ opnfv.ext_port }}
+{% endif %}
+{% endif %}
+{% endif %}
+{# Empty block to avoid bad block trim #}
--- /dev/null
+ nova-cloud-controller:
+ charm: "./{{ ubuntu.release }}/nova-cloud-controller"
+ num_units: {{ unit_qty() }}
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+ region: {{ os.region }}
+ #console-access-protocol: novnc
+ neutron-external-network: ext-net
+ service-guard: true
+{% if os.beta.public_api %}
+ use-internal-endpoints: true
+{% endif %}
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if os.api.worker_multiplier %}
+ worker-multiplier: {{ os.api.worker_multiplier }}
+{% endif %}
+{% if os.ha.mode == 'ha' %}
+ vip: {{ opnfv.vip.nova }}
+{% endif %}
+{% if opnfv.domain is defined %}
+ console-proxy-ip: {{ opnfv.domain }}
+{% endif %}
+ network-manager: Neutron
+{% if os.beta.public_api %}
+ os-public-hostname: api.{{ opnfv.domain }}
+{% endif %}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
--- /dev/null
+ nova-compute:
+ charm: "./{{ ubuntu.release }}/nova-compute"
+{% if os.hyperconverged %}
+ num_units: {{ opnfv.units - 1 }}
+{% else %}
+{% if os.ha.mode == 'ha' %}
+ num_units: {{ opnfv.units - 3 }}
+{% else %}
+ num_units: {{ opnfv.units - 1 }}
+{% endif %}
+{% endif %}
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+ enable-live-migration: true
+ enable-resize: true
+ migration-auth-type: ssh
+{% if os.beta.public_api %}
+ use-internal-endpoints: true
+{% endif %}
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if opnfv.storage_dict.ceph is defined %}
+ ceph-osd-replication-count: {{ unit_ceph_qty() }}
+{% endif %}
+{% if os.beta.huge_pages %}
+ hugepages: "50%"
+{% endif %}
+{% if os.lxd %}
+ virt-type: lxd
+{% endif %}
+ to:
+{% if os.hyperconverged %}
+{% for unit_id in range(1, opnfv.units) %}
+ - "nodes/{{ unit_id }}"
+{% endfor %}
+{% else %}
+{% if os.ha.mode == 'ha' %}
+{% for unit_id in range(0, opnfv.units - 3) %}
+ - "nodes-compute/{{ unit_id }}"
+{% endfor %}
+{% else %}
+{% for unit_id in range(0, opnfv.units - 1) %}
+ - "nodes-compute/{{ unit_id }}"
+{% endfor %}
+{% endif %}
+{% endif %}
--- /dev/null
+ zookeeper:
+ charm: ./{{ ubuntu.release }}/zookeeper
+ num_units: {{ unit_qty() }}
+ series: {{ ubuntu.release }}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+ cassandra:
+ charm: ./{{ ubuntu.release }}/cassandra
+ num_units: {{ unit_qty() }}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+ options:
+ authenticator: AllowAllAuthenticator
+ contrail-configuration:
+ charm: ./{{ ubuntu.release }}/contrail-configuration
+ num_units: {{ unit_qty() }}
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+ install-sources: ppa:opencontrail/trunk-20160812
+{% if os.ha.mode == 'ha' %}
+ vip: {{ opnfv.vip.contrailconfig }}
+{% endif %}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+ contrail-control:
+ charm: ./{{ ubuntu.release }}/contrail-control
+ num_units: {{ unit_qty() }}
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+ install-sources: ppa:opencontrail/trunk-20160812
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+ contrail-analytics:
+ charm: ./{{ ubuntu.release }}/contrail-analytics
+ num_units: {{ unit_qty() }}
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+ install-sources: ppa:opencontrail/trunk-20160812
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+ contrail-webui:
+ charm: ./{{ ubuntu.release }}/contrail-webui
+ num_units: {{ unit_qty() }}
+ options:
+ install-sources: ppa:opencontrail/trunk-20160812
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+ haproxy:
+ charm: ./{{ ubuntu.release }}/haproxy
+ num_units: {{ unit_qty() }}
+ options:
+ peering_mode: active-active
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+ kafka:
+ charm: ./{{ ubuntu.release }}/kafka
+ num_units: {{ unit_qty() }}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+
--- /dev/null
+ odl-controller:
+ charm: ./{{ ubuntu.release }}/odl-controller
+ num_units: 1
+ options:
+ install-url: "https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distribution-karaf/0.5.0-Boron/distribution-karaf-0.5.0-Boron.tar.gz"
+{% if os.network.sfc %}
+ profile: "openvswitch-odl-beryllium-sfc"
+{% elif os.network.bgpvpn %}
+ profile: "openvswitch-odl-beryllium-vpn"
+{% elif os.network.odll3 %}
+ profile: "openvswitch-odl-beryllium-l3"
+{% else %}
+ profile: "openvswitch-odl-boron"
+{% endif %}
+ http-proxy: "http://squid.internal:3128"
+ https-proxy: "http://squid.internal:3128"
+ to:
+ - "lxd:nodes/0"
--- /dev/null
+ onos-controller:
+ charm: ./{{ ubuntu.release }}/onos-controller
+ num_units: 1
+ options:
+{% if opnfv.ext_port is defined %}
+ ext-port: {{ opnfv.ext_port }}
+{% endif %}
+ #gateway-mac: "default"
+ to:
+ - "lxd:nodes/0"
--- /dev/null
+ openstack-dashboard:
+ charm: "./{{ ubuntu.release }}/openstack-dashboard"
+ num_units: {{ unit_qty() }}
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+ secret: admin
+ webroot: /
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if os.ha.mode == 'ha' %}
+ vip: {{ opnfv.vip.dashboard }}
+{% endif %}
+{% if os.beta.public_api %}
+ endpoint-type: internalURL
+{% endif %}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
--- /dev/null
+ opnfv-promise:
+ charm: ./{{ ubuntu.release }}/promise
+ num_units: 1
+ to:
+ - "lxd:nodes/0"
+{# Empty block to avoid bad block trim #}
--- /dev/null
+{% if opnfv.spaces_dict.storage is defined %}
+ #ceph-cluster-network: {{ opnfv.spaces_dict.storage.cidr }}
+{% endif %}
+{% if os.network.ipv6 %}
+ #prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if os.ha.mode == 'ha' %}
+{% if os.api.haproxy_timeout %}
+ #haproxy-server-timeout: {{ os.api.haproxy_timeout }}
+ #haproxy-client-timeout: {{ os.api.haproxy_timeout }}
+ #haproxy-queue-timeout: {{ os.api.haproxy_timeout }}
+ #haproxy-connect-timeout: {{ os.api.haproxy_timeout }}
+{% endif %}
+{% endif %}
+{% if os.api.worker_multiplier %}
+ #worker-multiplier: {{ os.api.worker_multiplier }}
+{% endif %}
+{% if os.network.dvr %}
+ #enable-dvr: true
+{% endif %}
+{% if os.network.l2_population %}
+ #l2-population: true
+{% endif %}
+{% if ubuntu.release == 'trusty' %}
+ #source: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+{% endif %}
+{% if ubuntu.release == 'xenial' %}
+{% if os.release == 'newton' %}
+ #source: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+{% endif %}
+{% endif %}
+{% if os.ha.mode == 'nonha' %}
+{% if opnfv.storage_dict.ceph is defined %}
+ #ceph-osd-replication-count: {{ unit_ceph_qty() }}
+{% endif %}
+{% endif %}
+ #admin-role: {{ os.admin.role }}
+ #keystone-admin-role: {{ os.admin.role }}
+{% if os.beta.public_api %}
+ #use-internal-endpoints: true
+{% endif %}
--- /dev/null
+ rabbitmq-server:
+ charm: "./{{ ubuntu.release }}/rabbitmq-server"
+ num_units: {{ unit_qty() }}
+ options:
+{% if opnfv.storage_dict.ceph is defined %}
+ ceph-osd-replication-count: {{ unit_ceph_qty() }}
+{% endif %}
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if os.ha.mode == 'ha' %}
+ vip: {{ opnfv.vip.rabbitmq }}
+ min-cluster-size: {{ unit_qty() }}
+{% endif %}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
--- /dev/null
+ - [ 'nova-compute:amqp', 'rabbitmq-server:amqp' ]
+ - [ 'neutron-gateway:amqp', 'rabbitmq-server:amqp' ]
+ - [ 'keystone:shared-db', 'mysql:shared-db' ]
+ - [ 'nova-cloud-controller:identity-service', 'keystone:identity-service' ]
+ - [ 'glance:identity-service', 'keystone:identity-service' ]
+ - [ 'neutron-api:identity-service', 'keystone:identity-service' ]
+ - [ 'neutron-api:shared-db', 'mysql:shared-db' ]
+ - [ 'neutron-api:amqp', 'rabbitmq-server:amqp' ]
+ - [ 'neutron-gateway:neutron-plugin-api', 'neutron-api:neutron-plugin-api' ]
+ - [ 'glance:shared-db', 'mysql:shared-db' ]
+ - [ 'glance:amqp', 'rabbitmq-server:amqp' ]
+ - [ 'nova-cloud-controller:image-service', 'glance:image-service' ]
+ - [ 'nova-compute:image-service', 'glance:image-service' ]
+ - [ 'nova-cloud-controller:cloud-compute', 'nova-compute:cloud-compute' ]
+ - [ 'nova-cloud-controller:amqp', 'rabbitmq-server:amqp' ]
+ - [ 'nova-cloud-controller:quantum-network-service', 'neutron-gateway:quantum-network-service' ]
+ - [ 'openstack-dashboard:identity-service', 'keystone:identity-service' ]
+ - [ 'nova-cloud-controller:shared-db', 'mysql:shared-db' ]
+ - [ 'nova-cloud-controller:neutron-api', 'neutron-api:neutron-api' ]
+ - [ 'cinder:image-service', 'glance:image-service' ]
+ - [ 'cinder:amqp', 'rabbitmq-server:amqp' ]
+ - [ 'cinder:identity-service', 'keystone:identity-service' ]
+ - [ 'cinder:cinder-volume-service', 'nova-cloud-controller:cinder-volume-service' ]
+ - [ 'cinder:shared-db', 'mysql:shared-db' ]
+{% if opnfv.storage_dict.scaleio is defined %}
+ - [ 'cinder:storage-backend', 'scaleio-openstack:storage-backend' ]
+ - [ 'scaleio-mdm:scaleio-sds', 'scaleio-sds:scaleio-sds' ]
+# - [ 'scaleio-mdm:scaleio-sds', 'scaleio-sds-pd2:scaleio-sds' ]
+ - [ 'scaleio-mdm:scaleio-mdm', 'scaleio-sdc:scaleio-mdm' ]
+ - [ 'scaleio-mdm:scaleio-mdm', 'scaleio-gw:scaleio-mdm' ]
+ - [ 'scaleio-openstack:scaleio-gw', 'scaleio-gw:scaleio-gw' ]
+ - [ 'nova-compute:ephemeral-backend', 'scaleio-openstack:ephemeral-backend' ]
+{% else %}
+ - [ 'cinder-ceph:storage-backend', 'cinder:storage-backend' ]
+ - [ 'ceph:client', 'nova-compute:ceph' ]
+ - [ 'ceph:client', 'cinder-ceph:ceph' ]
+ - [ 'ceph:client', 'glance:ceph' ]
+ - [ 'ceph-osd:mon', 'ceph:osd' ]
+ - [ 'ceph-radosgw:mon', 'ceph:radosgw' ]
+ - [ 'ceph-radosgw:identity-service', 'keystone:identity-service' ]
+ - [ 'congress:shared-db', 'mysql:shared-db' ]
+ - [ 'congress:identity-service', 'keystone:identity-service' ]
+ - [ 'congress:amqp', 'rabbitmq-server:amqp' ]
+{% endif %}
+ - [ 'ceilometer:amqp', 'rabbitmq-server:amqp' ]
+ - [ 'ceilometer-agent:ceilometer-service', 'ceilometer:ceilometer-service' ]
+ - [ 'ceilometer:identity-service', 'keystone:identity-service' ]
+ - [ 'ceilometer:identity-notifications', 'keystone:identity-notifications' ]
+ - [ 'ceilometer-agent:nova-ceilometer', 'nova-compute:nova-ceilometer' ]
+ - [ 'ceilometer:shared-db', 'mongodb:database' ]
+ - [ 'heat:shared-db', 'mysql:shared-db' ]
+ - [ 'heat:identity-service', 'keystone:identity-service' ]
+ - [ 'heat:amqp', 'rabbitmq-server:amqp' ]
+ - [ 'aodh:amqp', 'rabbitmq-server:amqp' ]
+ - [ 'aodh:shared-db', 'mysql:shared-db' ]
+ - [ 'aodh:identity-service', 'keystone:identity-service' ]
+{% if os.lxd %}
+ - [ 'nova-compute:lxd', 'lxd:lxd' ]
+{% endif %}
+{% if os.network.controller == 'nosdn' %}
+ - [ 'neutron-openvswitch:amqp', 'rabbitmq-server:amqp' ]
+ - [ 'nova-compute:neutron-plugin', 'neutron-openvswitch:neutron-plugin' ]
+ - [ 'neutron-openvswitch:neutron-plugin-api', 'neutron-api:neutron-plugin-api' ]
+{% elif os.network.controller == 'odl' %}
+ - [ 'neutron-api:neutron-plugin-api-subordinate', 'neutron-api-odl:neutron-plugin-api-subordinate' ]
+ - [ 'nova-compute:neutron-plugin', 'openvswitch-odl:neutron-plugin' ]
+ - [ 'neutron-gateway', 'openvswitch-odl' ]
+ - [ 'openvswitch-odl:ovsdb-manager', 'odl-controller:ovsdb-manager' ]
+ - [ 'neutron-api-odl:odl-controller', 'odl-controller:controller-api' ]
+{% elif os.network.controller == 'onos' %}
+ - [ 'neutron-api:neutron-plugin-api-subordinate', 'neutron-api-onos:neutron-plugin-api-subordinate' ]
+ - [ 'nova-compute:neutron-plugin', 'openvswitch-onos:neutron-plugin' ]
+ - [ 'neutron-gateway', 'openvswitch-onos' ]
+ - [ 'openvswitch-onos:ovsdb-manager', 'onos-controller:ovsdb-manager' ]
+ - [ 'neutron-api-onos:onos-controller', 'onos-controller:controller-api' ]
+{% elif os.network.controller == 'ocl' %}
+ - [ 'kafka', 'zookeeper' ]
+ - [ 'contrail-configuration:cassandra', 'cassandra:database' ]
+ - [ 'contrail-configuration:contrail-analytics-api', 'contrail-analytics:contrail-analytics-api' ]
+ - [ 'contrail-configuration', 'zookeeper' ]
+ - [ 'contrail-configuration', 'rabbitmq-server' ]
+ - [ 'contrail-configuration', 'keystone' ]
+ - [ 'contrail-configuration', 'haproxy' ]
+ - [ 'contrail-analytics:cassandra', 'cassandra:database' ]
+ - [ 'contrail-analytics:contrail-api', 'contrail-configuration:contrail-api' ]
+ - [ 'contrail-analytics:contrail-discovery', 'contrail-configuration:contrail-discovery' ]
+ - [ 'contrail-analytics', 'kafka' ]
+ - [ 'contrail-analytics', 'zookeeper' ]
+ - [ 'contrail-analytics', 'keystone' ]
+ - [ 'contrail-analytics', 'haproxy' ]
+ - [ 'contrail-control:contrail-discovery', 'contrail-configuration:contrail-discovery' ]
+ - [ 'contrail-control:contrail-ifmap', 'contrail-configuration:contrail-ifmap' ]
+ - [ 'contrail-control:contrail-api', 'contrail-configuration:contrail-api' ]
+ - [ 'contrail-control', 'keystone' ]
+ - [ 'neutron-api-contrail', 'contrail-configuration' ]
+ - [ 'neutron-api-contrail', 'keystone' ]
+ - [ 'contrail-webui', 'keystone' ]
+ - [ 'contrail-webui:contrail_api', 'contrail-configuration:contrail-api' ]
+ - [ 'contrail-webui:contrail_discovery', 'contrail-configuration:contrail-discovery' ]
+ - [ 'contrail-webui:cassandra', 'cassandra:database' ]
+ - [ 'contrail-webui', 'haproxy' ]
+ - [ 'neutron-contrail', 'keystone' ]
+ - [ 'neutron-contrail:contrail-discovery', 'contrail-configuration:contrail-discovery' ]
+ - [ 'neutron-contrail:contrail-api', 'contrail-configuration:contrail-api' ]
+ - [ 'haproxy', 'keepalived' ]
+ - [ 'ceilometer', 'ceilometer-contrail' ]
+ - [ 'ceilometer-contrail', 'contrail-analytics' ]
+{% endif %}
--- /dev/null
+{% if opnfv.storage_dict.scaleio is defined %}
+ scaleio-mdm:
+ charm: "./{{ ubuntu.release }}/scaleio-mdm"
+ num_units: {{ unit_scaleio_qty() }}
+ options:
+ cluster-mode: 3
+ to:
+{% if os.hyperconverged %}
+{% for unit_id in range(0, 3) %}
+ - "nodes/{{ unit_id }}"
+{% endfor %}
+{% else %}
+ - "nodes/0"
+ - "nodes-compute/0"
+ - "nodes-compute/1"
+{% endif %}
+ scaleio-sds:
+ charm: "./{{ ubuntu.release }}/scaleio-sds"
+ num_units: {{ opnfv.units }}
+ options:
+ protection-domain: 'pd1'
+ device-paths: {{ opnfv.storage_dict.scaleio.disk }}
+ to:
+{% if os.hyperconverged %}
+{% for unit_id in range(0, opnfv.units) %}
+ - "nodes/{{ unit_id }}"
+{% endfor %}
+{% else %}
+{% for unit_id in range(0, opnfv.units) %}
+ - "{{ unit_id }}"
+{% endfor %}
+{% endif %}
+ scaleio-sdc:
+ charm: "./{{ ubuntu.release }}/scaleio-sdc"
+ num_units: {{ opnfv.units }}
+ to:
+{% if os.hyperconverged %}
+{% for unit_id in range(0, opnfv.units) %}
+ - "nodes/{{ unit_id }}"
+{% endfor %}
+{% else %}
+{% for unit_id in range(0, opnfv.units) %}
+ - "{{ unit_id }}"
+{% endfor %}
+{% endif %}
+ scaleio-gw:
+ charm: "./{{ ubuntu.release }}/scaleio-gw"
+ num_units: 1
+ to:
+ - "nodes/0"
+ scaleio-gui:
+ charm: "./{{ ubuntu.release }}/scaleio-gui"
+ num_units: 1
+ to:
+ - "nodes/0"
+{% endif %}
--- /dev/null
+
+ ceilometer-agent:
+ charm: ./{{ ubuntu.release }}/ceilometer-agent
+ options:
+ openstack-origin: "cloud:{{ ubuntu.release }}-{{ os.release }}"
+{% if os.beta.public_api %}
+ use-internal-endpoints: true
+{% endif %}
+{% if opnfv.storage_dict.ceph is defined %}
+ cinder-ceph:
+ charm: ./{{ ubuntu.release }}/cinder-ceph
+ options:
+{% if opnfv.storage_dict.ceph is defined %}
+ ceph-osd-replication-count: {{ unit_ceph_qty() }}
+{% endif %}
+{% endif %}
+{% if os.network.controller == 'nosdn' %}
+{% include 'neutron-ovs.yaml' %}
+{% elif os.network.controller == 'odl' %}
+ neutron-api-odl:
+ charm: ./{{ ubuntu.release }}/neutron-api-odl
+ options:
+ overlay-network-type: 'vxlan'
+ security-groups: True
+ openvswitch-odl:
+ charm: ./{{ ubuntu.release }}/openvswitch-odl
+ options:
+{% if opnfv.spaces_dict.data is defined %}
+ os-data-network: {{ opnfv.spaces_dict.data.cidr }}
+{% endif %}
+{% elif os.network.controller == 'onos' %}
+ neutron-api-onos:
+ charm: ./{{ ubuntu.release }}/neutron-api-onos
+ options:
+ overlay-network-type: 'vxlan'
+ security-groups: True
+ openvswitch-onos:
+ charm: ./{{ ubuntu.release }}/openvswitch-onos
+ options:
+{% if opnfv.spaces_dict.data is defined %}
+ os-data-network: {{ opnfv.spaces_dict.data.cidr }}
+{% endif %}
+{% if os.network.sfc %}
+ profile: onos-sfc
+{% endif %}
+{% elif os.network.controller == 'ocl' %}
+ ceilometer-contrail:
+ charm: ./{{ ubuntu.release }}/ceilometer-contrail
+ options:
+ install-sources: ppa:opencontrail/trunk-20160812
+ neutron-api-contrail:
+ charm: ./{{ ubuntu.release }}/neutron-api-contrail
+ options:
+ install-sources: ppa:opencontrail/trunk-20160812
+ neutron-contrail:
+ charm: ./{{ ubuntu.release }}/neutron-contrail
+ options:
+ install-sources: ppa:opencontrail/trunk-20160812
+ keepalived:
+ charm: ./{{ ubuntu.release }}/keepalived
+ options:
+ router-id: 1
+{% if os.ha.mode == 'ha' %}
+ virtual-ip: {{ opnfv.vip.keepalived }}
+{% endif %}
+{% endif %}
+{% if os.lxd %}
+ lxd:
+ charm: ./{{ ubuntu.release }}/lxd
+ options:
+{% endif %}
+{% if opnfv.storage_dict.scaleio is defined %}
+ scaleio-openstack:
+ charm: ./{{ ubuntu.release }}/scaleio-openstack
+ options:
+ protection-domains: 'pd1'
+{% endif %}
--- /dev/null
+opnfv:
+{% set net_prefix = opnfv.spaces_dict.admin.cidr[:-4] %}
+ admNetgway: {{ opnfv.spaces_dict.admin.gateway }}
+ admNetwork: {{ net_prefix }}2
+ admin_password: openstack
+{% if opnfv.storage_dict.ceph is defined %}
+ ceph-disk: {{ opnfv.storage_dict.ceph.disk }}
+{% endif %}
+{% if opnfv.spaces_dict.data is defined %}
+ dataNetwork: {{ opnfv.spaces_dict.data.cidr }}
+{% endif %}
+ domain: {{ lab.racks[0].osdomainname }}
+ ext-port: {{ lab.racks[0]['ext-port'] }}
+ ext_port: {{ lab.racks[0]['ext-port'] }}
+ floating-ip-range: {{ lab.racks[0]['floating-ip-range'] }}
+ interface-enable: {{ lab.racks[0].ifnamelist }}
+ os-domain-name: {{ lab.racks[0].osdomainname }}
+{% if opnfv.spaces_dict.public is defined %}
+ publicNetwork: {{ opnfv.spaces_dict.public.cidr }}
+{% endif %}
+ spaces:
+{% for net in opnfv.spaces %}
+ - bridge: {{ net.bridge }}
+ cidr: {{ net.cidr }}
+ gateway: {{ net.gateway or '' }}
+ type: {{ net.type }}
+ vlan: {{ net.vlan or '' }}
+{% endfor %}
+ storage:
+{% for storage in opnfv.storage %}
+ - disk: {{ storage.disk }}
+ type: {{ storage.type }}
+{% endfor %}
+{% if opnfv.spaces_dict.storage is defined %}
+ storageNetwork: {{ opnfv.spaces_dict.storage.cidr }}
+{% endif %}
+ units: {{ lab.racks[0].nodes|count }}
+ vip:
+ ceilometer: {{ net_prefix }}24
+ cinder: {{ net_prefix }}29
+ dashboard: {{ net_prefix }}21
+ glance: {{ net_prefix }}22
+ heat: {{ net_prefix }}28
+ keystone: {{ net_prefix }}23
+ mysql: {{ net_prefix }}25
+ neutron: {{ net_prefix }}27
+ nova: {{ net_prefix }}26
+ rabbitmq: {{ net_prefix }}20
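+{# Illustrative values only: with a hypothetical admin space cidr of
+   "10.4.1.0/24", net_prefix becomes "10.4.1.", so admNetwork is 10.4.1.2 and
+   the VIPs above land on 10.4.1.20 through 10.4.1.29. #}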
--- /dev/null
+opnfv-{{ lab.location }}{{ lab.racks[0].rack }}-maas:
+ juju-bootstrap:
+{% include 'juju-bootstrap.yaml' %}
+ maas:
+ apt_sources:
+ - ppa:maas/stable
+ - ppa:juju/stable
+ arch: amd64
+ boot_source:
+ keyring_filename: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
+ selections:
+ '1':
+ arches: amd64
+ labels: release
+ os: ubuntu
+ release: xenial
+ subarches: '*'
+ url: http://maas.ubuntu.com/images/ephemeral-v2/releases/
+ disk_size: 160G
+ interfaces:
+{% for net in opnfv.spaces %}
+ - bridge={{ net.bridge }},model=virtio
+{% endfor %}
+ ip_address: {{ opnfv.spaces_dict.admin.cidr[:-4] }}5
+ memory: 4096
+ name: opnfv-{{ lab.location }}{{ lab.racks[0].rack }}
+ network_config: |
+{% include 'maas-network_config.yaml' %}
+ node_group_ifaces:
+{% include 'maas-node_group_ifaces.yaml' %}
+ nodes:
+{% include 'maas-nodes.yaml' %}
+ password: ubuntu
+ pool: default
+ release: trusty
+ settings:
+ maas_name: {{ lab.location }}{{ lab.racks[0].rack }}
+ main_archive: http://archive.ubuntu.com/ubuntu
+ upstream_dns: {{ lab.racks[0].dns }}
+ user: ubuntu
+ vcpus: 4
+ virsh:
+ rsa_priv_key: {{ os.home }}/.ssh/id_rsa
+ rsa_pub_key: {{ os.home }}/.ssh/id_rsa.pub
+ uri: qemu+ssh://{{ os.user }}@{{ os.brAdmIP }}/system
--- /dev/null
+ arch: amd64
+ disk_size: 60G
+ interfaces:
+{% for net in ['admin','public','external'] %}
+{% if net in opnfv.spaces_dict %}
+ - bridge={{ opnfv.spaces_dict[net].bridge }},model=virtio
+{% endif %}
+{% endfor %}
+ memory: 4096
+ name: bootstrap
+ pool: default
+ vcpus: 4
+{# Empty block to avoid bad block trim #}
--- /dev/null
+ auto lo
+ iface lo inet loopback
+{% set ethid = 0 %}
+{% for net in opnfv.spaces %}
+
+ auto eth{{ ethid }}
+ iface eth{{ ethid }} inet static
+ netmask 255.255.255.0
+{% if net.type!='external' %}
+{% set net_prefix = net.cidr[:-4] %}
+ address {{ net_prefix }}5
+{% else %}
+ address {{ net.ipaddress }}
+{% endif %}
+{% if net.type=='admin' %}
+ gateway {{ net.gateway }}
+ dns-nameservers {{ lab.racks[0].dns }} {{ net_prefix }}5 127.0.0.1
+{% endif %}
+{% set ethid = ethid+1 %}
+{% endfor %}
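+{# Illustrative rendering for the admin space only, assuming it is listed first
+   and uses a hypothetical cidr of 10.4.1.0/24 with gateway 10.4.1.1:
+     auto eth0
+     iface eth0 inet static
+       netmask 255.255.255.0
+       address 10.4.1.5
+       gateway 10.4.1.1
+       dns-nameservers <rack dns> 10.4.1.5 127.0.0.1
+#}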
--- /dev/null
+{% set ethid = 0 %}
+{% for net in opnfv.spaces %}
+{% if net.type!='external' %}
+{% set net_prefix = net.cidr[:-4] %}
+ - broadcast_ip: {{ net_prefix }}255
+ device: eth{{ ethid }}
+ dynamic_range:
+ high: {{ net_prefix }}250
+ low: {{ net_prefix }}81
+ ip: {{ net_prefix }}5
+{% if net.gateway!= None %}
+ router_ip: {{ net.gateway }}
+{% endif %}
+{% if net.type!='admin' %}
+ management: 1
+{% endif %}
+ static_range:
+ high: {{ net_prefix }}80
+ low: {{ net_prefix }}50
+ subnet_mask: 255.255.255.0
+{% set ethid = ethid+1 %}
+{% endif %}
+{% endfor %}
--- /dev/null
+{% for node in lab.racks[0].nodes %}
+ - interfaces:
+{% for nic in node.nics %}
+ - mac_address: {{ nic.mac[0] }}
+ mode: auto
+ name: {{ nic.ifname }}
+{% endfor %}
+{% if node.architecture=='x86_64' %}
+ architecture: amd64/generic
+{% endif %}
+ mac_addresses:
+{% for nic in node.nics %}
+ - {{ nic.mac[0] }}
+{% endfor %}
+ name: {{ node.name }}
+ power:
+{% if node.power.type=='ipmi' %}
+ address: {{ node.power.address }}
+ driver: LAN_2_0
+ pass: {{ node.power.pass }}
+ type: ipmi
+ user: {{ node.power.user }}
+{% elif node.power.type=='wakeonlan' %}
+ type: ether_wake
+ mac_address: {{ node.power.mac_address }}
+{% endif %}
+ tags: {{ ' '.join(node.roles) }}
+{% endfor %}
--- /dev/null
+ haproxy:
+ charm: "local:{{ opnfv.distro }}/haproxy"
+ num_units: 1
+ options:
+ services: |-
+{% for service in public_api_services.values() %}
+ - service_name: {{ service.name }}
+ service_host: {{ public_api_ip }}
+ service_port: {{ service.port }}
+ service_options: [mode http, balance leastconn]
+ servers: [[{{ service.name }}, {{ service.ip }}, {{ service.port }}, 'maxconn 100 cookie S0 check']]
+{% endfor %}
+ to:
+      - "nodes/0"
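+{# Illustrative rendering for a single hypothetical "keystone" entry in
+   public_api_services (addresses and port are placeholders):
+     services: |-
+       - service_name: keystone
+         service_host: 192.0.2.10
+         service_port: 5000
+         service_options: [mode http, balance leastconn]
+         servers: [[keystone, 10.4.1.23, 5000, 'maxconn 100 cookie S0 check']]
+#}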
esac
done
-deploy_dep() {
- sudo apt-add-repository ppa:juju/stable -y
- sudo apt-get update
- sudo apt-get install juju git juju-deployer -y
- juju init -f
- cp environments.yaml ~/.juju/
-}
-
#by default MAAS creates two VMs; create more here in case three additional VMs are needed.
createresource() {
maas_ip=`grep " ip_address" deployment.yaml | cut -d " " -f 10`
if [ ! -f ./environments.yaml ] && [ -e ~/.juju/environments.yaml ]; then
cp ~/.juju/environments.yaml ./environments.yaml
+ elif [ ! -f ./environments.yaml ] && [ -e ~/joid_config/environments.yaml ]; then
+ cp ~/joid_config/environments.yaml ./environments.yaml
fi
if [ ! -f ./deployment.yaml ] && [ -e ~/.juju/deployment.yaml ]; then
cp ~/.juju/deployment.yaml ./deployment.yaml
+ elif [ ! -f ./deployment.yaml ] && [ -e ~/joid_config/deployment.yaml ]; then
+ cp ~/joid_config/deployment.yaml ./deployment.yaml
fi
if [ ! -f ./labconfig.yaml ] && [ -e ~/.juju/labconfig.yaml ]; then
cp ~/.juju/labconfig.yaml ./labconfig.yaml
+ elif [ ! -f ./labconfig.yaml ] && [ -e ~/joid_config/labconfig.yaml ]; then
+ cp ~/joid_config/labconfig.yaml ./labconfig.yaml
fi
if [ ! -f ./deployconfig.yaml ] && [ -e ~/.juju/deployconfig.yaml ]; then
cp ~/.juju/deployconfig.yaml ./deployconfig.yaml
+ elif [ ! -f ./deployconfig.yaml ] && [ -e ~/joid_config/deployconfig.yaml ]; then
+ cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml
fi
#copy the script which needs to get deployed as part of the OPNFV release
echo " default-series: $opnfvdistro" >> environments.yaml
cp environments.yaml ~/.juju/
+ cp environments.yaml ~/joid_config/
if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
createresource
juju status > status.txt
if [ "$(grep -c "executing" status.txt )" -ge 1 ]; then
echo " still executing the reltionship within charms ..."
- if [ $timeoutiter -ge 90 ]; then
+ if [ $timeoutiter -ge 120 ]; then
retval=1
fi
timeoutiter=$((timeoutiter+1))
from optparse import OptionParser
from jinja2 import Environment, FileSystemLoader
+from distutils.version import LooseVersion, StrictVersion
import os
+import subprocess
import random
import yaml
import sys
scenarioconfig_file = 'default_deployment_config.yaml'
# Capture our current directory
-TPL_DIR = os.path.dirname(os.path.abspath(__file__))+'/config_tpl/bundle_tpl'
+jujuver = subprocess.check_output(["juju", "--version"])
+
+if LooseVersion(jujuver) >= LooseVersion('2'):
+ TPL_DIR = os.path.dirname(os.path.abspath(__file__))+'/config_tpl/juju2/bundle_tpl'
+else:
+ TPL_DIR = os.path.dirname(os.path.abspath(__file__))+'/config_tpl/bundle_tpl'
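+# Juju 2.x deploys native bundles ("juju deploy bundles.yaml") while Juju 1.x
+# goes through juju-deployer, so a separate template tree is kept under
+# config_tpl/juju2.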
#
# Prepare variables