controllername=`awk 'NR==1{print substr($1, 1, length($1)-1)}' deployconfig.yaml`
cloudname=`awk 'NR==1{print substr($1, 1, length($1)-1)}' deployconfig.yaml`
-juju bootstrap $controllername $cloudname --debug --to bootstrap.maas
+juju bootstrap $controllername $cloudname --debug --constraints tags=bootstrap --bootstrap-series=$1
if [ "$opnfvsdn" = "ocl" ]
then
- TAG="ubuntu16.04-4.0.2.0-34.tar.gz"
+ TAG="ubuntu16.04-4.1.1.0-103.tar.gz"
for ROLE in contrail-controller contrail-analytics contrail-analyticsdb
do
sudo apt-add-repository ppa:maas/stable -y
fi
if [ "bionic" != "$opnfvdistro" ]; then
- sudo apt-add-repository cloud-archive:pike -y
+ sudo apt-add-repository cloud-archive:queens -y
if [ "aarch64" == "$NODE_ARCTYPE" ]; then
- sudo add-apt-repository ppa:ubuntu-cloud-archive/pike-staging -y
+ sudo add-apt-repository ppa:ubuntu-cloud-archive/queens-staging -y
fi
fi
sudo virsh pool-start default || true
sudo virsh pool-autostart default || true
-# In case of virtual install set network
if [ "$virtinstall" -eq 1 ]; then
sudo virsh net-dumpxml default > default-net-org.xml
sed -i '/dhcp/d' default-net-org.xml
sudo virsh net-destroy default
sudo virsh net-define default-net-org.xml
sudo virsh net-start default
+ sudo virsh net-autostart default || true
rm -f default-net-org.xml
+else
+ # As we use kvm so setup network on admin network
+ ADMIN_BR=$(jq -r '.opnfv.spaces[] | select(.type=="admin").bridge' labconfig.json)
+ sed -i "s@brAdm@$ADMIN_BR@" net.xml
+ sudo virsh net-destroy default || true
+ sudo virsh net-undefine default || true
+ sudo virsh net-define net.xml || true
+ sudo virsh net-start default || true
+ sudo virsh net-autostart default || true
fi
#
maas $PROFILE maas set-config name=upstream_dns value=$MY_UPSTREAM_DNS || true
maas $PROFILE maas set-config name='maas_name' value=$MAAS_NAME || true
maas $PROFILE maas set-config name='ntp_server' value='ntp.ubuntu.com' || true
+ maas $PROFILE domain update 0 name=$MAAS_NAME || true
maas $PROFILE sshkeys create "key=$SSH_KEY" || true
for tag in bootstrap compute control storage
maas $PROFILE tags create name='opnfv-dpdk' comment='OPNFV DPDK enablement' \
kernel_opts='hugepagesz=2M hugepages=1024 hugepagesz=1G hugepages=20 default_hugepagesz=1G intel_iommu=on' || true
- maas $PROFILE package-repositories create name="Ubuntu Proposed new" \
- url="http://archive.ubuntu.com/ubuntu" components="main" \
- distributions="xenial-proposed" arches=amd64,i386
+ #maas $PROFILE package-repositories create name="Ubuntu Proposed new" \
+ # url="http://archive.ubuntu.com/ubuntu" components="main" \
+ # distributions="xenial-proposed" arches=amd64,i386
#create the required spaces.
maas $PROFILE space update 0 name=default || true
maas $PROFILE boot-source update $SOURCE_ID \
url=$URL keyring_filename=$KEYRING_FILE || true
+ maas $PROFILE boot-source-selections create 1 \
+ os="ubuntu" release="xenial" arches="amd64" \
+ labels="*" || true
+ maas $PROFILE boot-source-selections create 1 \
+ os="ubuntu" release="bionic" arches="amd64" \
+ labels="*" || true
+
if [ $NODE_ARCTYPE != "x86_64" ] ; then
- maas $PROFILE boot-source-selection update 1 1 arches="$NODE_ARCHES"
+ maas $PROFILE boot-source-selection update 1 1 arches="$NODE_ARCHES" || true
+ maas $PROFILE boot-source-selection update 1 2 arches="$NODE_ARCHES" || true
fi
if [ "$snapinstall" -eq "0" ]; then
VIRSHIP="" # TODO: parse from $VIRSHURL if needed
fi
- if [ "$virtinstall" -eq 1 ]; then
- netw=" --network bridge=virbr0,model=virtio"
- elif ([ "$VIRSHHOST" != "" ]); then
+ if ([ "$VIRSHHOST" != "" ]); then
# Get the bridge hosting the remote virsh
brid=$(ssh $VIRSHHOST "ip a l | grep $VIRSHHOST | perl -pe 's/.* (.*)\$/\$1/g'")
netw=" --network bridge=$brid,model=virtio"
done
fi
- maas $PROFILE pods create type=virsh power_address="$VIRSHURL" power_user=$USER
+ # Iterate to avoid "Conflict error" issue
+ for ii in 1 2 3 4 5 6 7 8 9 10
+ do
+ echo "Try $ii"
+ maas $PROFILE pods create type=virsh power_address="$VIRSHURL" power_user=$USER > /tmp/deploy.out 2>&1 || true
+ cat /tmp/deploy.out
+ if ! grep -qF 'Conflict' /tmp/deploy.out
+ then
+ break
+ else
+ continue
+ fi
+ done
+
# Make sure nodes are added into MAAS and none of them is in commissioning state
i=0
rm -rf precise
rm -rf trusty
rm -rf xenial
+rm -rf bionic
rm -rf ~/joid_config/admin-openrc
sleep 10
sudo sysctl -w vm.drop_caches=3
aodh:
charm: "./{{ ubuntu.release }}/aodh"
- num_units: 1
+ num_units: {{ unit_qty() }}
{% if os.service.bindings %}
bindings:
"": *oam-space
internal: *internal-space
shared-db: *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
openstack-origin: *openstack-origin
+{% if os.ha.mode == 'ha' %}
+ vip: *aodh-vip
+{% endif %}
+{% if os.api.ssl %}
+ ssl_ca: *ssl_ca
+ ssl_cert: *ssl_cert
+ ssl_key: *ssl_key
+ os-public-hostname: *hostname_aodh_public
+ os-internal-hostname: *hostname_aodh_public
+ os-admin-hostname: *hostname_aodh_public
+{% endif %}
to:
- - "lxd:nodes/0"
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
{# Empty block to avoid bad block trim #}
{% else %}
nodes:
charm: "cs:{{ ubuntu.release }}/ubuntu"
+ series: {{ ubuntu.release }}
{% if os.ha.mode == 'ha' %}
num_units: 3
{% else %}
nodes-compute:
charm: "cs:{{ ubuntu.release }}/ubuntu"
+ series: {{ ubuntu.release }}
{% if os.ha.mode == 'ha' %}
num_units: {{ opnfv.units - 3 }}
{% else %}
{% endif %}
ntp:
charm: "./{{ ubuntu.release }}/ntp"
+ series: {{ ubuntu.release }}
{% if os.network.controller == 'ocl' %}
options:
source: "0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org"
{% include 'rabbitmq.yaml' %}
{% include 'neutron-api.yaml' %}
{% include 'heat.yaml' %}
+{% include 'designate.yaml' %}
{% if os.network.controller != 'ocl' %}
{% include 'neutron-gateway.yaml' %}
{% include 'ceilometer.yaml' %}
- mongodb:
- charm: ./{{ ubuntu.release }}/mongodb
- num_units: 1
- constraints: *oam-space-constr
+ gnocchi:
+ charm: ./{{ ubuntu.release }}/gnocchi
+ num_units: {{ unit_qty() }}
+{% if os.service.bindings %}
+ bindings:
+ "": *oam-space
+ public: *public-space
+ admin: *admin-space
+ internal: *internal-space
+{% endif %}
+ series: {{ ubuntu.release }}
+ options:
+ openstack-origin: *openstack-origin
+{% if os.ha.mode == 'ha' %}
+ vip: *gnocchi-vip
+{% endif %}
+{% if os.api.ssl %}
+ ssl_ca: *ssl_ca
+ ssl_cert: *ssl_cert
+ ssl_key: *ssl_key
+ os-public-hostname: *hostname_gnocchi_public
+ os-internal-hostname: *hostname_gnocchi_public
+ os-admin-hostname: *hostname_gnocchi_public
+{% endif %}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+ memcached:
+ charm: ./{{ ubuntu.release }}/memcached
+ num_units: 2
{% if os.service.bindings %}
bindings:
"": *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
+ options:
+ allow-ufw-ip6-softfail: True
to:
- "lxd:nodes/0"
+ - "lxd:nodes/1"
ceilometer:
charm: "./{{ ubuntu.release }}/ceilometer"
num_units: {{ unit_qty() }}
admin: *admin-space
internal: *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
openstack-origin: *openstack-origin
region: *openstack-region
{% if os.ha.mode == 'ha' %}
- vip: {{ opnfv.vip.ceilometer }}
+ vip: *ceilometer-vip
+{% endif %}
+{% if os.api.ssl %}
+ ssl_ca: *ssl_ca
+ ssl_cert: *ssl_cert
+ ssl_key: *ssl_key
+ os-public-hostname: *hostname_ceilometer_public
+ os-internal-hostname: *hostname_ceilometer_public
+ os-admin-hostname: *hostname_ceilometer_public
{% endif %}
to:
{% for unit_id in to_select() %}
public: *ceph-public-space
cluster: *ceph-cluster-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
expected-osd-count: {{ unit_ceph_qty() }}
source: *openstack-origin
public: *ceph-public-space
cluster: *ceph-cluster-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
osd-devices: *osd-devices
osd-journal: *osd-journal
- osd-reformat: 'yes'
+ #osd-reformat: 'True'
source: *openstack-origin
{% if os.network.ipv6 %}
prefer-ipv6: {{ os.network.ipv6 }}
internal: *internal-space
mon: *ceph-public-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
region: *openstack-region
operator-roles: "Member,admin,SwiftOperator"
ceph-osd-replication-count: {{ unit_ceph_qty() }}
{% endif %}
{% if os.ha.mode == 'ha' %}
- vip: {{ opnfv.vip.radosgw }}
+ vip: *rados-gateway-vip
+{% endif %}
+{% if os.api.ssl %}
+ ssl_ca: *ssl_ca
+ ssl_cert: *ssl_cert
+ ssl_key: *ssl_key
+ os-public-hostname: *hostname_radosgw_public
+ os-admin-hostname: *hostname_radosgw_public
+ os-internal-hostname: *hostname_radosgw_public
{% endif %}
to:
{% for unit_id in to_select(unit_qty()) %}
internal: *internal-space
shared-db: *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
openstack-origin: *openstack-origin
{% if os.git_repo.origin_git %}
ceph-osd-replication-count: {{ unit_ceph_qty() }}
{% endif %}
{% if os.ha.mode == 'ha' %}
- vip: {{ opnfv.vip.cinder }}
+ vip: *cinder-vip
+{% endif %}
+{% if os.api.ssl %}
+ ssl_ca: *ssl_ca
+ ssl_cert: *ssl_cert
+ ssl_key: *ssl_key
+ os-public-hostname: *hostname_cinder_public
+ os-internal-hostname: *hostname_cinder_public
+ os-admin-hostname: *hostname_cinder_public
{% endif %}
to:
{% if opnfv.storage_dict.scaleio is defined %}
shared-db: *internal-space
public: *public-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
region: *openstack-region
{% if ubuntu.release == 'trusty' %}
--- /dev/null
+ designate:
+ charm: "./{{ ubuntu.release }}/designate"
+ num_units: {{ unit_qty() }}
+{% if os.service.bindings %}
+ bindings:
+ "": *oam-space
+ public: *public-space
+ admin: *admin-space
+ internal: *internal-space
+ shared-db: *internal-space
+{% endif %}
+ series: {{ ubuntu.release }}
+ options:
+ openstack-origin: *openstack-origin
+{% if os.git_repo.origin_git %}
+ openstack-origin-git: "{{ os.git_repo.branch }}"
+{% endif %}
+ region: *openstack-region
+{% if os.api.ssl %}
+ ssl_ca: *ssl_ca
+ ssl_cert: *ssl_cert
+ ssl_key: *ssl_key
+ os-public-hostname: *hostname_keystone_public
+ os-internal-hostname: *hostname_keystone_public
+ os-admin-hostname: *hostname_keystone_public
+{% endif %}
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+{% if os.api.worker_multiplier %}
+ worker-multiplier: *worker-multiplier
+{% endif %}
+{% if os.ha.mode == 'ha' %}
+ vip: *designate-vip
+{% endif %}
+ to:
+{% for unit_id in to_select() %}
+ - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+ designate-bind:
+ charm: "./{{ ubuntu.release }}/designate-bind"
+ num_units: 2
+ bindings:
+ "": *internal-space
+ series: {{ ubuntu.release }}
+ to:
+ - "lxd:nodes/0"
+ - "lxd:nodes/1"
+
internal: *internal-space
shared-db: *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
openstack-origin: *openstack-origin
worker-multiplier: *worker-multiplier
ceph-osd-replication-count: {{ unit_ceph_qty() }}
{% endif %}
{% if os.ha.mode == 'ha' %}
- vip: {{ opnfv.vip.glance }}
+ vip: *glance-vip
+{% endif %}
+{% if os.api.ssl %}
+ ssl_ca: *ssl_ca
+ ssl_cert: *ssl_cert
+ ssl_key: *ssl_key
+ os-public-hostname: *hostname_glance_public
+ os-internal-hostname: *hostname_glance_public
+ os-admin-hostname: *hostname_glance_public
{% endif %}
to:
{% for unit_id in to_select() %}
hacluster-keystone:
charm: "./{{ ubuntu.release }}/hacluster"
+ series: {{ ubuntu.release }}
options:
corosync_transport: unicast
cluster_count: 3
{% endif %}
hacluster-cinder:
charm: "./{{ ubuntu.release }}/hacluster"
+ series: {{ ubuntu.release }}
options:
corosync_transport: unicast
cluster_count: 3
{% endif %}
hacluster-heat:
charm: "./{{ ubuntu.release }}/hacluster"
+ series: {{ ubuntu.release }}
options:
corosync_transport: unicast
cluster_count: 3
{% endif %}
hacluster-horizon:
charm: "./{{ ubuntu.release }}/hacluster"
+ series: {{ ubuntu.release }}
options:
corosync_transport: unicast
cluster_count: 3
{% endif %}
hacluster-nova:
charm: "./{{ ubuntu.release }}/hacluster"
+ series: {{ ubuntu.release }}
options:
corosync_transport: unicast
cluster_count: 3
{% endif %}
hacluster-neutron:
charm: "./{{ ubuntu.release }}/hacluster"
+ series: {{ ubuntu.release }}
options:
corosync_transport: unicast
cluster_count: 3
{% endif %}
hacluster-glance:
charm: "./{{ ubuntu.release }}/hacluster"
+ series: {{ ubuntu.release }}
options:
corosync_transport: unicast
cluster_count: 3
{% if os.network.controller != 'ocl' %}
hacluster-ceilometer:
charm: "./{{ ubuntu.release }}/hacluster"
+ series: {{ ubuntu.release }}
options:
corosync_transport: unicast
cluster_count: 3
{% endif %}
hacluster-mysql:
charm: "./{{ ubuntu.release }}/hacluster"
+ series: {{ ubuntu.release }}
options:
corosync_transport: unicast
cluster_count: 3
{% if os.network.ipv6 %}
prefer-ipv6: {{ os.network.ipv6 }}
{% endif %}
-{% if os.beta.hacluster_ceph_radosgw %}
hacluster-ceph-radosgw:
charm: "./{{ ubuntu.release }}/hacluster"
+ series: {{ ubuntu.release }}
options:
corosync_transport: unicast
cluster_count: 3
{% if os.network.ipv6 %}
prefer-ipv6: {{ os.network.ipv6 }}
{% endif %}
+ hacluster-aodh:
+ charm: "./{{ ubuntu.release }}/hacluster"
+ series: {{ ubuntu.release }}
+ options:
+ corosync_transport: unicast
+ cluster_count: 3
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+ hacluster-gnocchi:
+ charm: "./{{ ubuntu.release }}/hacluster"
+ series: {{ ubuntu.release }}
+ options:
+ corosync_transport: unicast
+ cluster_count: 3
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
+{% endif %}
+ hacluster-designate:
+ charm: "./{{ ubuntu.release }}/hacluster"
+ series: {{ ubuntu.release }}
+ options:
+ corosync_transport: unicast
+ cluster_count: 3
+{% if os.network.ipv6 %}
+ prefer-ipv6: {{ os.network.ipv6 }}
{% endif %}
- [ 'nova-cloud-controller:ha', 'hacluster-nova:ha' ]
- [ 'openstack-dashboard:ha', 'hacluster-horizon:ha' ]
- [ 'ceph-radosgw:ha', 'hacluster-ceph-radosgw:ha' ]
+ - [ 'aodh:ha', 'hacluster-aodh:ha' ]
+ - [ 'gnocchi:ha', 'hacluster-gnocchi:ha' ]
+ - [ 'designate:ha', 'hacluster-designate:ha' ]
{% endif %}
internal: *internal-space
shared-db: *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
openstack-origin: *openstack-origin
region: *openstack-region
{% endif %}
worker-multiplier: *worker-multiplier
{% if os.ha.mode == 'ha' %}
- vip: {{ opnfv.vip.heat }}
+ vip: *heat-vip
+{% endif %}
+{% if os.api.ssl %}
+ ssl_ca: *ssl_ca
+ ssl_cert: *ssl_cert
+ ssl_key: *ssl_key
+ os-public-hostname: *hostname_heat_public
+ os-internal-hostname: *hostname_heat_public
+ os-admin-hostname: *hostname_heat_public
{% endif %}
to:
{% for unit_id in to_select() %}
internal: *internal-space
shared-db: *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
openstack-origin: *openstack-origin
{% if os.git_repo.origin_git %}
admin-role: {{ os.admin.role }}
keystone-admin-role: {{ os.admin.role }}
preferred-api-version: 3
+{% if os.api.ssl %}
+ ssl_ca: *ssl_ca
+ ssl_cert: *ssl_cert
+ ssl_key: *ssl_key
+ #use-https: 'yes'
+ #https-service-endpoints: 'True'
+ os-public-hostname: *hostname_keystone_public
+ os-internal-hostname: *hostname_keystone_public
+ os-admin-hostname: *hostname_keystone_public
+{% endif %}
{% if os.network.ipv6 %}
prefer-ipv6: {{ os.network.ipv6 }}
{% endif %}
admin-password: {{ opnfv.admin_password | default(os.admin.password) }}
admin-token: {{ os.admin.name }}
{% if os.ha.mode == 'ha' %}
- vip: {{ opnfv.vip.keystone }}
+ vip: *keystone-vip
{% endif %}
to:
{% for unit_id in to_select() %}
cluster: *internal-space
shared-db: *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
innodb-buffer-pool-size: 256M
max-connections: 4000
prefer-ipv6: {{ os.network.ipv6 }}
{% endif %}
{% if os.ha.mode == 'ha' %}
- vip: {{ opnfv.vip.mysql }}
+ vip: *mysql-vip
{% endif %}
min-cluster-size: {{ unit_qty() }}
to:
internal: *internal-space
shared-db: *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
openstack-origin: *openstack-origin
{% if os.git_repo.origin_git %}
prefer-ipv6: {{ os.network.ipv6 }}
{% endif %}
{% if os.ha.mode == 'ha' %}
- vip: {{ opnfv.vip.neutron }}
+ vip: *neutron-api-vip
{% endif %}
{% if os.network.controller == 'nosdn' %}
flat-network-providers: physnet1
{% endif %}
{% if os.network.l2_population %}
l2-population: true
+{% endif %}
+{% if os.api.ssl %}
+ ssl_ca: *ssl_ca
+ ssl_cert: *ssl_cert
+ ssl_key: *ssl_key
+ os-public-hostname: *hostname_neutron_public
+ os-internal-hostname: *hostname_neutron_public
+ os-admin-hostname: *hostname_neutron_public
{% endif %}
to:
{% for unit_id in to_select() %}
"": *oam-space
data: *overlay-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
openstack-origin: *openstack-origin
{% if os.git_repo.origin_git %}
{% if os.service.bindings %}
bindings:
data: *overlay-space
+ series: {{ ubuntu.release }}
options:
{% else %}
options:
{% if os.network.controller == 'nosdn' %}
bridge-mappings: physnet1:br-data
data-port: *data-port
+ #enable-local-dhcp-and-metadata: true
{% else %}
ext-port: {{ opnfv.ext_port }}
{% endif %}
internal: *internal-space
shared-db: *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
openstack-origin: *openstack-origin
{% if os.git_repo.origin_git %}
prefer-ipv6: {{ os.network.ipv6 }}
{% endif %}
{% if os.ha.mode == 'ha' %}
- vip: {{ opnfv.vip.nova }}
+ vip: *nova-cc-vip
+{% endif %}
+{% if os.api.ssl %}
+ ssl_ca: *ssl_ca
+ ssl_cert: *ssl_cert
+ ssl_key: *ssl_key
+ os-public-hostname: *hostname_nova_public
+ os-internal-hostname: *hostname_nova_public
+ os-admin-hostname: *hostname_nova_public
{% endif %}
{% if opnfv.domain is defined %}
#console-proxy-ip: {{ opnfv.domain }}
bindings:
"": *oam-space
internal: *internal-space
+ series: {{ ubuntu.release }}
options:
openstack-origin: *openstack-origin
{% if os.git_repo.origin_git %}
contrail-openstack:
charm: ./{{ ubuntu.release }}/contrail-openstack
+ series: {{ ubuntu.release }}
options:
install-keys: |
- |
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
-
- mQENBFgjd4cBCAC7nEIGx6SRod7OONALolVPDV/lGIRUH/qU1HYULTxALnve0vn2
- gOYDROZWWLN5yAWwOnNJ/xrXJdvlJ+wT3vSord3dafe42ynajJB48NNfgq0qCzJy
- 832jA5CUPqoSBEQtWnEl6oDTf9iL/bsvybLKys1llQqZkgLNOunwJe9Mp0nhv9G3
- 7C7MpmjGjd3eviZaTPevONFVtDbWHU5QG8VEdX3n9QBNLzgaddMrY3ejg9ajGJ5H
- OOiSa71OxVZG+hc6O26LtjZgXikZ5CZLbCSHMpzXOZzuKn1xbLz1BLna5F5Y7czs
- XywyHK/Fq07T7ihzZ3GEhVfpbpOF+7egSY4lABEBAAG0J2plbmtpbnMgKGplbmtp
- bnMpIDxqZW5raW5zQGp1bmlwZXIubmV0PokBOAQTAQIAIgUCWCN3hwIbAwYLCQgH
- AwIGFQgCCQoLBBYCAwECHgECF4AACgkQeXOdQRGBcvcSaggAoh/87zZvJ09dH9yn
- gGMtlnRyfob9LIpW9A9QB7yIpwm+awye0rG7cH6vs3LH9sr4V+GDhXNl3eaO9+BT
- 9XxoyPllZ4tjiZ7kCKslK95V5mmHKa8hnnA7bo7sKrXJ3w5nYowEQUYLdaMdrWLx
- cyJFJuYKEP5Fkx6TECsebMR4ly6rJjGDLFysuhWNAjOm7lNG2ZtS9s5d101HNhxN
- RH71YMOGEfc2XeW5B6o2enjRmBWeoEIVhxn8YyPqtHbwQNwoSCdIXNyWF0f6Ju+s
- q7OAPzGkWMdvXwN3Wlko8qitB+yGXE9muip7osEbt3gpwhMCcYiprvIBKdDoQPBc
- 1xYMDrkBDQRYI3eHAQgAniMKcSf1aEergyyZZKFyQ4hkgAsg5eM3H5UngKDqAJZF
- fuaibQ3jzpdqtGQU2xLPHNKsJIqAsGtxEnfXmZAdwJHkx+xMOV0VorMEU5HuuEFc
- WXL1WL6dRfEmmFev0/ZBtZm4EE/ARlnTLBqxLRooEGljIacdqVakr8J23kbChulA
- oz7AaWzJYzCcbvwveNL9ZxEQDLMtIcAXh5LubBZDR0uq9UgkPX3wTdQ0zQ6hR3MZ
- KK0WYvXIPgESmrstCOrSR9X1d8hwjjwsgwVQwp1vGGHjWjEleCZvz7WyM0qFIRcj
- 9kLbnm6flJMK0Bowg/3yuiG55EBK+3b9TNdSDuG/mwARAQABiQEfBBgBAgAJBQJY
- I3eHAhsMAAoJEHlznUERgXL3aYAH/3GKj4V0qJCi7lW2NMH0nCdqp5Q1pyIm77Nv
- VU4WH16jNXcNPKJv+9yLC2tCkfVcX7C2clT25SJ4dwPsos2Kcnnh3nNHi6S9SxWG
- GzVy7o0preT1M01LE5zAADQlBPelOZseEM4EWgszNQ0dxrn030YWH9jKoNjUGI0W
- hRq4c2qV1N7RG/QiIWJhB4Fkyxh5bl0GODjYhPB/3am2i8gkTksH9FQa1cVydVSc
- FV6Ju8WGiUPpHGnDEmyzqudIAXtNApN16AO+09ECOaIJEgO+wcUZpJ8tAkrQ1CZs
- FMy4DESyZ1go1Myueds3X6iCCCtrmk9x5FAKRMb7NvkN2+dbj5s=
- =9u/w
+ mQENBFqVcvkBCACnPEXVBLmB79vQFRMu8sg4cNl0nvxTfStlkbrsuqs4JCKhvVWU
+ 6xF7Fa3e8up6t8Wn+53yqBEQhImN3RTDfE1RijHbIDHED2K8XtmMzHzN/ZuOwTxN
+ PdXN70HF+HBMW4egbDcImSyullPw65D5C3pHKZHmbnwPjXOAEdy3+6/8iNZex3mf
+ 4PDNs+KFmEblRWMTWryK7BdU3KI41o6jlixm5BkgPOQ5Tn0IZ300pGsIm2rvb6nN
+ LHsBDSPkamjGleDidqqEnS7ueMiRNSYg33bxbA/V5EtMcHpGbLSyMaLodqv5H/0L
+ ibt1nKEJnuV1/nNrTQMSpWCIGn5+f3UzQX+XABEBAAG0LFN0dWFydCBNYWNraWUg
+ KE9QTkZWKSA8d3NtYWNraWVAanVuaXBlci5uZXQ+iQE4BBMBAgAiBQJalXL5AhsD
+ BgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRDVIWmOj2VUIlUFB/9T8faY5vnZ
+ zJvU3GUJI4nNjj7kZeotfItBrBTtcWblkalIvDg/qqyMW6ZWr8Bl5I/Cs8LIDi5/
+ UaHkrekpU5tECWZkF+qbUcoZ7e0wENj1L3j+SHHJZ231pU2N6Dkrks+X1QL7qcx1
+ xXUMajzd+BfyxMGshpNu1vRmwVbtJNrRYzpPclfNsdz0D264o04n9XgSZMCz52gM
+ bNF1l9QEm7mVDZMxPImSNr2JjQCtKOuiTyQpXtzXQEVPwLerrG+3Ys6leSggckXp
+ W9sTEGDt8fREsUgWthAST4k2NRtA/yaULnbQZYChTmIzOPcw9gT5gl4KIzOpZc5t
+ fTIc/RNsBB2BuQENBFqVcvkBCACzqDT7QAw7xIN7gKQWzX82DZLCnTuOx83jW3U+
+ cEZVFhYNfsqUY+Kp48IyePqeQCYvoBxkVGhnIBCT4cRzlfwj2AxoI8S4mDMI6GlD
+ Q0DaAlX+BFKDqLPOrDrFGtYBDwMWVjhgV4PaZ5w4J17RKaann5RWBTFObNtUNVtJ
+ Xr7XRUjBnUg76nZX6qzUj22V7LBMtF8vcf+pe+AHY0OV24CGJOvHbUNS+sh8Sfse
+ yGpqSdoVjU9UjgkSpiJl7PWFPrXqoiNsxR6nEeXufy8q9+X4DsCVdiaomifCw1FN
+ HoP/xVUDUDBMeTAC8zseTardit4rt0Gtuk+5DuRm9otpGNSbABEBAAGJAR8EGAEC
+ AAkFAlqVcvkCGwwACgkQ1SFpjo9lVCKUOggAh8CNa+AkoEJsRCtzjW+3dJIaSk5w
+ KEpq9fVlmAcCbyIimB3NDAgGLSCTSc3JeXt5Hdgp4M0+j56lVEOtcTC0HWQYYVL2
+ 4QKv9fLypgJe5DLYtQmAyQOXNmO6P77/2KdeB1flxZiWmCDr3VJRgWseMILKb3oQ
+ 49Hfmk7HQrKOi0KaHbx0tpbUHo3uWR4H/QeQE3pCOA+ighqP3Mu3AHI/ySmWZyhc
+ kvvGambz75gXUdOb9L6eqV9lHQhFUCJznMdjCpd8HUFsbQiAaWKsFv5T5cpmBhDc
+ /MslWGpfFi+hs/ritGELl9+CiFdn6YPXULEb653V65ev8vg7NwX78ifNQw==
+ =SMUT
-----END PGP PUBLIC KEY BLOCK-----
install-sources: |
- - "deb http://65.122.57.42/repo /"
+ - "deb http://65.122.57.45:30002/opnfv /"
contrail-agent:
charm: ./{{ ubuntu.release }}/contrail-agent
+ series: {{ ubuntu.release }}
options:
install-keys: |
- |
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
- mQENBFgjd4cBCAC7nEIGx6SRod7OONALolVPDV/lGIRUH/qU1HYULTxALnve0vn2
- gOYDROZWWLN5yAWwOnNJ/xrXJdvlJ+wT3vSord3dafe42ynajJB48NNfgq0qCzJy
- 832jA5CUPqoSBEQtWnEl6oDTf9iL/bsvybLKys1llQqZkgLNOunwJe9Mp0nhv9G3
- 7C7MpmjGjd3eviZaTPevONFVtDbWHU5QG8VEdX3n9QBNLzgaddMrY3ejg9ajGJ5H
- OOiSa71OxVZG+hc6O26LtjZgXikZ5CZLbCSHMpzXOZzuKn1xbLz1BLna5F5Y7czs
- XywyHK/Fq07T7ihzZ3GEhVfpbpOF+7egSY4lABEBAAG0J2plbmtpbnMgKGplbmtp
- bnMpIDxqZW5raW5zQGp1bmlwZXIubmV0PokBOAQTAQIAIgUCWCN3hwIbAwYLCQgH
- AwIGFQgCCQoLBBYCAwECHgECF4AACgkQeXOdQRGBcvcSaggAoh/87zZvJ09dH9yn
- gGMtlnRyfob9LIpW9A9QB7yIpwm+awye0rG7cH6vs3LH9sr4V+GDhXNl3eaO9+BT
- 9XxoyPllZ4tjiZ7kCKslK95V5mmHKa8hnnA7bo7sKrXJ3w5nYowEQUYLdaMdrWLx
- cyJFJuYKEP5Fkx6TECsebMR4ly6rJjGDLFysuhWNAjOm7lNG2ZtS9s5d101HNhxN
- RH71YMOGEfc2XeW5B6o2enjRmBWeoEIVhxn8YyPqtHbwQNwoSCdIXNyWF0f6Ju+s
- q7OAPzGkWMdvXwN3Wlko8qitB+yGXE9muip7osEbt3gpwhMCcYiprvIBKdDoQPBc
- 1xYMDrkBDQRYI3eHAQgAniMKcSf1aEergyyZZKFyQ4hkgAsg5eM3H5UngKDqAJZF
- fuaibQ3jzpdqtGQU2xLPHNKsJIqAsGtxEnfXmZAdwJHkx+xMOV0VorMEU5HuuEFc
- WXL1WL6dRfEmmFev0/ZBtZm4EE/ARlnTLBqxLRooEGljIacdqVakr8J23kbChulA
- oz7AaWzJYzCcbvwveNL9ZxEQDLMtIcAXh5LubBZDR0uq9UgkPX3wTdQ0zQ6hR3MZ
- KK0WYvXIPgESmrstCOrSR9X1d8hwjjwsgwVQwp1vGGHjWjEleCZvz7WyM0qFIRcj
- 9kLbnm6flJMK0Bowg/3yuiG55EBK+3b9TNdSDuG/mwARAQABiQEfBBgBAgAJBQJY
- I3eHAhsMAAoJEHlznUERgXL3aYAH/3GKj4V0qJCi7lW2NMH0nCdqp5Q1pyIm77Nv
- VU4WH16jNXcNPKJv+9yLC2tCkfVcX7C2clT25SJ4dwPsos2Kcnnh3nNHi6S9SxWG
- GzVy7o0preT1M01LE5zAADQlBPelOZseEM4EWgszNQ0dxrn030YWH9jKoNjUGI0W
- hRq4c2qV1N7RG/QiIWJhB4Fkyxh5bl0GODjYhPB/3am2i8gkTksH9FQa1cVydVSc
- FV6Ju8WGiUPpHGnDEmyzqudIAXtNApN16AO+09ECOaIJEgO+wcUZpJ8tAkrQ1CZs
- FMy4DESyZ1go1Myueds3X6iCCCtrmk9x5FAKRMb7NvkN2+dbj5s=
- =9u/w
+ mQENBFqVcvkBCACnPEXVBLmB79vQFRMu8sg4cNl0nvxTfStlkbrsuqs4JCKhvVWU
+ 6xF7Fa3e8up6t8Wn+53yqBEQhImN3RTDfE1RijHbIDHED2K8XtmMzHzN/ZuOwTxN
+ PdXN70HF+HBMW4egbDcImSyullPw65D5C3pHKZHmbnwPjXOAEdy3+6/8iNZex3mf
+ 4PDNs+KFmEblRWMTWryK7BdU3KI41o6jlixm5BkgPOQ5Tn0IZ300pGsIm2rvb6nN
+ LHsBDSPkamjGleDidqqEnS7ueMiRNSYg33bxbA/V5EtMcHpGbLSyMaLodqv5H/0L
+ ibt1nKEJnuV1/nNrTQMSpWCIGn5+f3UzQX+XABEBAAG0LFN0dWFydCBNYWNraWUg
+ KE9QTkZWKSA8d3NtYWNraWVAanVuaXBlci5uZXQ+iQE4BBMBAgAiBQJalXL5AhsD
+ BgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRDVIWmOj2VUIlUFB/9T8faY5vnZ
+ zJvU3GUJI4nNjj7kZeotfItBrBTtcWblkalIvDg/qqyMW6ZWr8Bl5I/Cs8LIDi5/
+ UaHkrekpU5tECWZkF+qbUcoZ7e0wENj1L3j+SHHJZ231pU2N6Dkrks+X1QL7qcx1
+ xXUMajzd+BfyxMGshpNu1vRmwVbtJNrRYzpPclfNsdz0D264o04n9XgSZMCz52gM
+ bNF1l9QEm7mVDZMxPImSNr2JjQCtKOuiTyQpXtzXQEVPwLerrG+3Ys6leSggckXp
+ W9sTEGDt8fREsUgWthAST4k2NRtA/yaULnbQZYChTmIzOPcw9gT5gl4KIzOpZc5t
+ fTIc/RNsBB2BuQENBFqVcvkBCACzqDT7QAw7xIN7gKQWzX82DZLCnTuOx83jW3U+
+ cEZVFhYNfsqUY+Kp48IyePqeQCYvoBxkVGhnIBCT4cRzlfwj2AxoI8S4mDMI6GlD
+ Q0DaAlX+BFKDqLPOrDrFGtYBDwMWVjhgV4PaZ5w4J17RKaann5RWBTFObNtUNVtJ
+ Xr7XRUjBnUg76nZX6qzUj22V7LBMtF8vcf+pe+AHY0OV24CGJOvHbUNS+sh8Sfse
+ yGpqSdoVjU9UjgkSpiJl7PWFPrXqoiNsxR6nEeXufy8q9+X4DsCVdiaomifCw1FN
+ HoP/xVUDUDBMeTAC8zseTardit4rt0Gtuk+5DuRm9otpGNSbABEBAAGJAR8EGAEC
+ AAkFAlqVcvkCGwwACgkQ1SFpjo9lVCKUOggAh8CNa+AkoEJsRCtzjW+3dJIaSk5w
+ KEpq9fVlmAcCbyIimB3NDAgGLSCTSc3JeXt5Hdgp4M0+j56lVEOtcTC0HWQYYVL2
+ 4QKv9fLypgJe5DLYtQmAyQOXNmO6P77/2KdeB1flxZiWmCDr3VJRgWseMILKb3oQ
+ 49Hfmk7HQrKOi0KaHbx0tpbUHo3uWR4H/QeQE3pCOA+ighqP3Mu3AHI/ySmWZyhc
+ kvvGambz75gXUdOb9L6eqV9lHQhFUCJznMdjCpd8HUFsbQiAaWKsFv5T5cpmBhDc
+ /MslWGpfFi+hs/ritGELl9+CiFdn6YPXULEb653V65ev8vg7NwX78ifNQw==
+ =SMUT
-----END PGP PUBLIC KEY BLOCK-----
install-sources: |
- - "deb http://65.122.57.42/repo /"
+ - "deb http://65.122.57.45:30002/opnfv /"
contrail-analytics:
charm: ./{{ ubuntu.release }}/contrail-analytics
controller-api: *internal-space
ovsdb-manager: *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
install-url: "https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distribution-karaf/0.6.0-Carbon/distribution-karaf-0.6.0-Carbon.tar.gz"
{% if os.network.sfc %}
onos-controller:
charm: ./{{ ubuntu.release }}/onos-controller
num_units: 1
+ series: {{ ubuntu.release }}
options:
{% if opnfv.ext_port is defined %}
ext-port: {{ opnfv.ext_port }}
"": *oam-space
internal: *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
to:
- "lxd:nodes/0"
{# Empty block to avoid bad block trim #}
"": *public-space
shared-db: *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
openstack-origin: *openstack-origin
{% if os.git_repo.origin_git %}
prefer-ipv6: {{ os.network.ipv6 }}
{% endif %}
{% if os.ha.mode == 'ha' %}
- vip: {{ opnfv.vip.dashboard }}
+ vip: *dashboard-vip
+{% endif %}
+{% if os.api.ssl %}
+ ssl_ca: *ssl_ca
+ ssl_cert: *ssl_cert
+ ssl_key: *ssl_key
+ os-public-hostname: *hostname_dashboard_public
+ os-internal-hostname: *hostname_dashboard_public
+ os-admin-hostname: *hostname_dashboard_public
{% endif %}
to:
{% for unit_id in to_select() %}
bindings:
"": *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
to:
- "lxd:nodes/0"
{# Empty block to avoid bad block trim #}
amqp: *internal-space
cluster: *internal-space
{% endif %}
+ series: {{ ubuntu.release }}
options:
source: *openstack-origin
{% if opnfv.storage_dict.ceph is defined %}
- [ 'nova-cloud-controller:quantum-network-service', 'neutron-gateway:quantum-network-service' ]
- [ 'ceilometer:amqp', 'rabbitmq-server:amqp' ]
- [ 'ceilometer-agent:ceilometer-service', 'ceilometer:ceilometer-service' ]
- - [ 'ceilometer:identity-service', 'keystone:identity-service' ]
- [ 'ceilometer:identity-notifications', 'keystone:identity-notifications' ]
+ - [ 'ceilometer:identity-service', 'keystone:identity-service' ]
+ - [ 'ceilometer:identity-credentials', 'keystone:identity-credentials' ]
- [ 'ceilometer-agent:nova-ceilometer', 'nova-compute:nova-ceilometer' ]
- - [ 'ceilometer:shared-db', 'mongodb:database' ]
+ - [ 'gnocchi:storage-ceph', 'ceph-mon:client' ]
+ - [ 'gnocchi:shared-db', 'mysql:shared-db' ]
+ - [ 'gnocchi:amqp', 'rabbitmq-server:amqp' ]
+ - [ 'gnocchi:coordinator-memcached', 'memcached:cache' ]
+ - [ 'gnocchi:metric-service', 'ceilometer:metric-service' ]
+ - [ 'gnocchi:identity-service', 'keystone:identity-service' ]
{% endif %}
- [ 'heat:shared-db', 'mysql:shared-db' ]
- [ 'heat:identity-service', 'keystone:identity-service' ]
- [ 'aodh:amqp', 'rabbitmq-server:amqp' ]
- [ 'aodh:shared-db', 'mysql:shared-db' ]
- [ 'aodh:identity-service', 'keystone:identity-service' ]
+ - [ designate, mysql ]
+ - [ designate, designate-bind ]
+ - [ designate, keystone ]
+ - [ designate, rabbitmq-server ]
+ - [ designate, memcached ]
{% if os.lxd %}
- [ 'nova-compute:lxd', 'lxd:lxd' ]
{% endif %}
scaleio-mdm:
charm: "./{{ ubuntu.release }}/scaleio-mdm"
num_units: {{ unit_scaleio_qty() }}
+ series: {{ ubuntu.release }}
options:
cluster-mode: 3
to:
scaleio-sds:
charm: "./{{ ubuntu.release }}/scaleio-sds"
num_units: {{ opnfv.units }}
+ series: {{ ubuntu.release }}
options:
protection-domain: 'pd1'
device-paths: {{ opnfv.storage_dict.scaleio.disk }}
{% endif %}
scaleio-sdc:
charm: "./{{ ubuntu.release }}/scaleio-sdc"
+ series: {{ ubuntu.release }}
num_units: {{ opnfv.units }}
to:
{% if os.hyperconverged %}
{% endif %}
scaleio-gw:
charm: "./{{ ubuntu.release }}/scaleio-gw"
+ series: {{ ubuntu.release }}
num_units: 1
to:
- "nodes/0"
scaleio-gui:
charm: "./{{ ubuntu.release }}/scaleio-gui"
num_units: 1
+ series: {{ ubuntu.release }}
to:
- "nodes/0"
{% endif %}
-{% if os.release == 'mitaka' %}
+{% if ubuntu.release == 'bionic' %}
openstack-origin: &openstack-origin distro
{% else %}
openstack-origin: &openstack-origin cloud:{{ ubuntu.release }}-{{ os.release }}
{% endif %}
osd-journal: &osd-journal
+{% if os.ha.mode == 'ha' %}
+ # Various VIPs
+ aodh-vip: &aodh-vip {{ opnfv.vip.aodh }}
+ ceilometer-vip: &ceilometer-vip {{ opnfv.vip.ceilometer }}
+ cinder-vip: &cinder-vip {{ opnfv.vip.cinder }}
+ dashboard-vip: &dashboard-vip {{ opnfv.vip.dashboard }}
+ designate-vip: &designate-vip {{ opnfv.vip.designate }}
+ glance-vip: &glance-vip {{ opnfv.vip.glance }}
+ gnocchi-vip: &gnocchi-vip {{ opnfv.vip.gnocchi }}
+ heat-vip: &heat-vip {{ opnfv.vip.heat }}
+ keystone-vip: &keystone-vip {{ opnfv.vip.keystone }}
+ mysql-vip: &mysql-vip {{ opnfv.vip.mysql }}
+ neutron-api-vip: &neutron-api-vip {{ opnfv.vip.neutron }}
+ nova-cc-vip: &nova-cc-vip {{ opnfv.vip.nova }}
+ rados-gateway-vip: &rados-gateway-vip {{ opnfv.vip.radosgw }}
+{% endif %}
+
+{% if os.api.ssl %}
+ ssl_ca: &ssl_ca include-base64://ssl/ca.crt
+ ssl_cert: &ssl_cert include-base64://ssl/maas.crt
+ ssl_key: &ssl_key include-base64://ssl/maas.key
+
+ hostname_aodh_public: &hostname_aodh_public aodh.maas
+ hostname_ceilometer_public: &hostname_ceilometer_public ceilometer.maas
+ hostname_cinder_public: &hostname_cinder_public cinder.maas
+ hostname_dashboard_public: &hostname_dashboard_public openstack-dashboard.maas
+ hostname_designate_public: &hostname_designate_public designate.maas
+ hostname_glance_public: &hostname_glance_public glance.maas
+ hostname_gnocchi_public: &hostname_gnocchi_public gnocchi.maas
+ hostname_heat_public: &hostname_heat_public heat.maas
+ hostname_keystone_public: &hostname_keystone_public keystone.maas
+ hostname_rabbitmq: &hostname_rabbitmq rabbitmq.maas
+ hostname_neutron_public: &hostname_neutron_public neutron-api.maas
+ hostname_nova_public: &hostname_nova_public nova-cloud-controller.maas
+ hostname_radosgw_public: &hostname_radosgw_public rados-gateway.maas
+{% endif %}
+
{% if os.network.controller != 'ocl' %}
ceilometer-agent:
charm: ./{{ ubuntu.release }}/ceilometer-agent
+ series: {{ ubuntu.release }}
options:
openstack-origin: *openstack-origin
{% endif %}
{% if opnfv.storage_dict.ceph is defined %}
cinder-ceph:
charm: ./{{ ubuntu.release }}/cinder-ceph
+ series: {{ ubuntu.release }}
options:
{% if opnfv.storage_dict.ceph is defined %}
ceph-osd-replication-count: {{ unit_ceph_qty() }}
{% elif os.network.controller == 'odl' %}
neutron-api-odl:
charm: ./{{ ubuntu.release }}/neutron-api-odl
+ series: {{ ubuntu.release }}
options:
overlay-network-type: 'vxlan'
security-groups: True
openvswitch-odl:
charm: ./{{ ubuntu.release }}/openvswitch-odl
+ series: {{ ubuntu.release }}
options:
{% if opnfv.ext_port is defined %}
provider_mappings: "physnet1:{{ opnfv.ext_port }}"
{% elif os.network.controller == 'onos' %}
neutron-api-onos:
charm: ./{{ ubuntu.release }}/neutron-api-onos
+ series: {{ ubuntu.release }}
options:
overlay-network-type: 'vxlan'
security-groups: True
openvswitch-onos:
charm: ./{{ ubuntu.release }}/openvswitch-onos
+ series: {{ ubuntu.release }}
options:
{% if opnfv.spaces_dict.data is defined %}
os-data-network: {{ opnfv.spaces_dict.data.cidr }}
{% if os.lxd %}
lxd:
charm: ./{{ ubuntu.release }}/lxd
+ series: {{ ubuntu.release }}
options:
{% if 'srv' not in opnfv.storage_dict.ceph.disk %}
block-devices: {{ opnfv.storage_dict.ceph.disk }}
{% if opnfv.storage_dict.scaleio is defined %}
scaleio-openstack:
charm: ./{{ ubuntu.release }}/scaleio-openstack
+ series: {{ ubuntu.release }}
options:
protection-domains: 'pd1'
{% endif %}
units: {{ lab.racks[0].nodes|count }}
vip:
{% if opnfv.spaces_dict.public is defined %}
+ aodh: {{ netpublic_prefix }}33 {{ net_prefix }}33
+ ceilometer: {{ netpublic_prefix }}24 {{ net_prefix }}24
+ cinder: {{ netpublic_prefix }}29 {{ net_prefix }}29
dashboard: {{ netpublic_prefix }}21 {{ net_prefix }}21
+ designate: {{ netpublic_prefix }}32 {{ net_prefix }}32
glance: {{ netpublic_prefix }}22 {{ net_prefix }}22
+ gnocchi: {{ netpublic_prefix }}31 {{ net_prefix }}31
+ heat: {{ netpublic_prefix }}28 {{ net_prefix }}28
keystone: {{ netpublic_prefix }}23 {{ net_prefix }}23
- ceilometer: {{ netpublic_prefix }}24 {{ net_prefix }}24
mysql: {{ net_prefix }}25
- nova: {{ netpublic_prefix }}26 {{ net_prefix }}26
neutron: {{ netpublic_prefix }}27 {{ net_prefix }}27
- heat: {{ netpublic_prefix }}28 {{ net_prefix }}28
- cinder: {{ netpublic_prefix }}29 {{ net_prefix }}29
+ nova: {{ netpublic_prefix }}26 {{ net_prefix }}26
radosgw: {{ netpublic_prefix }}30 {{ net_prefix }}30
{% else %}
+ aodh: {{ net_prefix }}33
+ ceilometer: {{ net_prefix }}24
+ cinder: {{ net_prefix }}29
dashboard: {{ net_prefix }}21
+ designate: {{ net_prefix }}32
glance: {{ net_prefix }}22
+ gnocchi: {{ net_prefix }}31
+ heat: {{ net_prefix }}28
keystone: {{ net_prefix }}23
- ceilometer: {{ net_prefix }}24
mysql: {{ net_prefix }}25
- nova: {{ net_prefix }}26
neutron: {{ net_prefix }}27
- heat: {{ net_prefix }}28
- cinder: {{ net_prefix }}29
+ nova: {{ net_prefix }}26
radosgw: {{ net_prefix }}30
{% endif %}
+
ubuntu:
release: xenial
os:
- release: pike
+ release: queens
git_repo:
origin_git: False
- branch: pike
+ branch: queens
hyperconverged: True
ha:
mode: ha
api:
worker_multiplier: 1.0
haproxy_timeout: 10000
+ ssl: False
admin:
role: admin
name: admin
ipv6: False
l2_population: False
beta:
- hacluster_ceph_radosgw: True
huge_pages: False
cpu_pin: False
cpu_pin_set: all
opnfvsdn=nosdn
opnfvtype=noha
-openstack=pike
+openstack=queens
opnfvlab=default
opnfvlabfile=
opnfvrel=e
maasinstall=0
usage() { echo "Usage: $0
- [-s|--sdn <nosdn|odl|opencontrail>]
+ [-s|--sdn <nosdn|odl|ocl>]
[-t|--type <noha|ha|tip>]
- [-o|--openstack <ocata|pike>]
+ [-o|--openstack <ocata|queens>]
[-l|--lab <default|custom>]
[-f|--feature <ipv6,dpdk,lxd,dvr,openbaton,multus>]
[-d|--distro <xenial>]
-d|--distro )
if ([ "${arguments[index]}" != "" ]); then
- opnfdistro=${arguments[index]}
+ opnfvdistro=${arguments[index]}
fi;
;;
fi
#bootstrap the node
- ./01-bootstrap.sh
+ ./01-bootstrap.sh $opnfvdistro
juju model-config default-series=$opnfvdistro enable-os-refresh-update=false enable-os-upgrade=false
+ juju set-model-constraints tags=
# case default deploy the opnfv platform:
./02-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature $opnfvdistro $opnfvmodel
# Configuring deployment
if ([ $opnfvmodel == "openstack" ]); then
- if ([ $opnfvsdn == "ocl" ]); then
- echo_info "Patching OpenContrail controller container"
- juju run --application contrail-controller sudo docker cp contrail-controller:/etc/contrail/vnc_api_lib.ini /tmp
- juju run --application contrail-controller cp /tmp/vnc_api_lib.ini /tmp/vnc_api_lib.ini2
- juju run --application contrail-controller 'echo "AUTHN_DOMAIN = admin_domain" >> /tmp/vnc_api_lib.ini2'
- juju run --application contrail-controller sudo docker cp /tmp/vnc_api_lib.ini2 contrail-controller:/etc/contrail/vnc_api_lib.ini
- juju run --application contrail-controller sudo docker exec contrail-controller service contrail-api restart
-
- juju run --application contrail-controller sudo docker cp /tmp/vnc_api_lib.ini2 contrail-analytics:/etc/contrail/vnc_api_lib.ini
- echo_info "Wait for OpenContrail components to stabilize"
- sleep 600
- fi
echo_info "Configuring OpenStack deployment"
# creating heat domain after pushing the public API into /etc/hosts
status=`juju run-action heat/0 domain-setup`
echo $status
+ if ([ $opnfvsdn != "ocl" ]); then
+ status=`juju run-action ceilometer/0 ceilometer-upgrade`
+ fi
+ echo $status
if ([ $opnfvtype == "ha" ]); then
status=`juju run-action heat/1 domain-setup`
echo $status
+ if ([ $opnfvsdn != "ocl" ]); then
+ status=`juju run-action ceilometer/1 ceilometer-upgrade`
+ fi
+ echo $status
status=`juju run-action heat/2 domain-setup`
echo $status
+ if ([ $opnfvsdn != "ocl" ]); then
+ status=`juju run-action ceilometer/2 ceilometer-upgrade`
+ fi
+ echo $status
fi
sudo ../juju/get-cloud-images || true
if 'hugepages' in extra:
config['os']['beta']['huge_pages'] = True
config['os']['beta']['cpu_pin'] = True
-if 'mitaka' in extra:
- config['os']['release'] = 'mitaka'
-if 'trusty' in extra:
- config['ubuntu']['release'] = 'trusty'
- if 'liberty' in extra:
- config['os']['release'] = 'liberty'
+if 'pike' in extra:
+ config['os']['release'] = 'pike'
if 'xenial' in extra:
config['ubuntu']['release'] = 'xenial'
+if 'bionic' in extra:
+ config['ubuntu']['release'] = 'bionic'
+ if 'pike' in extra:
+ config['os']['release'] = 'queens'
if 'dishypcon' in extra:
config['os']['hyperconverged'] = False
if 'openbaton' in features:
opnfvfeature=$1
+juju scp kubernetes-master/0:/home/ubuntu/config ~/joid_config/config
+
+export KUBE_MASTER_IP=`juju status kubernetes-master --format=yaml | grep public-address | cut -d ":" -f 2 | head -1 | sed "s/^[ \t]*//g"`
+export KUBE_MASTER_URL=http://${KUBE_MASTER_IP}:6443
+
+configk8(){
+cat <<-EOF
+export KUBERNETES_PROVIDER=local
+export KUBE_MASTER_IP=${KUBE_MASTER_IP}
+export KUBE_MASTER_URL=${KUBE_MASTER_URL}
+EOF
+}
+
+configk8 > ~/joid_config/k8config
+
juju run-action kubernetes-worker/0 microbot replicas=3
juju config kubernetes-master enable-dashboard-addons=true || true
juju expose kubernetes-worker || true
# openstack
charm pull cs:ntp $distro/ntp
-git clone -b stable/17.11 https://github.com/openstack/charm-ceph-mon.git $distro/ceph-mon
-git clone -b stable/17.11 https://github.com/openstack/charm-ceph-osd.git $distro/ceph-osd
+git clone -b stable/18.02 https://github.com/openstack/charm-ceph-mon.git $distro/ceph-mon
+git clone -b stable/18.02 https://github.com/openstack/charm-ceph-osd.git $distro/ceph-osd
--- /dev/null
+<network>
+ <name>default</name>
+ <forward mode='bridge'/>
+ <bridge name='brAdm'/>
+</network>
# openstack
bzr branch lp:~narindergupta/charms/trusty/promise/trunk $distro/promise
-git clone -b stable/17.11 https://github.com/openstack/charm-hacluster.git $distro/hacluster
-git clone -b stable/17.11 https://github.com/openstack/charm-ceilometer.git $distro/ceilometer
-git clone -b stable/17.11 https://github.com/openstack/charm-ceilometer-agent.git $distro/ceilometer-agent
-git clone -b stable/17.11 https://github.com/openstack/charm-ceph-mon.git $distro/ceph-mon
-git clone -b stable/17.11 https://github.com/openstack/charm-ceph-osd.git $distro/ceph-osd
-git clone -b stable/17.11 https://github.com/openstack/charm-ceph-radosgw.git $distro/ceph-radosgw
-git clone -b stable/17.11 https://github.com/openstack/charm-cinder.git $distro/cinder
-git clone -b stable/17.11 https://github.com/openstack/charm-cinder-ceph.git $distro/cinder-ceph
-git clone -b stable/17.11 https://github.com/openstack/charm-glance.git $distro/glance
-git clone -b stable/17.11 https://github.com/openstack/charm-keystone.git $distro/keystone
-git clone -b stable/17.11 https://github.com/openstack/charm-percona-cluster.git $distro/percona-cluster
-git clone -b stable/17.11 https://github.com/openstack/charm-neutron-api.git $distro/neutron-api
-git clone -b stable/17.11 https://github.com/openstack/charm-neutron-gateway.git $distro/neutron-gateway
-git clone -b stable/17.11 https://github.com/openstack/charm-neutron-openvswitch.git $distro/neutron-openvswitch
-git clone -b stable/17.11 https://github.com/openstack/charm-nova-cloud-controller.git $distro/nova-cloud-controller
-git clone -b stable/17.11 https://github.com/openstack/charm-nova-compute.git $distro/nova-compute
-git clone -b stable/17.11 https://github.com/openstack/charm-openstack-dashboard.git $distro/openstack-dashboard
-git clone -b stable/17.11 https://github.com/openstack/charm-rabbitmq-server.git $distro/rabbitmq-server
-git clone -b stable/17.11 https://github.com/openstack/charm-heat.git $distro/heat
-git clone -b stable/17.11 https://github.com/openstack/charm-lxd.git $distro/lxd
+git clone https://github.com/openstack/charm-hacluster.git $distro/hacluster
+git clone https://github.com/openstack/charm-ceilometer.git $distro/ceilometer
+git clone https://github.com/openstack/charm-ceilometer-agent.git $distro/ceilometer-agent
+git clone https://github.com/openstack/charm-ceph-mon.git $distro/ceph-mon
+git clone https://github.com/openstack/charm-ceph-osd.git $distro/ceph-osd
+git clone https://github.com/openstack/charm-ceph-radosgw.git $distro/ceph-radosgw
+git clone https://github.com/openstack/charm-cinder.git $distro/cinder
+git clone https://github.com/openstack/charm-cinder-ceph.git $distro/cinder-ceph
+git clone https://github.com/openstack/charm-glance.git $distro/glance
+git clone https://github.com/openstack/charm-keystone.git $distro/keystone
+git clone https://github.com/openstack/charm-percona-cluster.git $distro/percona-cluster
+git clone https://github.com/openstack/charm-neutron-api.git $distro/neutron-api
+git clone https://github.com/openstack/charm-neutron-gateway.git $distro/neutron-gateway
+git clone https://github.com/openstack/charm-neutron-openvswitch.git $distro/neutron-openvswitch
+git clone https://github.com/openstack/charm-nova-cloud-controller.git $distro/nova-cloud-controller
+git clone https://github.com/openstack/charm-nova-compute.git $distro/nova-compute
+git clone https://github.com/openstack/charm-openstack-dashboard.git $distro/openstack-dashboard
+git clone https://github.com/openstack/charm-rabbitmq-server.git $distro/rabbitmq-server
+git clone https://github.com/openstack/charm-heat.git $distro/heat
+git clone https://github.com/openstack/charm-lxd.git $distro/lxd
git clone https://github.com/openbaton/juju-charm.git $distro/openbaton
+charm pull cs:designate $distro/designate
+charm pull cs:designate-bind $distro/designate-bind
+charm pull cs:memcached $distro/memcached
+charm pull cs:gnocchi $distro/gnocchi
charm pull cs:$distro/aodh $distro/aodh
-charm pull cs:$distro/mongodb $distro/mongodb
charm pull cs:ntp $distro/ntp
charm pull cs:$distro/haproxy $distro/haproxy
charm pull cs:~narindergupta/congress-1 $distro/congress
# openstack
bzr branch lp:~narindergupta/charms/trusty/promise/trunk $distro/promise
-git clone -b stable/17.11 https://github.com/openstack/charm-hacluster.git $distro/hacluster
-git clone -b stable/17.11 https://github.com/openstack/charm-ceilometer.git $distro/ceilometer
-git clone -b stable/17.11 https://github.com/openstack/charm-ceilometer-agent.git $distro/ceilometer-agent
-git clone -b stable/17.11 https://github.com/openstack/charm-ceph-mon.git $distro/ceph-mon
-git clone -b stable/17.11 https://github.com/openstack/charm-ceph-osd.git $distro/ceph-osd
-git clone -b stable/17.11 https://github.com/openstack/charm-ceph-radosgw.git $distro/ceph-radosgw
-git clone -b stable/17.11 https://github.com/openstack/charm-cinder.git $distro/cinder
-git clone -b stable/17.11 https://github.com/openstack/charm-cinder-ceph.git $distro/cinder-ceph
-git clone -b stable/17.11 https://github.com/openstack/charm-glance.git $distro/glance
-git clone -b stable/17.11 https://github.com/openstack/charm-keystone.git $distro/keystone
-git clone -b stable/17.11 https://github.com/openstack/charm-percona-cluster.git $distro/percona-cluster
-git clone -b stable/17.11 https://github.com/openstack/charm-neutron-api.git $distro/neutron-api
-git clone -b stable/17.11 https://github.com/openstack/charm-neutron-gateway.git $distro/neutron-gateway
-git clone -b stable/17.11 https://github.com/openstack/charm-neutron-openvswitch.git $distro/neutron-openvswitch
-git clone -b stable/17.11 https://github.com/openstack/charm-nova-cloud-controller.git $distro/nova-cloud-controller
-git clone -b stable/17.11 https://github.com/openstack/charm-nova-compute.git $distro/nova-compute
-git clone -b stable/17.11 https://github.com/openstack/charm-openstack-dashboard.git $distro/openstack-dashboard
-git clone -b stable/17.11 https://github.com/openstack/charm-rabbitmq-server.git $distro/rabbitmq-server
-git clone -b stable/17.11 https://github.com/openstack/charm-heat.git $distro/heat
-git clone -b stable/17.11 https://github.com/openstack/charm-lxd.git $distro/lxd
-git clone -b stable/17.11 https://github.com/openstack/charm-odl-controller.git $distro/odl-controller
-git clone -b stable/17.11 https://github.com/openstack/charm-neutron-api-odl.git $distro/neutron-api-odl
-git clone -b stable/17.11 https://github.com/openstack/charm-openvswitch-odl.git $distro/openvswitch-odl
+git clone -b stable/18.02 https://github.com/openstack/charm-hacluster.git $distro/hacluster
+git clone -b stable/18.02 https://github.com/openstack/charm-ceilometer.git $distro/ceilometer
+git clone -b stable/18.02 https://github.com/openstack/charm-ceilometer-agent.git $distro/ceilometer-agent
+git clone -b stable/18.02 https://github.com/openstack/charm-ceph-mon.git $distro/ceph-mon
+git clone -b stable/18.02 https://github.com/openstack/charm-ceph-osd.git $distro/ceph-osd
+git clone -b stable/18.02 https://github.com/openstack/charm-ceph-radosgw.git $distro/ceph-radosgw
+git clone -b stable/18.02 https://github.com/openstack/charm-cinder.git $distro/cinder
+git clone -b stable/18.02 https://github.com/openstack/charm-cinder-ceph.git $distro/cinder-ceph
+git clone -b stable/18.02 https://github.com/openstack/charm-glance.git $distro/glance
+git clone -b stable/18.02 https://github.com/openstack/charm-keystone.git $distro/keystone
+git clone -b stable/18.02 https://github.com/openstack/charm-percona-cluster.git $distro/percona-cluster
+git clone -b stable/18.02 https://github.com/openstack/charm-neutron-api.git $distro/neutron-api
+git clone -b stable/18.02 https://github.com/openstack/charm-neutron-gateway.git $distro/neutron-gateway
+git clone -b stable/18.02 https://github.com/openstack/charm-neutron-openvswitch.git $distro/neutron-openvswitch
+git clone -b stable/18.02 https://github.com/openstack/charm-nova-cloud-controller.git $distro/nova-cloud-controller
+git clone -b stable/18.02 https://github.com/openstack/charm-nova-compute.git $distro/nova-compute
+git clone -b stable/18.02 https://github.com/openstack/charm-openstack-dashboard.git $distro/openstack-dashboard
+git clone -b stable/18.02 https://github.com/openstack/charm-rabbitmq-server.git $distro/rabbitmq-server
+git clone -b stable/18.02 https://github.com/openstack/charm-heat.git $distro/heat
+git clone -b stable/18.02 https://github.com/openstack/charm-lxd.git $distro/lxd
+git clone -b stable/18.02 https://github.com/openstack/charm-odl-controller.git $distro/odl-controller
+git clone -b stable/18.02 https://github.com/openstack/charm-neutron-api-odl.git $distro/neutron-api-odl
+git clone -b stable/18.02 https://github.com/openstack/charm-openvswitch-odl.git $distro/openvswitch-odl
git clone https://github.com/openbaton/juju-charm.git $distro/openbaton
charm pull cs:$distro/aodh $distro/aodh
# openstack
bzr branch lp:~narindergupta/charms/trusty/promise/trunk $distro/promise
-git clone -b stable/17.08 https://github.com/openstack/charm-hacluster.git $distro/hacluster
-git clone -b stable/17.08 https://github.com/openstack/charm-ceilometer.git $distro/ceilometer
-git clone -b stable/17.08 https://github.com/openstack/charm-ceilometer-agent.git $distro/ceilometer-agent
-git clone -b stable/17.08 https://github.com/openstack/charm-ceph-mon.git $distro/ceph-mon
-git clone -b stable/17.08 https://github.com/openstack/charm-ceph-osd.git $distro/ceph-osd
-git clone -b stable/17.08 https://github.com/openstack/charm-ceph-radosgw.git $distro/ceph-radosgw
-git clone -b stable/17.08 https://github.com/openstack/charm-cinder.git $distro/cinder
-git clone -b stable/17.08 https://github.com/openstack/charm-cinder-ceph.git $distro/cinder-ceph
-git clone -b stable/17.08 https://github.com/openstack/charm-glance.git $distro/glance
-git clone -b stable/17.08 https://github.com/openstack/charm-keystone.git $distro/keystone
-git clone -b stable/17.08 https://github.com/openstack/charm-percona-cluster.git $distro/percona-cluster
-git clone -b stable/17.08 https://github.com/openstack/charm-neutron-api.git $distro/neutron-api
-git clone -b stable/17.08 https://github.com/openstack/charm-neutron-gateway.git $distro/neutron-gateway
-git clone -b stable/17.08 https://github.com/openstack/charm-neutron-openvswitch.git $distro/neutron-openvswitch
-git clone -b stable/17.08 https://github.com/openstack/charm-nova-cloud-controller.git $distro/nova-cloud-controller
-git clone -b stable/17.08 https://github.com/openstack/charm-nova-compute.git $distro/nova-compute
-git clone -b stable/17.08 https://github.com/openstack/charm-openstack-dashboard.git $distro/openstack-dashboard
-git clone -b stable/17.08 https://github.com/openstack/charm-rabbitmq-server.git $distro/rabbitmq-server
-git clone -b stable/17.08 https://github.com/openstack/charm-heat.git $distro/heat
-git clone -b stable/17.08 https://github.com/openstack/charm-lxd.git $distro/lxd
+git clone -b stable/18.02 https://github.com/openstack/charm-hacluster.git $distro/hacluster
+git clone -b stable/18.02 https://github.com/openstack/charm-ceilometer.git $distro/ceilometer
+git clone -b stable/18.02 https://github.com/openstack/charm-ceilometer-agent.git $distro/ceilometer-agent
+git clone -b stable/18.02 https://github.com/openstack/charm-ceph-mon.git $distro/ceph-mon
+git clone -b stable/18.02 https://github.com/openstack/charm-ceph-osd.git $distro/ceph-osd
+git clone -b stable/18.02 https://github.com/openstack/charm-ceph-radosgw.git $distro/ceph-radosgw
+git clone -b stable/18.02 https://github.com/openstack/charm-cinder.git $distro/cinder
+git clone -b stable/18.02 https://github.com/openstack/charm-cinder-ceph.git $distro/cinder-ceph
+git clone -b stable/18.02 https://github.com/openstack/charm-glance.git $distro/glance
+git clone -b stable/18.02 https://github.com/openstack/charm-keystone.git $distro/keystone
+git clone -b stable/18.02 https://github.com/openstack/charm-percona-cluster.git $distro/percona-cluster
+git clone -b stable/18.02 https://github.com/openstack/charm-neutron-api.git $distro/neutron-api
+git clone -b stable/18.02 https://github.com/openstack/charm-neutron-gateway.git $distro/neutron-gateway
+git clone -b stable/18.02 https://github.com/openstack/charm-neutron-openvswitch.git $distro/neutron-openvswitch
+git clone -b stable/18.02 https://github.com/openstack/charm-nova-cloud-controller.git $distro/nova-cloud-controller
+git clone -b stable/18.02 https://github.com/openstack/charm-nova-compute.git $distro/nova-compute
+git clone -b stable/18.02 https://github.com/openstack/charm-openstack-dashboard.git $distro/openstack-dashboard
+git clone -b stable/18.02 https://github.com/openstack/charm-rabbitmq-server.git $distro/rabbitmq-server
+git clone -b stable/18.02 https://github.com/openstack/charm-heat.git $distro/heat
+git clone -b stable/18.02 https://github.com/openstack/charm-lxd.git $distro/lxd
git clone https://github.com/openbaton/juju-charm.git $distro/openbaton
+charm pull cs:memcached $distro/memcached
+charm pull cs:gnocchi $distro/gnocchi
charm pull cs:$distro/mongodb $distro/mongodb
charm pull cs:ntp $distro/ntp
charm pull cs:$distro/aodh $distro/aodh
if [ $(juju status keystone --format=short | grep " keystone"|wc -l) == 1 ];then
unitAddress keystone 0
else
- juju config keystone | python -c "import yaml; import sys; print yaml.load(sys.stdin)['settings']['vip']['value']" | cut -d " " -f 1
+ juju config keystone vip | cut -d " " -f 1
fi
}
echo_info "Creating the openrc (OpenStack client environment scripts)"
mkdir -m 0700 -p cloud
- keystoneIp=$(keystoneIp)
- adminPasswd=$(juju config keystone | python -c "import yaml; import sys; print yaml.load(sys.stdin)['settings']['admin-password']['value']" | cut -d " " -f 1)
+ usessl=$(juju config keystone ssl_ca)
+ if [[ "$usessl" == "" ]]; then
+ usessl=no
+ else
+ usessl=yes
+ fi
+ keystoneIp=$(juju config keystone os-public-hostname | cut -d " " -f 1)
+ if [[ "$keystoneIp" == "" ]]; then
+ keystoneIp=$(keystoneIp)
+ fi
+ adminPasswd=$(juju config keystone admin-password | cut -d " " -f 1)
- v3api=`juju config keystone preferred-api-version`
+ v3api=$(juju config keystone preferred-api-version)
if [[ "$v3api" == "3" ]]; then
- configOpenrc admin $adminPasswd admin http://$keystoneIp:5000/v3 RegionOne publicURL > ~/joid_config/admin-openrc
+ if [ "$usessl" == "yes" ]; then
+ configOpenrc admin $adminPasswd admin https://$keystoneIp:5000/v3 RegionOne publicURL > ~/joid_config/admin-openrc
+ else
+ configOpenrc admin $adminPasswd admin http://$keystoneIp:5000/v3 RegionOne publicURL > ~/joid_config/admin-openrc
+ fi
chmod 0600 ~/joid_config/admin-openrc
source ~/joid_config/admin-openrc
projectid=`openstack project show admin -c id -f value`
}
configOpenrc() {
+if [ "$usessl" == "yes" ]; then
cat <<-EOF
#export OS_NO_CACHE='true'
export OS_AUTH_URL=$4
export OS_IDENTITY_API_VERSION=3
export OS_REGION_NAME=$5
export OS_INTERFACE=public
-#export OS_INSECURE=true
-#export OS_CASSL=~/joid_config/ca.pem
+export OS_CACERT=~/joid_config/keystone_juju_ca_cert.crt
EOF
+else
+cat <<-EOF
+#export OS_NO_CACHE='true'
+export OS_AUTH_URL=$4
+export OS_USER_DOMAIN_NAME=admin_domain
+export OS_PROJECT_DOMAIN_NAME=admin_domain
+export OS_USERNAME=$1
+export OS_TENANT_NAME=$3
+export OS_PROJECT_NAME=$3
+export OS_PASSWORD=$2
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=$5
+export OS_INTERFACE=public
+#export OS_CACERT=~/joid_config/bradm.etsi-ubuntu-jh.maas.pem
+EOF
+fi
}
+
+if [ "$usessl" == "yes" ]; then
+ juju scp keystone/0:/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt ~/joid_config/
+fi
+
# Create an load openrc
create_openrc
Scenario 1: Nosdn
-----------------
-*./deploy.sh -o ocata -s nosdn -t ha -l custom -f none -d xenial -m openstack*
+*./deploy.sh -o pike -s nosdn -t ha -l custom -f none -d xenial -m openstack*
Scenario 2: Kubernetes core
---------------------------
Scenario 5: Openstack with Opencontrail
---------------------------------------
-*./deploy.sh -o ocata -s ocl -t ha -l custom -f none -d xenial -m openstack*
+*./deploy.sh -o pike -s ocl -t ha -l custom -f none -d xenial -m openstack*
+
+Scenario 6: Kubernetes Load Balancer with Canal CNI
+---------------------------------------------------
+
+*./deploy.sh -s canal -l custom -f lb -m kubernetes*
+
+Scenario 7: Kubernetes Load Balancer with Ceph
+----------------------------------------------
+
+*./deploy.sh -l custom -f lb,ceph -m kubernetes*
release: d
distro: xenial
type: noha
- openstack: newton
+ openstack: pike
sdncontroller:
- type: nosdn
storage:
OPNFV Install
-------------
-| ``Â Â Â ./deploy.sh -o newton -s nosdn -t noha -l custom -f none -d xenial -m openstack``
+| ``   ./deploy.sh -o pike -s nosdn -t noha -l custom -f none -d xenial -m openstack``
| ``   ``
-./deploy.sh -o newton -s nosdn -t noha -l custom -f none -d xenial -m openstack
+./deploy.sh -o pike -s nosdn -t noha -l custom -f none -d xenial -m openstack
NOTE: Possible options are as follows:
ha: HA mode of openstack.
Which version of Openstack is deployed.
- [-o|--openstack <ocata|newton>]
+ [-o|--openstack <pike|ocata>]
+ pike: Pike version of openstack.
Ocata: Ocata version of openstack.
- Newton: Newton version of openstack.
Where to deploy
[-l|--lab <custom | default>] etc...
openstack.sh under joid/ci used to configure the openstack after deployment
-./openstack.sh <nosdn> custom xenial newton
+./openstack.sh <nosdn> custom xenial pike
Below commands are used to setup domain in heat.
juju run-action heat/0 domain-setup
Abstract
========
-This document will explain how to install the Euphrates release of OPNFV with
+This document will explain how to install the Fraser release of OPNFV with
JOID including installing JOID, configuring JOID for your environment, and
deploying OPNFV with different SDN solutions in HA, or non-HA mode.
::
- git clone -b stable/danube https://gerrit.opnfv.org/gerrit/p/joid.git
+ git clone -b stable/fraser https://gerrit.opnfv.org/gerrit/p/joid.git
Create a directory in ``joid/labconfig/<company_name>/<pod_number>/`` and
create or copy a ``labconfig.yaml`` configuration file to that directory.
release: d
distro: xenial
type: noha
- openstack: ocata
+ openstack: pike
sdncontroller:
- type: nosdn
storage:
second of two JOID deployment steps.
JOID allows you to deploy different combinations of OpenStack and SDN solutions
-in HA or no-HA mode. For OpenStack, it supports Newton and Ocata. For SDN, it
+in HA or no-HA mode. For OpenStack, it supports Pike and Ocata. For SDN, it
supports Open vSwitch, OpenContrail, OpenDaylight and ONOS (Open Network
Operating System). In addition to HA or no-HA mode, it also supports deploying
the latest from the development tree (tip).
::
# in joid/ci directory
- ./deploy.sh -d xenial -m openstack -o ocata -s nosdn -f none -t noha -l custom
+ ./deploy.sh -d xenial -m openstack -o pike -s nosdn -f none -t noha -l custom
The above command starts an OPNFV deployment with Ubuntu Xenial (16.04) distro,
-OpenStack model, Ocata version of OpenStack, Open vSwitch (and no other SDN),
+OpenStack model, Pike version of OpenStack, Open vSwitch (and no other SDN),
no special features, no-HA OpenStack mode and with custom labconfig. I.e. this
corresponds to the ``os-nosdn-nofeature-noha`` OPNFV deployment scenario.
**Version of Openstack deployed**
::
- [-o <newton|mitaka>]
+ [-o <pike|ocata>]
- - ``newton``: Newton version of OpenStack.
+ - ``pike``: Pike version of OpenStack.
- ``ocata``: Ocata version of OpenStack.
**SDN controller**
::
- [-s <nosdn|odl|opencontrail|onos>]
+ [-s <nosdn|odl|opencontrail|onos|canal>]
- ``nosdn``: Open vSwitch only and no other SDN.
- ``odl``: OpenDayLight Boron version.
- ``opencontrail``: OpenContrail SDN.
- ``onos``: ONOS framework as SDN.
+ - ``canal``: Canal CNI plugin for Kubernetes.
**Feature to deploy** (comma separated list)
::
- ``dpdk``: Will enable DPDK feature.
- ``sfc``: Will enable sfc feature only supported with ONOS deployment.
- ``lb``: Load balancing in case of Kubernetes will be enabled.
+ - ``ceph``: Ceph storage for Kubernetes will be enabled.
**Mode of Openstack deployed**
::
::
- ./deploy.sh -o newton -s nosdn -t ha -l custom -f none
+ ./deploy.sh -o pike -s nosdn -t ha -l custom -f none
- If you have not setup MAAS with ``03-maasdeploy.sh`` then the
``./clean.sh`` command could hang, the ``juju status`` command may hang
-------------
JOID as *Juju OPNFV Infrastructure Deployer* allows you to deploy different
combinations of OpenStack release and SDN solution in HA or non-HA mode. For
-OpenStack, JOID currently supports Newton and Ocata. For SDN, it supports
+OpenStack, JOID currently supports Ocata and Pike. For SDN, it supports
Openvswitch, OpenContrail, OpenDayLight, and ONOS. In addition to HA or non-HA
mode, it also supports deploying from the latest development tree.
::
- ./openstack.sh <nosdn> custom xenial newton
+ ./openstack.sh <nosdn> custom xenial pike
Below commands are used to setup domain in heat.
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
Abstract
========
-This document compiles the release notes for the Euphrates release of
+This document compiles the release notes for the Fraser release of
OPNFV when using JOID as a deployment tool.
Introduction
============
These notes provides release information for the use of joid as deployment
-tool for the Euphrates release of OPNFV.
+tool for the Fraser release of OPNFV.
-The goal of the Euphrates release and this JOID based deployment process is
+The goal of the Fraser release and this JOID based deployment process is
to establish a lab ready platform accelerating further development
of the OPNFV infrastructure.
Summary
=======
-The Euphrates release with the JOID deployment toolchain will establish an OPNFV target system on a Pharos compliant lab infrastructure.
-The current definition of an OPNFV target system is and OpenStack Newton combined with OpenDaylight Boron.
+The Fraser release with the JOID deployment toolchain will establish an OPNFV target system on a Pharos compliant lab infrastructure.
+The current definition of an OPNFV target system is an OpenStack Pike deployment combined with OpenDaylight Boron.
The system is deployed with OpenStack High Availability (HA) for most OpenStack services.
-Ceph storage is used as Cinder backend, and is the only supported storage for Euphrates. Ceph is setup as 3 OSDs and 3 Monitors, one radosgw.
+Ceph storage is used as Cinder backend, and is the only supported storage for Fraser. Ceph is setup as 3 OSDs and 3 Monitors, one radosgw.
User has following choices to make to do the deployment.
- - Openstack -- Newton
+ - Openstack -- Pike
- Type -- HA, nonHA, tip (stable git branch of respective openstack)
- - SDN controller -- OpenDaylight, nosdn(Openvswitch), Onos, OpenContrail
- - Feature -- IPV6, DVR(distributed virtual routing), SFC(service function chaining odl only), BGPVPN(odl only), LB(Load Balancer for Kubernetes)
+ - SDN controller -- nosdn(Openvswitch), Onos, OpenContrail, Canal(k8), OVN (K8)
+ - Feature -- IPV6, DVR(distributed virtual routing), ceph(Kubernetes storage), LB(Load Balancer for Kubernetes)
- Distro -- Xenial
- Model -- Openstack, Kubernetes
| | |
+--------------------------------------+--------------------------------------+
| **Repo/tag** | gerrit.opnfv.org/gerrit/joid.git |
-| | stable/euphrates |
+| | opnfv-6.0.0 |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Euphrates release |
+| **Release designation** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | December 15 2017 |
+| **Release date** | April 27 2018 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Euphrates release |
+| **Purpose of the delivery** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
Module version change
~~~~~~~~~~~~~~~~~~~~~
- - Euphrates release with the JOID deployment toolchain.
- - OpenStack (Ocata release)
+ - Fraser release with the JOID deployment toolchain.
+ - OpenStack (Pike release)
- Kubernetes 1.8
- Ubuntu 16.04 LTS
Document version change
~~~~~~~~~~~~~~~~~~~~~~~
-- OPNFV Installation instructions for the Euphrates release using JOID deployment
+- OPNFV Installation instructions for the Fraser release using JOID deployment
toolchain - ver. 1.0.0
- OPNFV Release Notes with the JOID deployment toolchain - ver. 1.0.0 (this document)
+--------------------------------------+--------------------------------------+
| JIRA: JOID-106 | Kubernetes on Baremetal |
+--------------------------------------+--------------------------------------+
-| JIRA: JOID-102 | Enable OpenStack Ocata |
+| JIRA: | Enable OpenStack Pike |
+--------------------------------------+--------------------------------------+
| | Enable OpenContrail |
+--------------------------------------+--------------------------------------+
Scenario Releases
=================
Name: joid-os-nosdn-nofeature-ha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-nofeature-ha-baremetal-daily-euphrates/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-nofeature-ha-baremetal-daily-fraser/
Notes:
Name: joid-os-nosdn-lxd-ha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-ha-baremetal-daily-euphrates/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-ha-baremetal-daily-fraser/
Notes:
Name: joid-os-nosdn-lxd-noha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-noha-baremetal-daily-euphrates/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-noha-baremetal-daily-fraser/
Notes:
Name: joid-os-nosdn-nofeature-noha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-nofeature-noha-baremetal-daily-euphrates/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-nofeature-noha-baremetal-daily-fraser/
Notes:
Name: joid-k8-nosdn-lb-noha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-nosdn-lb-noha-baremetal-daily-euphrates/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-nosdn-lb-noha-baremetal-daily-fraser/
Notes:
Name: joid-k8-ovn-lb-noha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-ovn-lb-noha-baremetal-daily-euphrates/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-ovn-lb-noha-baremetal-daily-fraser/
Notes:
Name: joid-os-ocl-nofeature-ha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-ocl-nofeature-ha-baremetal-daily-euphrates/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-ocl-nofeature-ha-baremetal-daily-fraser/
Notes:
Name: joid-os-ocl-nofeature-noha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-ocl-nofeature-noha-baremetal-daily-euphrates/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-ocl-nofeature-noha-baremetal-daily-fraser/
+Notes:
+
+Name: joid-k8-canal-lb-noha
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-canal-lb-noha-baremetal-daily-fraser/
+Notes:
+
+Name: joid-k8-nosdn-lb_ceph-noha
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-nosdn-lb_ceph-noha-baremetal-daily-fraser/
Notes:
References
==========
-For more information on the OPNFV Euphrates release, please visit
-- `OPNFV Euphrates release <http://www.opnfv.org/euphrates>`_
+For more information on the OPNFV Fraser release, please visit
+- `OPNFV Fraser release <http://www.opnfv.org/fraser>`_
Juju
----
JOID
----
- `OPNFV JOID wiki <https://wiki.opnfv.org/joid>`_
-- `OPNFV Release Notes <http://docs.opnfv.org/en/stable-danube/submodules/joid/docs/release/release-notes/release-notes.html>`_
+- `OPNFV Release Notes <http://docs.opnfv.org/en/stable-fraser/submodules/joid/docs/release/release-notes/release-notes.html>`_
- `OPNFV JOID Install Guide <http://docs.opnfv.org/en/latest/submodules/joid/docs/release/installation/index.html>`_
OpenStack
---------
-- `OpenStack Newton Release artifacts <http://www.openstack.org/software/ocata>`_
+- `OpenStack Pike Release artifacts <http://www.openstack.org/software/pike>`_
- `OpenStack documentation <http://docs.openstack.org>`_
OpenDaylight
--- /dev/null
+.. _k8-canal-lb-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+=============================
+JOID Kubernetes Release Notes
+=============================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ release-notes.rst
+
--- /dev/null
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+
+Abstract
+========
+
+This document compiles the release notes for the Fraser release of
+OPNFV when using JOID as a deployment tool for Kubernetes with canal CNI.
+
+Introduction
+============
+
+These notes provide release information for the use of joid as deployment
+tool for the Fraser release of OPNFV for Kubernetes scenario.
+
+The goal of the Fraser release and this JOID based deployment process is
+to establish a lab ready platform accelerating further development
+of the OPNFV infrastructure for docker based workloads.
+
+Carefully follow the installation-instructions which guides a user to deploy
+OPNFV using JOID which is based on MAAS and Juju.
+
+Summary
+=======
+
+Kubernetes is an open-source system for automating deployment, scaling, and
+management of containerized applications.
+
+This is a Kubernetes cluster that includes logging, monitoring, and operational
+knowledge. It is comprised of the following components and features:
+
+Kubernetes (automated deployment, operations, and scaling)
+ TLS used for communication between nodes for security.
+ A CNI plugin (e.g., Canal)
+ Optional Ingress Controller (on worker)
+ Optional Dashboard addon (on master) including Heapster for cluster monitoring
+
+EasyRSA
+ Performs the role of a certificate authority serving self signed certificates
+ to the requesting units of the cluster.
+
+Etcd (distributed key value store)
+ Minimum Three node cluster for reliability.
+
+Fraser release with the JOID deployment with Kubernetes will establish an
+OPNFV target system on a Pharos compliant lab infrastructure.
+
+NOTE: Detailed information on how to install in your lab can be found in the installation guide.
+The command to deploy this scenario is:
+
+#Kubernetes deployment
+./deploy.sh -m kubernetes -f lb -l custom -s canal
+
+Using Kubernetes after Deployment
+=================================
+
+Once you have finished installing the JOID with Kubernetes you can use the
+following command to test the deployment.
+
+To deploy 5 replicas of the microbot web application inside the Kubernetes
+cluster run the following command:
+
+juju run-action kubernetes-worker/0 microbot replicas=5
+
+This action performs the following steps:
+
+It creates a deployment titled 'microbots' comprised of 5 replicas defined
+during the run of the action. It also creates a service named 'microbots'
+which binds an 'endpoint', using all 5 of the 'microbots' pods.
+Finally, it will create an ingress resource, which points at a
+xip.io domain to simulate a proper DNS service.
+
+Running the packaged example
+
+You can run a Juju action to create an example microbot web application:
+
+$ juju run-action kubernetes-worker/0 microbot replicas=3
+Action queued with id: db7cc72b-5f35-4a4d-877c-284c4b776eb8
+
+$ juju show-action-output db7cc72b-5f35-4a4d-877c-284c4b776eb8
+results:
+ address: microbot.104.198.77.197.xip.io
+status: completed
+timing:
+ completed: 2016-09-26 20:42:42 +0000 UTC
+ enqueued: 2016-09-26 20:42:39 +0000 UTC
+ started: 2016-09-26 20:42:41 +0000 UTC
+Note: Your FQDN will be different and contain the address of the cloud
+instance.
+At this point, you can inspect the cluster to observe the workload coming
+online.
+
+More information on using Canonical distribution of kubernetes can be found
+at https://jujucharms.com/canonical-kubernetes/
+
+Release Data
+============
+
++--------------------------------------+--------------------------------------+
+| **Project** | JOID |
+| | |
++--------------------------------------+--------------------------------------+
+| **Repo/tag** | gerrit.opnfv.org/gerrit/joid.git |
+| | opnfv-6.0.0 |
++--------------------------------------+--------------------------------------+
+| **Release designation** | Fraser release |
+| | |
++--------------------------------------+--------------------------------------+
+| **Release date** | April 27 2018 |
+| | |
++--------------------------------------+--------------------------------------+
+| **Purpose of the delivery** | Fraser release |
+| | |
++--------------------------------------+--------------------------------------+
+
+Deliverables
+------------
+
+Software deliverables
+~~~~~~~~~~~~~~~~~~~~~
+`JOID based installer script files <https://gerrit.opnfv.org/gerrit/gitweb?p=joid.git>`_
+
+Known Limitations, Issues and Workarounds
+=========================================
+
+Known issues
+------------
+
+**JIRA TICKETS:**
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE** | **SLOGAN** |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: | No support for yardstick and functest|
+| | for Kubernetes scenarios (OPNFV) |
++--------------------------------------+--------------------------------------+
+| JIRA: | |
++--------------------------------------+--------------------------------------+
+
+
+Scenario Releases
+=================
+Name: joid-k8-canal-lb-noha
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-canal-lb-noha-baremetal-daily-fraser/
+Notes:
+
+References
+==========
+
+Juju
+----
+- `Juju Charm store <https://jujucharms.com/>`_
+- `Juju documents <https://jujucharms.com/docs/stable/getting-started>`_
+- `Canonical Distribution of Kubernetes <https://jujucharms.com/canonical-kubernetes/>`_
+
+MAAS
+----
+- `Bare metal management (Metal-As-A-Service) <http://maas.io/get-started>`_
+- `MAAS API documents <http://maas.ubuntu.com/docs/>`_
+
+JOID
+----
+- `OPNFV JOID wiki <https://wiki.opnfv.org/joid>`_
+- `OPNFV JOID Get Started <https://wiki.opnfv.org/display/joid/JOID+Get+Started>`_
+
+Kubernetes
+----------
+- `Kubernetes Release artifacts <https://get.k8s.io/>`_
+- `Kubernetes documentation <https://kubernetes.io/>`_
+
Abstract
========
-This document compiles the release notes for the Danube release of
+This document compiles the release notes for the Fraser release of
OPNFV when using JOID as a deployment tool for Kubernets and load balancer.
Introduction
============
These notes provides release information for the use of joid as deployment
-tool for the Danube release of OPNFV for Kubernets and load balancer
+tool for the Fraser release of OPNFV for Kubernets and load balancer
scenario.
-The goal of the Danube release and this JOID based deployment process is
+The goal of the Fraser release and this JOID based deployment process is
to establish a lab ready platform accelerating further development
of the OPNFV infrastructure for docker based workloads.
Etcd (distributed key value store)
Minimum Three node cluster for reliability.
-Danube release with the JOID deployment with Kubernetes with load balancer will establish an
+Fraser release with the JOID deployment with Kubernetes with load balancer will establish an
OPNFV target system on a Pharos compliant lab infrastructure.
NOTE: Detailed information on how to install in your lab can be find in installation guide
| | |
+--------------------------------------+--------------------------------------+
| **Repo/tag** | gerrit.opnfv.org/gerrit/joid.git |
-| | stable/danube |
+| | opnfv-6.0.0 |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Danube release |
+| **Release designation** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | March 31 2017 |
+| **Release date** | April 27 2018 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Danube release |
+| **Purpose of the delivery** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
Scenario Releases
=================
Name: joid-k8-nosdn-lb-noha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-nosdn-lb-noha-baremetal-daily-danube/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-nosdn-lb-noha-baremetal-daily-fraser/
Notes:
References
--- /dev/null
+.. _k8-nosdn-lb_ceph-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+=============================
+JOID Kubernetes Release Notes
+=============================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ release-notes.rst
+
--- /dev/null
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+
+Abstract
+========
+
+This document compiles the release notes for the Fraser release of
+OPNFV when using JOID as a deployment tool for Kubernetes with load balancer and Ceph storage.
+
+Introduction
+============
+
+These notes provide release information for the use of joid as deployment
+tool for the Fraser release of OPNFV for Kubernetes scenario.
+
+The goal of the Fraser release and this JOID based deployment process is
+to establish a lab ready platform accelerating further development
+of the OPNFV infrastructure for docker based workloads.
+
+Carefully follow the installation-instructions which guides a user to deploy
+OPNFV using JOID which is based on MAAS and Juju.
+
+Summary
+=======
+
+Kubernetes is an open-source system for automating deployment, scaling, and
+management of containerized applications.
+
+This is a Kubernetes cluster that includes logging, monitoring, and operational
+knowledge. It is comprised of the following components and features:
+
+Kubernetes (automated deployment, operations, and scaling)
+ TLS used for communication between nodes for security.
+ A CNI plugin (e.g., Canal)
+ Ceph based storage solution ( LXD container)
+ Optional Ingress Controller (on worker)
+ Optional Dashboard addon (on master) including Heapster for cluster monitoring
+
+EasyRSA
+ Performs the role of a certificate authority serving self signed certificates
+ to the requesting units of the cluster.
+
+Etcd (distributed key value store)
+ Minimum Three node cluster for reliability.
+
+Fraser release with the JOID deployment with Kubernetes will establish an
+OPNFV target system on a Pharos compliant lab infrastructure.
+
+NOTE: Detailed information on how to install in your lab can be found in the installation guide.
+The command to deploy this scenario is:
+
+#Kubernetes deployment
+./deploy.sh -m kubernetes -f lb,ceph -l custom
+
+Using Kubernetes after Deployment
+=================================
+
+Once you have finished installing the JOID with Kubernetes you can use the
+following command to test the deployment.
+
+To deploy 5 replicas of the microbot web application inside the Kubernetes
+cluster run the following command:
+
+juju run-action kubernetes-worker/0 microbot replicas=5
+
+This action performs the following steps:
+
+It creates a deployment titled 'microbots' comprised of 5 replicas defined
+during the run of the action. It also creates a service named 'microbots'
+which binds an 'endpoint', using all 5 of the 'microbots' pods.
+Finally, it will create an ingress resource, which points at a
+xip.io domain to simulate a proper DNS service.
+
+Running the packaged example
+
+You can run a Juju action to create an example microbot web application:
+
+$ juju run-action kubernetes-worker/0 microbot replicas=3
+Action queued with id: db7cc72b-5f35-4a4d-877c-284c4b776eb8
+
+$ juju show-action-output db7cc72b-5f35-4a4d-877c-284c4b776eb8
+results:
+ address: microbot.104.198.77.197.xip.io
+status: completed
+timing:
+ completed: 2016-09-26 20:42:42 +0000 UTC
+ enqueued: 2016-09-26 20:42:39 +0000 UTC
+ started: 2016-09-26 20:42:41 +0000 UTC
+Note: Your FQDN will be different and contain the address of the cloud
+instance.
+At this point, you can inspect the cluster to observe the workload coming
+online.
+
+More information on using Canonical distribution of kubernetes can be found
+at https://jujucharms.com/canonical-kubernetes/
+
+Release Data
+============
+
++--------------------------------------+--------------------------------------+
+| **Project** | JOID |
+| | |
++--------------------------------------+--------------------------------------+
+| **Repo/tag** | gerrit.opnfv.org/gerrit/joid.git |
+| | opnfv-6.0.0 |
++--------------------------------------+--------------------------------------+
+| **Release designation** | Fraser release |
+| | |
++--------------------------------------+--------------------------------------+
+| **Release date** | April 27 2018 |
+| | |
++--------------------------------------+--------------------------------------+
+| **Purpose of the delivery** | Fraser release |
+| | |
++--------------------------------------+--------------------------------------+
+
+Deliverables
+------------
+
+Software deliverables
+~~~~~~~~~~~~~~~~~~~~~
+`JOID based installer script files <https://gerrit.opnfv.org/gerrit/gitweb?p=joid.git>`_
+
+Known Limitations, Issues and Workarounds
+=========================================
+
+Known issues
+------------
+
+**JIRA TICKETS:**
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE** | **SLOGAN** |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: | No support for yardstick and functest|
+| | for Kubernetes scenarios (OPNFV) |
++--------------------------------------+--------------------------------------+
+| JIRA: | |
++--------------------------------------+--------------------------------------+
+
+
+Scenario Releases
+=================
+Name: joid-k8-nosdn-lb_ceph-noha
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-nosdn-lb_ceph-noha-baremetal-daily-fraser/
+Notes:
+
+References
+==========
+
+Juju
+----
+- `Juju Charm store <https://jujucharms.com/>`_
+- `Juju documents <https://jujucharms.com/docs/stable/getting-started>`_
+- `Canonical Distribution of Kubernetes <https://jujucharms.com/canonical-kubernetes/>`_
+
+MAAS
+----
+- `Bare metal management (Metal-As-A-Service) <http://maas.io/get-started>`_
+- `MAAS API documents <http://maas.ubuntu.com/docs/>`_
+
+JOID
+----
+- `OPNFV JOID wiki <https://wiki.opnfv.org/joid>`_
+- `OPNFV JOID Get Started <https://wiki.opnfv.org/display/joid/JOID+Get+Started>`_
+
+Kubernetes
+----------
+- `Kubernetes Release artifacts <https://get.k8s.io/>`_
+- `Kubernetes documentation <https://kubernetes.io/>`_
+
Abstract
========
-This document compiles the release notes for the Danube release of
+This document compiles the release notes for the Fraser release of
OPNFV when using JOID as a deployment tool for Kubernetes.
Introduction
============
These notes provides release information for the use of joid as deployment
-tool for the Danube release of OPNFV for Kubernetes scenario.
+tool for the Fraser release of OPNFV for Kubernetes scenario.
-The goal of the Danube release and this JOID based deployment process is
+The goal of the Fraser release and this JOID based deployment process is
to establish a lab ready platform accelerating further development
of the OPNFV infrastructure for docker based workloads.
Etcd (distributed key value store)
Minimum Three node cluster for reliability.
-Danube release with the JOID deployment with Kubernetes will establish an
+Fraser release with the JOID deployment with Kubernetes will establish an
OPNFV target system on a Pharos compliant lab infrastructure.
NOTE: Detailed information on how to install in your lab can be find in installation guide
| | |
+--------------------------------------+--------------------------------------+
| **Repo/tag** | gerrit.opnfv.org/gerrit/joid.git |
-| | stable/danube |
+| | opnfv-6.0.0 |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Danube release |
+| **Release designation** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | March 31 2017 |
+| **Release date** | April 27 2018 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Danube release |
+| **Purpose of the delivery** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
Scenario Releases
=================
Name: joid-k8-nosdn-nofeature-noha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-nosdn-nofeature-noha-baremetal-daily-danube/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-nosdn-nofeature-noha-baremetal-daily-fraser/
Notes:
References
Abstract
========
-This document compiles the release notes for the Euphrates release of
+This document compiles the release notes for the Fraser release of
OPNFV when using JOID as a deployment tool for Kubernets and load balancer.
Introduction
============
These notes provides release information for the use of joid as deployment
-tool for the Euphrates release of OPNFV for Kubernets and load balancer
+tool for the Fraser release of OPNFV for Kubernets and load balancer
scenario.
-The goal of the Euphrates release and this JOID based deployment process is
+The goal of the Fraser release and this JOID based deployment process is
to establish a lab ready platform accelerating further development
of the OPNFV infrastructure for docker based workloads.
Etcd (distributed key value store)
Minimum Three node cluster for reliability.
-Euphrates release with the JOID deployment with Kubernetes with load balancer will establish an
+Fraser release with the JOID deployment with Kubernetes with load balancer will establish an
OPNFV target system on a Pharos compliant lab infrastructure.
NOTE: Detailed information on how to install in your lab can be find in installation guide
| | |
+--------------------------------------+--------------------------------------+
| **Repo/tag** | gerrit.opnfv.org/gerrit/joid.git |
-| | stable/euphrates |
+| | opnfv-6.0.0 |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Euphrates release |
+| **Release designation** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | March 31 2017 |
+| **Release date** | April 27 2018 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Euphrates release |
+| **Purpose of the delivery** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
Scenario Releases
=================
Name: joid-k8-ovn-lb-noha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-ovn-lb-noha-baremetal-daily-euphrates/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-k8-ovn-lb-noha-baremetal-daily-fraser/
Notes:
References
Abstract
========
-This document compiles the release notes for the Danube release of
+This document compiles the release notes for the Fraser release of
OPNFV when using JOID as a deployment tool with LXD container hypervisor.
Introduction
============
These notes provides release information for the use of joid as deployment
-tool for the Danube release of OPNFV with LXD hypervisor for containers
+tool for the Fraser release of OPNFV with LXD hypervisor for containers
scenario.
-The goal of the Danube release and this JOID based deployment process is
+The goal of the Fraser release and this JOID based deployment process is
to establish a lab ready platform accelerating further development
of the OPNFV infrastructure.
container. LXD uses the same container technology found in the Linux kernel
(cgroups, namespaces, LSM, etc).
-Danube release with the JOID deployment with LXD hypervisor will establish an
+Fraser release with the JOID deployment with LXD hypervisor will establish an
OPNFV target system on a Pharos compliant lab infrastructure.
-The current definition of an OPNFV target system is and OpenStack Newton combined
+The current definition of an OPNFV target system is and OpenStack Pike combined
with LXD Hypervisor.
The system is deployed with OpenStack High Availability (HA) for most OpenStack services.
User has following choices to make to do the deployment.
- - Openstack -- Newton
+ - Openstack -- Pike
- Type -- HA, nonHA, tip (stable git branch of respective openstack)
- Feature -- LXD (container hypervisor)
command to deploy lxd feature is:
#LXD deployment with HA Openstack
-./deploy.sh -o newton -f lxd -t ha -l custom -s nosdn
+./deploy.sh -o pike -f lxd -t ha -l custom -s nosdn
#LXD deployment with no HA Openstack
-./deploy.sh -o newton -f lxd -t noha -l custom -s nosdn
+./deploy.sh -o pike -f lxd -t noha -l custom -s nosdn
Using LXD with Openstack
========================
| | |
+--------------------------------------+--------------------------------------+
| **Repo/tag** | gerrit.opnfv.org/gerrit/joid.git |
-| | stable/danube |
+| | opnfv-6.0.0 |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Danube release |
+| **Release designation** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | April 01 2017 |
+| **Release date** | April 27 2018 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Danube release |
+| **Purpose of the delivery** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
Scenario Releases
=================
Name: joid-os-nosdn-lxd-ha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-ha-baremetal-daily-danube/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-ha-baremetal-daily-fraser/
Notes:
Name: joid-os-nosdn-lxd-noha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-noha-baremetal-daily-danube/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-noha-baremetal-daily-fraser/
Notes:
References
OpenStack
---------
-- `OpenStack Newton Release artifacts <http://www.openstack.org/software/newton>`_
+- `OpenStack Pike Release artifacts <http://www.openstack.org/software/pike>`_
- `OpenStack documentation <http://docs.openstack.org>`_
Abstract
========
-This document compiles the release notes for the Danube release of
+This document compiles the release notes for the Fraser release of
OPNFV when using JOID as a deployment tool with LXD container hypervisor.
Introduction
============
These notes provides release information for the use of joid as deployment
-tool for the Danube release of OPNFV with LXD hypervisor for containers
+tool for the Fraser release of OPNFV with LXD hypervisor for containers
scenario.
-The goal of the Danube release and this JOID based deployment process is
+The goal of the Fraser release and this JOID based deployment process is
to establish a lab ready platform accelerating further development
of the OPNFV infrastructure.
container. LXD uses the same container technology found in the Linux kernel
(cgroups, namespaces, LSM, etc).
-Danube release with the JOID deployment with LXD hypervisor will establish an
+Fraser release with the JOID deployment with LXD hypervisor will establish an
OPNFV target system on a Pharos compliant lab infrastructure.
-The current definition of an OPNFV target system is and OpenStack Newton combined
+The current definition of an OPNFV target system is and OpenStack Pike combined
with LXD Hypervisor.
The system is deployed with OpenStack High Availability (HA) for most OpenStack services.
User has following choices to make to do the deployment.
- - Openstack -- Newton
+ - Openstack -- Pike
- Type -- HA, nonHA, tip (stable git branch of respective openstack)
- Feature -- LXD (container hypervisor)
command to deploy lxd feature is:
#LXD deployment with HA Openstack
-./deploy.sh -o newton -f lxd -t ha -l custom -s nosdn
+./deploy.sh -o pike -f lxd -t ha -l custom -s nosdn
#LXD deployment with no HA Openstack
-./deploy.sh -o newton -f lxd -t noha -l custom -s nosdn
+./deploy.sh -o pike -f lxd -t noha -l custom -s nosdn
Using LXD with Openstack
========================
| | |
+--------------------------------------+--------------------------------------+
| **Repo/tag** | gerrit.opnfv.org/gerrit/joid.git |
-| | stable/danube |
+| | opnfv-6.0.0 |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Danube release |
+| **Release designation** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | April 01 2017 |
+| **Release date**                     | April 27 2018                        |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Danube release |
+| **Purpose of the delivery** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
Scenario Releases
=================
Name: joid-os-nosdn-lxd-ha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-ha-baremetal-daily-danube/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-ha-baremetal-daily-fraser/
Notes:
Name: joid-os-nosdn-lxd-noha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-noha-baremetal-daily-danube/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-noha-baremetal-daily-fraser/
Notes:
References
OpenStack
---------
-- `OpenStack Newton Release artifacts <http://www.openstack.org/software/newton>`_
+- `OpenStack Pike Release artifacts <http://www.openstack.org/software/pike>`_
- `OpenStack documentation <http://docs.openstack.org>`_
Abstract
========
-This document compiles the release notes for the Danube release of
+This document compiles the release notes for the Fraser release of
OPNFV when using JOID as a deployment tool with KVM hypervisor.
Introduction
============
These notes provides release information for the use of joid as deployment
-tool for the Danube release of OPNFV with KVM hypervisor for containers
+tool for the Fraser release of OPNFV with KVM hypervisor for containers
scenario.
-The goal of the Danube release and this JOID based deployment process is
+The goal of the Fraser release and this JOID based deployment process is
to establish a lab ready platform accelerating further development
of the OPNFV infrastructure.
It consists of a loadable kernel module, kvm.ko, that provides the core
virtualization infrastructure and a processor specific module, kvm-intel.ko or kvm-amd.ko.
-Danube release with the JOID deployment with KVM hypervisor will establish an
+Fraser release with the JOID deployment with KVM hypervisor will establish an
OPNFV target system on a Pharos compliant lab infrastructure.
-The current definition of an OPNFV target system is and OpenStack Newton.
+The current definition of an OPNFV target system is and OpenStack Pike.
The system is deployed with OpenStack High Availability (HA) for most OpenStack services.
User has following choices to make to do the deployment.
- - Openstack -- Newton
+ - Openstack -- Pike
- Type -- HA, nonHA, tip (stable git branch of respective openstack)
- Feature -- KVM (hypervisor)
command to deploy lxd feature is:
#KVM deployment with HA Openstack
-./deploy.sh -o newton -f none -t ha -l custom -s nosdn
+./deploy.sh -o pike -f none -t ha -l custom -s nosdn
#LXD deployment with no HA Openstack
-./deploy.sh -o newton -f none -t noha -l custom -s nosdn
+./deploy.sh -o pike -f none -t noha -l custom -s nosdn
Using Openstack
===============
| | |
+--------------------------------------+--------------------------------------+
| **Repo/tag** | gerrit.opnfv.org/gerrit/joid.git |
-| | stable/danube |
+| | opnfv-6.0.0 |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Danube release |
+| **Release designation** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | April 01 2017 |
+| **Release date** | April 27 2018 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Danube release |
+| **Purpose of the delivery** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
Scenario Releases
=================
Name: joid-os-nosdn-lxd-ha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-ha-baremetal-daily-danube/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-ha-baremetal-daily-fraser/
Notes:
Name: joid-os-nosdn-lxd-noha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-noha-baremetal-daily-danube/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-noha-baremetal-daily-fraser/
Notes:
References
OpenStack
---------
-- `OpenStack Newton Release artifacts <http://www.openstack.org/software/newton>`_
+- `OpenStack Pike Release artifacts <http://www.openstack.org/software/pike>`_
- `OpenStack documentation <http://docs.openstack.org>`_
Abstract
========
-This document compiles the release notes for the Danube release of
+This document compiles the release notes for the Fraser release of
OPNFV when using JOID as a deployment tool with KVM hypervisor.
Introduction
============
These notes provides release information for the use of joid as deployment
-tool for the Danube release of OPNFV with KVM hypervisor for containers
+tool for the Fraser release of OPNFV with KVM hypervisor for containers
scenario.
-The goal of the Danube release and this JOID based deployment process is
+The goal of the Fraser release and this JOID based deployment process is
to establish a lab ready platform accelerating further development
of the OPNFV infrastructure.
It consists of a loadable kernel module, kvm.ko, that provides the core
virtualization infrastructure and a processor specific module, kvm-intel.ko or kvm-amd.ko.
-Danube release with the JOID deployment with KVM hypervisor will establish an
+Fraser release with the JOID deployment with KVM hypervisor will establish an
OPNFV target system on a Pharos compliant lab infrastructure.
-The current definition of an OPNFV target system is and OpenStack Newton.
+The current definition of an OPNFV target system is and OpenStack Pike.
The system is deployed with OpenStack High Availability (HA) for most OpenStack services.
User has following choices to make to do the deployment.
- - Openstack -- Newton
+ - Openstack -- Pike
- Type -- HA, nonHA, tip (stable git branch of respective openstack)
- Feature -- KVM (hypervisor)
command to deploy lxd feature is:
#KVM deployment with HA Openstack
-./deploy.sh -o newton -f none -t ha -l custom -s nosdn
+./deploy.sh -o pike -f none -t ha -l custom -s nosdn
#LXD deployment with no HA Openstack
-./deploy.sh -o newton -f none -t noha -l custom -s nosdn
+./deploy.sh -o pike -f none -t noha -l custom -s nosdn
Using Openstack
===============
| | |
+--------------------------------------+--------------------------------------+
| **Repo/tag** | gerrit.opnfv.org/gerrit/joid.git |
-| | stable/danube |
+| | opnfv-6.0.0 |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Danube release |
+| **Release designation** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | April 01 2017 |
+| **Release date** | April 27 2018 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Danube release |
+| **Purpose of the delivery** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
Scenario Releases
=================
Name: joid-os-nosdn-lxd-ha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-ha-baremetal-daily-danube/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-ha-baremetal-daily-fraser/
Notes:
Name: joid-os-nosdn-lxd-noha
-Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-noha-baremetal-daily-danube/
+Test Link: https://build.opnfv.org/ci/view/joid/job/joid-os-nosdn-lxd-noha-baremetal-daily-fraser/
Notes:
References
OpenStack
---------
-- `OpenStack Newton Release artifacts <http://www.openstack.org/software/newton>`_
+- `OpenStack Pike Release artifacts <http://www.openstack.org/software/pike>`_
- `OpenStack documentation <http://docs.openstack.org>`_
Abstract
========
-This document compiles the release notes for the Euphrates release of
+This document compiles the release notes for the Fraser release of
OPNFV when using JOID as a deployment tool with the Open Baton NFV MANO framework
provided by the OPNFV orchestra project.
============
These notes provides release information for the use of joid as deployment
-tool for the Euphrates release of OPNFV for orchestra
+tool for the Fraser release of OPNFV for orchestra
scenario.
-The goal of the Euphrates release and this JOID based deployment process is
+The goal of the Fraser release and this JOID based deployment process is
to establish a lab ready platform accelerating further development
of the OPNFV infrastructure.
of an extensible and customizable framework capable of orchestrating network services across
heterogeneous NFV Infrastructures.
-Euphrates release with the JOID deployment enables deployment of orchestra
+Fraser release with the JOID deployment enables deployment of orchestra
on a Pharos compliant lab infrastructure.
-The current definition of an OPNFV target system is based on OpenStack Ocata.
+The current definition of an OPNFV target system is based on OpenStack Pike.
The system is deployed with OpenStack High Availability (HA) for most OpenStack services.
User has following choices to make to do the deployment.
- - Openstack -- Ocata
+ - Openstack -- Pike
- Type -- HA, nonHA, tip (stable git branch of respective openstack)
- Feature -- Open Baton (NFV MANO framework)
command to deploy orchestra feature is:
#Orchestra deployment with no HA Openstack
-./deploy.sh -o ocata -m openstack -f openbaton -s nosdn -t nonha
+./deploy.sh -o pike -m openstack -f openbaton -s nosdn -t nonha
#Orchestra deployment with no HA Openstack
-./deploy.sh -o ocata -m openstack -f openbaton -s nosdn -t ha
+./deploy.sh -o pike -m openstack -f openbaton -s nosdn -t ha
Using Openstack
| | |
+--------------------------------------+--------------------------------------+
| **Repo/tag** | gerrit.opnfv.org/gerrit/joid.git |
-| | stable/euphrates |
+| | stable/fraser |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Euphrates release |
+| **Release designation** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | October 24 2017 |
+| **Release date** | April 27 2018 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Euphrates release |
+| **Purpose of the delivery** | Fraser release |
| | |
+--------------------------------------+--------------------------------------+
=================
Name: os-nosdn-openbaton-ha
-Test Link: https://build.opnfv.org/ci/job/joid-deploy-baremetal-daily-euphrates
+Test Link: https://build.opnfv.org/ci/job/joid-deploy-baremetal-daily-fraser
Notes:
References
OpenStack
---------
-- `OpenStack Newton Release artifacts <http://www.openstack.org/software/newton>`_
+- `OpenStack Pike Release artifacts <http://www.openstack.org/software/pike>`_
- `OpenStack documentation <http://docs.openstack.org>`_
Orchestra
---------
-- `Orchestra Release Notes <http://docs.opnfv.org/en/stable-euphrates/submodules/orchestra/docs/release/release-notes/index.html#orchestra-releasenotes>`_
+- `Orchestra Release Notes <http://docs.opnfv.org/en/stable-fraser/submodules/orchestra/docs/release/release-notes/index.html#orchestra-releasenotes>`_
- `Open Baton documentation <http://openbaton.github.io/documentation/>`_
Introduction
============
-This document will explain how to install OPNFV Euphrates with JOID including installing JOID, configuring JOID for your environment, and deploying OPNFV with different SDN solutions in HA, or non-HA mode. Prerequisites include
+This document will explain how to install OPNFV Fraser with JOID including installing JOID, configuring JOID for your environment, and deploying OPNFV with different SDN solutions in HA, or non-HA mode. Prerequisites include
- An Ubuntu 16.04 LTS Server Jumphost
- Minimum 2 Networks per Pharos requirement
release: d
distro: xenial
type: noha
- openstack: newton
+ openstack: pike
sdncontroller:
- type: nosdn
storage:
vSwitch, OpenContrail, OpenDaylight and ONOS (Open Network Operating System). In addition
to HA or non-HA mode, it also supports deploying the latest from the development tree (tip).
-The deploy.sh script in the joid/ci directoy will do all the work for you. For example, the following deploys OpenStack Newton with OpenvSwitch in a HA mode.
+The deploy.sh script in the joid/ci directory will do all the work for you. For example, the following deploys OpenStack Pike with OpenvSwitch in a HA mode.
::
- ~/joid/ci$ ./deploy.sh -o newton -s nosdn -t ha -l custom -f none -m openstack
+ ~/joid/ci$ ./deploy.sh -o pike -s nosdn -t ha -l custom -f none -m openstack
The deploy.sh script in the joid/ci directoy will do all the work for you. For example, the following deploys Kubernetes with Load balancer on the pod.
ha: HA mode of OpenStack.
tip: The tip of the development.
[-o]
- mitak: OpenStack Mitaka version.
- newton: OpenStack Newton version.
+ ocata: OpenStack Ocata version.
+ pike: OpenStack Pike version.
[-l]
default: For virtual deployment where installation will be done on KVM created using ./03-maasdeploy.sh
custom: Install on bare metal OPNFV defined by labconfig.yaml
::
- ./deploy.sh -o newton -s nosdn -t ha -l custom -f none
+ ./deploy.sh -o pike -s nosdn -t ha -l custom -f none
- If you have setup maas not with 03-maasdeploy.sh then the ./clean.sh command could hang,
the juju status command may hang because the correct MAAS API keys are not mentioned in
if grep -q 'virt-type: lxd' bundles.yaml; then
URLS=" \
- http://download.cirros-cloud.net/daily/20161201/cirros-dl161201-$NODE_ARCTYPE-lxc.tar.gz \
- http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-$NODE_ARCHES-root.tar.gz "
-
-else
- URLS=" \
+ http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-$NODE_ARCTYPE-lxc.tar.gz \
+ http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-$NODE_ARCHES-root.tar.gz \
http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-$NODE_ARCHES-uefi1.img \
http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-$NODE_ARCHES-uefi1.img \
http://mirror.catn.com/pub/catn/images/qcow2/centos6.4-x86_64-gold-master.img \
http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2 \
- http://download.cirros-cloud.net/daily/20161201/cirros-dl161201-$NODE_ARCTYPE-disk.img "
+ http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-$NODE_ARCTYPE-disk.img "
fi
for URL in $URLS
--- /dev/null
+lab:
+ location: kontron
+ racks:
+ - rack: pod1
+ nodes:
+ - name: rack-1-s3-c1
+ architecture: x86_64
+ roles: [network,control]
+ nics:
+ - ifname: intf1
+ spaces: [admin]
+ mac: ["00:a0:a5:9a:d7:26"]
+ - ifname: intf2
+ spaces: []
+ mac: ["00:a0:a5:9a:d7:24"]
+ - ifname: intf2.30
+ spaces: [floating]
+ mac: ["00:a0:a5:9a:d7:24"]
+ - ifname: intf2.10
+ spaces: [data]
+ mac: ["00:a0:a5:9a:d7:24"]
+ power:
+ type: ipmi
+ address: 192.168.101.13
+ user: admin
+ pass: admin
+ - name: rack-1-s4-c1
+ architecture: x86_64
+ roles: [compute,control,storage]
+ nics:
+ - ifname: intf1
+ spaces: [admin]
+ mac: ["00:a0:a5:9a:d7:20"]
+ - ifname: intf2
+ spaces: []
+ mac: ["00:a0:a5:9a:d7:1e"]
+ - ifname: intf2.30
+ spaces: [floating]
+ mac: ["00:a0:a5:9a:d7:1e"]
+ - ifname: intf2.10
+ spaces: [data]
+ mac: ["00:a0:a5:9a:d7:1e"]
+ power:
+ type: ipmi
+ address: 192.168.101.14
+ user: admin
+ pass: admin
+ - name: rack-1-s5-c1
+ architecture: x86_64
+ roles: [compute,storage]
+ nics:
+ - ifname: intf1
+ spaces: [admin]
+ mac: ["00:a0:a5:9a:f1:50"]
+ - ifname: intf2
+ spaces: []
+ mac: ["00:a0:a5:9a:f1:4e"]
+ - ifname: intf2.30
+ spaces: [floating]
+ mac: ["00:a0:a5:9a:f1:4e"]
+ - ifname: intf2.10
+ spaces: [data]
+ mac: ["00:a0:a5:9a:f1:4e"]
+ power:
+ type: ipmi
+ address: 192.168.101.15
+ user: admin
+ pass: admin
+ - name: rack-1-s6-c1
+ architecture: x86_64
+ roles: [compute,storage]
+ nics:
+ - ifname: intf1
+ spaces: [admin]
+ mac: ["00:a0:a5:9b:00:b0"]
+ - ifname: intf2
+ spaces: []
+ mac: ["00:a0:a5:9b:00:ae"]
+ - ifname: intf2.30
+ spaces: [floating]
+ mac: ["00:a0:a5:9b:00:ae"]
+ - ifname: intf2.10
+ spaces: [data]
+ mac: ["00:a0:a5:9b:00:ae"]
+ power:
+ type: ipmi
+ address: 192.168.101.16
+ user: admin
+ pass: admin
+ - name: rack-1-s7-c1
+ architecture: x86_64
+ roles: [compute,control,storage]
+ nics:
+ - ifname: intf1
+ spaces: [admin]
+ mac: ["00:a0:a5:9b:05:7a"]
+ - ifname: intf2
+ spaces: []
+ mac: ["00:a0:a5:9b:05:78"]
+ - ifname: intf2.30
+ spaces: [floating]
+ mac: ["00:a0:a5:9b:05:78"]
+ - ifname: intf2.10
+ spaces: [data]
+ mac: ["00:a0:a5:9b:05:78"]
+ power:
+ type: ipmi
+ address: 192.168.101.17
+ user: admin
+ pass: admin
+ floating-ip-range: 10.10.3.40,10.10.3.250,10.10.3.1,10.10.3.0/24
+ ext-port: "intf2.30"
+ dns: 8.8.8.8
+ osdomainname:
+opnfv:
+ release: f
+ distro: xenial
+ type: noha
+ openstack: pike
+ sdncontroller:
+ - type: nosdn
+ storage:
+ - type: ceph
+ disk: /dev/sdb
+ feature: odl_l2
+ spaces:
+ - type: admin
+ bridge: brAdmin
+ cidr: 10.10.0.0/24
+ gateway: 10.10.0.1
+ vlan:
+ - type: floating
+ bridge: brExt
+ cidr: 10.10.3.0/24
+ gateway: 10.10.3.1
+ vlan: 30
+ - type: data
+ bridge: brData
+ cidr: 10.10.1.0/24
+ gateway:
+ vlan: 10