opnfvsdn=$4
opnfvfeature=$5
opnfvdistro=$6
+opnfvmodel=$7
-#copy and download charms
-cp $opnfvsdn/fetch-charms.sh ./fetch-charms.sh
+if [[ "$opnfvmodel" = "openstack" ]]; then
+ #copy and download charms
+ cp $opnfvsdn/fetch-charms.sh ./fetch-charms.sh
+else
+ cp kubernates/fetch-charms.sh ./fetch-charms.sh
+fi
jujuver=`juju --version`
./fetch-charms.sh $opnfvdistro
-tar xvf common/scaleio.tar -C ./$opnfvdistro/ --strip=2 juju-scaleio/trusty/
-
-osdomname=''
+if [[ "$opnfvmodel" = "openstack" ]]; then
+ tar xvf common/scaleio.tar -C ./$opnfvdistro/ --strip=2 juju-scaleio/trusty/
+ osdomname=''
+fi
#check whether charms are still executing the code even juju-deployer says installed.
check_status() {
#read the value from deployment.yaml
-if [ -e ./deployment.yaml ]; then
- if [ -e ./deployconfig.yaml ]; then
- extport=`grep "ext-port" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //' | tr ',' ' '`
- datanet=`grep "dataNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
- admnet=`grep "admNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
- cephdisk=`grep "ceph-disk" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
- osdomname=`grep "os-domain-name" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
- fi
-
- workmutiple=`maas maas nodes list | grep "cpu_count" | cut -d ':' -f 2 | sed -e 's/ //' | tr ',' ' '`
- max=0
- for v in ${workmutiple[@]}; do
- if (( $v > $max )); then max=$v; fi;
- done
- echo $max
-
- if [ "$max" -lt 4 ];then
- workmutiple=1.1
- elif [ "$max" -lt 33 ]; then
- workmutiple=0.25
- elif [ "$max" -lt 73 ]; then
- workmutiple=0.1
- else
- workmutiple=0.05
+if [[ "$opnfvmodel" = "openstack" ]]; then
+ if [ -e ./deployment.yaml ]; then
+ if [ -e ./deployconfig.yaml ]; then
+ extport=`grep "ext-port" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //' | tr ',' ' '`
+ datanet=`grep "dataNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
+ admnet=`grep "admNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
+ cephdisk=`grep "ceph-disk" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
+ osdomname=`grep "os-domain-name" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
+ fi
+
+ workmutiple=`maas maas nodes list | grep "cpu_count" | cut -d ':' -f 2 | sed -e 's/ //' | tr ',' ' '`
+ max=0
+ for v in ${workmutiple[@]}; do
+ if (( $v > $max )); then max=$v; fi;
+ done
+ echo $max
+
+ if [ "$max" -lt 4 ];then
+ workmutiple=1.1
+ elif [ "$max" -lt 33 ]; then
+ workmutiple=0.25
+ elif [ "$max" -lt 73 ]; then
+ workmutiple=0.1
+ else
+ workmutiple=0.05
+ fi
+ sed -i "s/worker_multiplier: 1.0/worker_multiplier: ${workmutiple}/g" default_deployment_config.yaml
fi
- sed -i "s/worker_multiplier: 1.0/worker_multiplier: ${workmutiple}/g" default_deployment_config.yaml
fi
case "$opnfvlab" in
fi
done
-#update source if trusty is target distribution
-var=os-$opnfvsdn-$fea-$opnfvtype"-"$opnfvdistro"_"$openstack
+if [[ "$opnfvmodel" = "openstack" ]]; then
+ #update source if trusty is target distribution
+ var=os-$opnfvsdn-$fea-$opnfvtype"-"$opnfvdistro"_"$openstack
+
+ if [ "$osdomname" != "None" ]; then
+ var=$var"_"publicapi
+ fi
+fi
-if [ "$osdomname" != "None" ]; then
- var=$var"_"publicapi
+if [[ "$opnfvmodel" = "openstack" ]]; then
+ #lets generate the bundle for all target using genBundle.py
+ python genBundle.py -l deployconfig.yaml -s $var > bundles.yaml
+else
+ #lets generate the bundle for k8 target using genK8Bundle.py
+ python genK8Bundle.py -l deployconfig.yaml -s $var > bundles.yaml
fi
-#lets generate the bundle for all target using genBundle.py
-python genBundle.py -l deployconfig.yaml -s $var > bundles.yaml
#keep the back in cloud for later debugging.
pastebinit bundles.yaml || true
--- /dev/null
+# vim: set ts=2 et:
+# Juju bundle template for deploying Kubernetes on the
+# machines provisioned by MAAS. The included template
+# fragments supply the kubernetes, easyrsa, etcd and
+# subordinate (flannel) services and their relations;
+# adjust them to reflect your deployment before moving
+# to production.
+#
+ series: {{ ubuntu.release }}
+ services:
+ nodes:
+ charm: "cs:{{ ubuntu.release }}/ubuntu"
+ num_units: {{ opnfv.units }}
+ ntp:
+ charm: "./{{ ubuntu.release }}/ntp"
+{% include 'kubernetes.yaml' %}
+{% include 'easyrsa.yaml' %}
+{% include 'etcd.yaml' %}
+{% include 'subordinate.yaml' %}
+
+ relations:
+ - [ 'ntp:juju-info', 'nodes:juju-info' ]
+{% include 'relations.yaml' %}
--- /dev/null
+ easyrsa:
+ charm: "./{{ ubuntu.release }}/easyrsa"
+ num_units: 1
+ to:
+ - "lxd:nodes/0"
+
--- /dev/null
+ etcd:
+ charm: "./{{ ubuntu.release }}/etcd"
+ num_units: 1
+ to:
+ - "nodes/0"
+
--- /dev/null
+ kubernetes-master:
+ charm: "./{{ ubuntu.release }}/kubernetes-master"
+ num_units: 1
+ expose: true
+ to:
+ - "nodes/0"
+
+ kubernetes-worker:
+ charm: "./{{ ubuntu.release }}/kubernetes-worker"
+ num_units: {{ opnfv.units - 1 }}
+ expose: true
+ to:
+{% for unit_id in range(1, opnfv.units) %}
+ - "nodes/{{ unit_id }}"
+{% endfor %}
--- /dev/null
+ - [ "kubernetes-master:kube-api-endpoint", "kubernetes-worker:kube-api-endpoint" ]
+ - [ "kubernetes-master:cluster-dns", "kubernetes-worker:kube-dns" ]
+ - [ "kubernetes-master:certificates", "easyrsa:client" ]
+ - [ "kubernetes-master:etcd", "etcd:db" ]
+ - [ "kubernetes-worker:certificates", "easyrsa:client" ]
+ - [ "flannel:etcd", "etcd:db" ]
+ - [ "flannel:cni", "kubernetes-master:cni" ]
+ - [ "flannel:cni", "kubernetes-worker:cni" ]
+ - [ "etcd:certificates", "easyrsa:client" ]
--- /dev/null
+
+ flannel:
+ charm: ./{{ ubuntu.release }}/flannel
+
opnfvfeature=none
opnfvdistro=xenial
opnfvarch=amd64
+opnfvmodel=openstack
jujuver=`juju --version`
[-f <ipv6,dpdk,lxd,dvr>]
[-d <trusty|xenial>]
[-a <amd64>]
+ [-m <openstack|kubernetes>]
[-r <a|b>]" 1>&2 exit 1; }
-while getopts ":s:t:o:l:h:r:f:d:a:" opt; do
+while getopts ":s:t:o:l:h:r:f:d:a:m:" opt; do
case "${opt}" in
s)
opnfvsdn=${OPTARG}
a)
opnfvarch=${OPTARG}
;;
+ m)
+ opnfvmodel=${OPTARG}
+ ;;
h)
usage
;;
fi
#case default deploy the opnfv platform:
- ./02-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature $opnfvdistro
+ ./02-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature $opnfvdistro $opnfvmodel
}
#check whether charms are still executing the code even juju-deployer says installed.
fi
done
- juju expose ceph-radosgw
- #juju ssh ceph/0 \ 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"'
-
+ if [[ "$opnfvmodel" = "openstack" ]]; then
+ juju expose ceph-radosgw || true
+ #juju ssh ceph/0 \ 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"'
+ fi
echo "...... deployment finishing ......."
}
echo "...... deployment finished ......."
-./openstack.sh "$opnfvsdn" "$opnfvlab" "$opnfvdistro" "$openstack" || true
+if [[ "$opnfvmodel" = "openstack" ]]; then
+ ./openstack.sh "$opnfvsdn" "$opnfvlab" "$opnfvdistro" "$openstack" || true
-# creating heat domain after puching the public API into /etc/hosts
+ # creating heat domain after puching the public API into /etc/hosts
-if [[ "$jujuver" > "2" ]]; then
- status=`juju run-action heat/0 domain-setup`
- echo $status
-else
- status=`juju action do heat/0 domain-setup`
- echo $status
-fi
+ if [[ "$jujuver" > "2" ]]; then
+ status=`juju run-action heat/0 domain-setup`
+ echo $status
+ else
+ status=`juju action do heat/0 domain-setup`
+ echo $status
+ fi
-sudo ../juju/get-cloud-images || true
-../juju/joid-configure-openstack || true
+ sudo ../juju/get-cloud-images || true
+ ../juju/joid-configure-openstack || true
+
+fi
echo "...... finished ......."
--- /dev/null
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+This script generates a juju deployer bundle based on
+scenario name, and lab config file.
+
+Parameters:
+ -s, --scenario : scenario name
+ -l, --lab : lab config file
+"""
+
+from optparse import OptionParser
+from jinja2 import Environment, FileSystemLoader
+from distutils.version import LooseVersion, StrictVersion
+import os
+import subprocess
+import random
+import yaml
+import sys
+
+#
+# Parse parameters
+#
+
+parser = OptionParser()
+parser.add_option("-s", "--scenario", dest="scenario", help="scenario name")
+parser.add_option("-l", "--lab", dest="lab", help="lab config file")
+(options, args) = parser.parse_args()
+scenario = options.scenario
+labconfig_file = options.lab
+
+#
+# Set Path and configs path
+#
+
+# Scenario defaults merged with the lab config further below.
+scenarioconfig_file = 'default_deployment_config.yaml'
+# Capture the installed juju version.
+# NOTE(review): jujuver is assigned but never read in this script --
+# TPL_DIR is fixed to the juju2 templates; the call still fails fast
+# if the juju CLI is absent. Confirm whether that is intentional.
+jujuver = subprocess.check_output(["juju", "--version"])
+
+# Template directory for the Kubernetes bundle fragments.
+TPL_DIR = os.path.dirname(os.path.abspath(__file__))+'/config_tpl/juju2/bundlek8_tpl'
+
+#
+# Prepare variables
+#
+
+# Prepare a storage for passwords
+passwords_store = dict()
+
+#
+# Local Functions
+#
+
+
+def load_yaml(filepath):
+    """Load a YAML file and return its parsed content.
+
+    Uses yaml.safe_load: the lab/scenario configs are plain data and
+    need no custom YAML tags, and bare yaml.load on external files is
+    unsafe (arbitrary object construction) and deprecated in PyYAML.
+    Returns None (after printing the error) on a parse failure.
+    """
+    with open(filepath, 'r') as stream:
+        try:
+            return yaml.safe_load(stream)
+        except yaml.YAMLError as exc:
+            print(exc)
+
+#
+# Templates functions
+#
+
+
+def unit_qty():
+    """Return the number of service units to deploy: the configured
+    cluster size when HA mode is enabled, otherwise a single unit."""
+    global config
+    if config['os']['ha']['mode'] == 'ha':
+        return config['os']['ha']['cluster_size']
+    else:
+        return 1
+
+
+def unit_ceph_qty():
+    """Return the size of the ceph cluster.
+
+    HA deployments use the configured cluster size; non-HA deployments
+    also use it when at least 3 machines are available, and fall back
+    to 2 units otherwise.
+    """
+    global config
+    if config['os']['ha']['mode'] == 'ha':
+        return config['os']['ha']['cluster_size']
+    else:
+        if config['opnfv']['units'] >= 3:
+            return config['os']['ha']['cluster_size']
+        else:
+            return 2
+
+def unit_scaleio_qty():
+    """Return the size of the scaleio cluster (fixed at 3 units)."""
+    return 3
+
+def to_select(qty=False):
+    """Return a random list of machine indexes on which to place units.
+
+    When qty is not given it defaults to the HA cluster size (or 1 for
+    non-HA). Hyperconverged deployments may pick any of the available
+    machines; otherwise units are drawn from the first qty machines
+    only (which yields a shuffled 0..qty-1 list).
+    """
+    global config
+    if not qty:
+        qty = config['os']['ha']['cluster_size'] if \
+            config['os']['ha']['mode'] == 'ha' else 1
+    if config['os']['hyperconverged']:
+        return random.sample(range(0, config['opnfv']['units']), qty)
+    else:
+        return random.sample(range(0, qty), qty)
+
+
+def get_password(key, length=16, special=False):
+    """Return a random password for key, generating and caching it in
+    passwords_store on first use so repeated template lookups for the
+    same key stay consistent within one run.
+
+    NOTE(review): random.randrange/shuffle are not cryptographically
+    secure -- confirm whether these passwords guard production
+    services before relying on them.
+    """
+    global passwords_store
+    if key not in passwords_store.keys():
+        alphabet = "abcdefghijklmnopqrstuvwxyz"
+        upperalphabet = alphabet.upper()
+        char_list = alphabet + upperalphabet + '0123456789'
+        pwlist = []
+        if special:
+            char_list += "+-,;./:?!*"
+        # Build length chars uniformly at random, then shuffle.
+        for i in range(length):
+            pwlist.append(char_list[random.randrange(len(char_list))])
+        random.shuffle(pwlist)
+        passwords_store[key] = "".join(pwlist)
+    return passwords_store[key]
+
+#
+# Config import
+#
+
+# Load scenario Config
+config = load_yaml(scenarioconfig_file)
+# Load lab Config
+config.update(load_yaml(labconfig_file))
+
+# We transform array to hash for an easier work
+config['opnfv']['spaces_dict'] = dict()
+for space in config['opnfv']['spaces']:
+ config['opnfv']['spaces_dict'][space['type']] = space
+config['opnfv']['storage_dict'] = dict()
+for storage in config['opnfv']['storage']:
+ config['opnfv']['storage_dict'][storage['type']] = storage
+
+#
+# Parse scenario name
+#
+
+# Set default scenario name
+if not scenario:
+ scenario = "k8-nosdn-baremetal-core"
+
+# Parse scenario name
+try:
+ sc = scenario.split('-')
+ (sdn, features, hamode) = sc[1:4]
+ features = features.split('_')
+ if len(sc) > 4:
+ extra = sc[4].split('_')
+ else:
+ extra = []
+except ValueError as err:
+ print('Error: Bad scenario name syntax, use '
+ '"k8-nosdn-baremetal-core" format')
+ sys.exit(1)
+
+#
+# Update config with scenario name
+#
+
+if 'dpdk' in features:
+ config['os']['network']['dpdk'] = True
+
+# Set beta option from extra
+if 'hugepages' in extra:
+ config['os']['beta']['huge_pages'] = True
+if 'mitaka' in extra:
+ config['os']['release'] = 'mitaka'
+if 'xenial' in extra:
+ config['ubuntu']['release'] = 'xenial'
+
+#
+# Transform template to bundle.yaml according to config
+#
+
+# Create the jinja2 environment.
+env = Environment(loader=FileSystemLoader(TPL_DIR),
+ trim_blocks=True)
+template = env.get_template('bundle.yaml')
+
+# Add functions
+env.globals.update(get_password=get_password)
+env.globals.update(unit_qty=unit_qty)
+env.globals.update(unit_ceph_qty=unit_ceph_qty)
+env.globals.update(unit_scaleio_qty=unit_scaleio_qty)
+env.globals.update(to_select=to_select)
+
+# Render the template with the merged lab/scenario config.
+output = template.render(**config)
+
+# Sanity-check that the rendered bundle is valid YAML before emitting
+# it; safe_load suffices since the bundle contains no custom YAML tags
+# (bare yaml.load is deprecated/unsafe in PyYAML).
+try:
+    yaml.safe_load(output)
+except yaml.YAMLError as exc:
+    print(exc)
+
+# Emit the bundle on stdout (the caller redirects it to bundles.yaml).
+print(output)
--- /dev/null
+#!/bin/bash -ex
+
+distro=$1
+mkdir -p $distro
+
+# Build a charm from pulled source with charm-tools.
+# NOTE(review): build is defined but not invoked in this script.
+function build {
+    sudo apt-get install charm-tools -y
+    (cd $distro/charm-$1; charm build -s $distro -obuild src)
+    mv $distro/charm-$1/build/$distro/$1 $distro
+}
+
+# kubernetes charms -- the bundle templates expect exactly these
+# directory names under $distro (kubernetes-master, kubernetes-worker,
+# flannel, etcd, easyrsa).
+charm pull cs:~containers/kubernetes-master $distro/kubernetes-master
+charm pull cs:~containers/kubernetes-worker $distro/kubernetes-worker
+charm pull cs:~containers/flannel $distro/flannel
+charm pull cs:~containers/etcd $distro/etcd
+charm pull cs:~containers/easyrsa $distro/easyrsa