fi
#install the packages needed
-sudo apt-add-repository ppa:juju/devel -y
+sudo apt-get install software-properties-common -y
+sudo apt-add-repository ppa:juju/stable -y
sudo apt-add-repository ppa:maas/stable -y
-sudo apt-add-repository cloud-archive:newton -y
+sudo apt-add-repository cloud-archive:ocata -y
sudo apt-get update -y
#sudo apt-get dist-upgrade -y
-sudo apt-get install openssh-server bzr git virtinst qemu-kvm libvirt-bin juju \
+sudo apt-get install bridge-utils openssh-server bzr git virtinst qemu-kvm libvirt-bin juju \
maas maas-region-controller python-pip python-psutil python-openstackclient \
python-congressclient gsutil charm-tools pastebinit python-jinja2 sshpass \
openssh-server vlan ipmitool jq expect -y
KEYRING_FILE=/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
SOURCE_ID=1
FABRIC_ID=1
-VLAN_TAG=""
PRIMARY_RACK_CONTROLLER="$MAAS_IP"
SUBNET_CIDR=`cat labconfig.json | jq '.opnfv.spaces[] | select(.type=="admin")'.cidr | cut -d \" -f 2 `
-VLAN_TAG="untagged"
+# CIDR of each optional space; jq prints the literal string "null" (or
+# nothing) when a space or its cidr is absent from labconfig.json, and the
+# callers below test for that value.
+SUBNETDATA_CIDR=`cat labconfig.json | jq '.opnfv.spaces[] | select(.type=="data")'.cidr | cut -d \" -f 2 `
+SUBNETPUB_CIDR=`cat labconfig.json | jq '.opnfv.spaces[] | select(.type=="public")'.cidr | cut -d \" -f 2 `
+SUBNETSTOR_CIDR=`cat labconfig.json | jq '.opnfv.spaces[] | select(.type=="storage")'.cidr | cut -d \" -f 2 `
+SUBNETFLOAT_CIDR=`cat labconfig.json | jq '.opnfv.spaces[] | select(.type=="floating")'.cidr | cut -d \" -f 2 `
+# NOTE(review): "UNTTAGED" is a typo for "UNTAGGED", but the misspelled name
+# is used consistently by every caller below, so do not rename it here alone.
+VLAN_UNTTAGED="untagged"
# In the case of a virtual deployment get deployconfig.yaml
if [ "$virtinstall" -eq 1 ]; then
#reconfigure maas with correct MAAS address.
#Below code is needed as MAAS have issue in commisoning without restart.
sudo ./maas-reconfigure-region.sh $MAAS_IP
+ # give the region controller time to come back up before talking to it
+ sleep 30
sudo maas-rack config --region-url http://$MAAS_IP:5240/MAAS
sudo maas createadmin --username=ubuntu --email=ubuntu@ubuntu.com --password=ubuntu || true
maas $PROFILE maas set-config name='ntp_server' value='ntp.ubuntu.com' || true
maas $PROFILE sshkeys create "key=$SSH_KEY" || true
- maas $PROFILE tags create name='bootstrap' || true
- maas $PROFILE tags create name='compute' || true
- maas $PROFILE tags create name='control' || true
- maas $PROFILE tags create name='storage' || true
+ # create the machine tags in a loop instead of one call per tag
+ for tag in bootstrap compute control storage
+ do
+ maas $PROFILE tags create name=$tag || true
+ done
#create the required spaces.
maas $PROFILE space update 0 name=default || true
- maas $PROFILE spaces create name=unused || true
- maas $PROFILE spaces create name=admin-api || true
- maas $PROFILE spaces create name=internal-api || true
- maas $PROFILE spaces create name=public-api || true
- maas $PROFILE spaces create name=compute-data || true
- maas $PROFILE spaces create name=compute-external || true
- maas $PROFILE spaces create name=storage-data || true
- maas $PROFILE spaces create name=storage-cluster || true
-
- maas $PROFILE boot-source update $SOURCE_ID \
- url=$URL keyring_filename=$KEYRING_FILE || true
+ for space in unused admin-api internal-api public-api compute-data \
+ compute-external storage-data storage-cluster
+ do
+ echo "Creating the space $space"
+ maas $PROFILE spaces create name=$space || true
+ done
+ #maas $PROFILE boot-source update $SOURCE_ID \
+ # url=$URL keyring_filename=$KEYRING_FILE || true
maas $PROFILE boot-resources import || true
sleep 10
do
sleep 60
done
+ # NOTE(review): the do/done just above has no visible loop header in this
+ # excerpt — patch context appears truncated here; verify in the full file.
+}
- #maas $PROFILE subnet update vlan:<vlan id> name=internal-api space=<0> gateway_ip=10.5.1.1
- #maas $PROFILE subnet update vlan:<vlan id> name=admin-api space=<2> gateway_ip=10.5.12.1
- #maas $PROFILE subnet update vlan:<vlan id> name=public-api space=<1> gateway_ip=10.5.15.1
- #maas $PROFILE subnet update vlan:<vlan id> name=compute-data space=<3> gateway_ip=10.5.17.1
- #maas $PROFILE subnet update vlan:<vlan id> name=compute-external space=<4> gateway_ip=10.5.19.1
- #maas $PROFILE subnet update vlan:<vlan id> name=storage-data space=<5> gateway_ip=10.5.20.1
- #maas $PROFILE subnet update vlan:<vlan id> name=storage-cluster space=<6> gateway_ip=10.5.21.1
+deleteexistingnetw(){
+ # Remove every subnet currently known to MAAS so the opnfv networks can
+ # be recreated from scratch by setopnfvfabrics.
+ NETID_LIST=$(maas $PROFILE subnets read | jq ".[].id")
+ # iterate the id list and delete each subnet by its own id
+ # (was: looped over the undefined $NETW and passed the whole list to delete)
+ for NETID in $NETID_LIST; do
+ maas $PROFILE subnet delete $NETID
+ done
+}
+setopnfvfabrics(){
+ # Create one MAAS fabric per NIC of the first node, then create on that
+ # fabric the subnet (and VLAN, if any) of every space the NIC carries.
+ # Based on first node we get the fabric mapping
+ NODE_0_MAC_LIST=$(cat labconfig.json | jq --raw-output ".lab.racks[0].nodes[0].nics[] ".mac[] | sort -u)
+ FAB_ID=1
+ for MAC in $NODE_0_MAC_LIST; do
+ # Create a new fabric
+ FABRIC_ID=$(maas $PROFILE fabrics create name=opnfv$FAB_ID| jq --raw-output ".id")
+ # Get the spaces attached to a mac — always node 0: the MAC list above
+ # was built from node 0 and $NODE_ID is not set yet at this point
+ IF_SPACES=$(cat labconfig.json | jq --raw-output ".lab.racks[0].nodes[0].nics[] | select(.mac[] | contains(\"$MAC\")) ".spaces[])
+ # Create the network attached to a space
+ for SPACE in $IF_SPACES; do
+ # First check if this space have a vlan
+ SP_VLAN=$(cat labconfig.json | jq --raw-output ".opnfv.spaces[] | select(.type==\"$SPACE\")".vlan)
+ # Create it if needed
+ if ([ $SP_VLAN ] && [ "$SP_VLAN" != "null" ]); then
+ maas $PROFILE vlans create $FABRIC_ID vid=$SP_VLAN
+ VID="vid=$SP_VLAN"
+ else
+ SP_VLAN=$VLAN_UNTTAGED
+ VID=""
+ fi
+ # Create the network
+ case "$SPACE" in
+ 'admin') SUBNET_CIDR=$SUBNET_CIDR; JUJU_SPACE="admin"; DHCP='enabled' ;;
+ 'data') SUBNET_CIDR=$SUBNETDATA_CIDR; JUJU_SPACE="tenant-data"; DHCP='' ;;
+ 'public') SUBNET_CIDR=$SUBNETPUB_CIDR; JUJU_SPACE="public-api"; DHCP='' ;;
+ 'storage') SUBNET_CIDR=$SUBNETSTOR_CIDR; JUJU_SPACE="tenant-api"; DHCP='' ;;
+ 'floating') SUBNET_CIDR=$SUBNETFLOAT_CIDR; JUJU_SPACE="tenant-public"; DHCP='' ;;
+ *) JUJU_SPACE='null'; DHCP='OFF'; echo " >>> Unknown SPACE" ;;
+ esac
+ # If we have a network, we create it
+ if ([ $SUBNET_CIDR ] && [ "$SUBNET_CIDR" != "null" ]); then
+ maas $PROFILE subnets create fabric=$FABRIC_ID cidr=$SUBNET_CIDR $VID
+ # Add the Gateway
+ GW=$(cat labconfig.json | jq ".opnfv.spaces[] | select(.type==\"$SPACE\")".gateway | cut -d \" -f 2)
+ if ([ $GW ] && [ "$GW" != "null" ]); then
+ maas $PROFILE subnet update $SUBNET_CIDR gateway_ip=$GW || true
+ fi
+ # Set ranges
+ # NOTE(review): assumes a "x.y.z.0/24"-style CIDR (strips the last 5 chars)
+ SUBNET_PREFIX=${SUBNET_CIDR::-5}
+ IP_RES_RANGE_LOW="$SUBNET_PREFIX.1"
+ IP_RES_RANGE_HIGH="$SUBNET_PREFIX.39"
+ IP_DYNAMIC_RANGE_LOW="$SUBNET_PREFIX.40"
+ IP_DYNAMIC_RANGE_HIGH="$SUBNET_PREFIX.150"
+ maas $PROFILE ipranges create type=reserved \
+ start_ip=$IP_RES_RANGE_LOW end_ip=$IP_RES_RANGE_HIGH \
+ comment='This is a reserved range' || true
+ maas $PROFILE ipranges create type=dynamic \
+ start_ip=$IP_DYNAMIC_RANGE_LOW end_ip=$IP_DYNAMIC_RANGE_HIGH \
+ comment='This is a reserved dynamic range' || true
+ # Set DHCP
+ if [ $DHCP ]; then
+ PRIMARY_RACK_CONTROLLER=$(maas $PROFILE rack-controllers read | jq -r '.[0].system_id')
+ maas $PROFILE vlan update $FABRIC_ID $SP_VLAN dhcp_on=True primary_rack=$PRIMARY_RACK_CONTROLLER || true
+ fi
+ fi
+ done
+ FAB_ID=$((FAB_ID+1))
+ done
}
enablesubnetanddhcp(){
+ # Attach subnet $1 to the MAAS space matching $3 (admin/data/public/storage),
+ # set its gateway from labconfig.json, and turn DHCP on when $2 == "true".
- SUBNET_PREFIX=${SUBNET_CIDR::-5}
+ TEMP_CIDR=$1
+ enabledhcp=$2
+ space=$3
+
+ SUBNET_PREFIX=${TEMP_CIDR::-5}
IP_RES_RANGE_LOW="$SUBNET_PREFIX.1"
IP_RES_RANGE_HIGH="$SUBNET_PREFIX.39"
start_ip=$IP_DYNAMIC_RANGE_LOW end_ip=$IP_DYNAMIC_RANGE_HIGH \
comment='This is a reserved dynamic range' || true
-
- FABRIC_ID=$(maas $PROFILE subnet read $SUBNET_CIDR | jq '.vlan.fabric_id')
+ FABRIC_ID=$(maas $PROFILE subnet read $TEMP_CIDR | jq '.vlan.fabric_id')
PRIMARY_RACK_CONTROLLER=$(maas $PROFILE rack-controllers read | jq -r '.[0].system_id')
- maas $PROFILE vlan update $FABRIC_ID $VLAN_TAG dhcp_on=True primary_rack=$PRIMARY_RACK_CONTROLLER || true
-
- MY_GATEWAY=`cat deployconfig.json | jq '.opnfv.admNetgway' | cut -d \" -f 2`
- MY_NAMESERVER=`cat deployconfig.json | jq '.opnfv.upstream_dns' | cut -d \" -f 2`
- maas $PROFILE subnet update $SUBNET_CIDR gateway_ip=$MY_GATEWAY || true
- maas $PROFILE subnet update $SUBNET_CIDR dns_servers=$MY_NAMESERVER || true
-
- #below command will enable the interface with internal-api space.
-
- SPACEID=$(maas $PROFILE space read internal-api | jq '.id')
- maas $PROFILE subnet update $SUBNET_CIDR space=$SPACEID || true
-
+ if [ "$space" == "admin" ]; then
+ MY_GATEWAY=`cat labconfig.json | jq '.opnfv.spaces[] | select(.type=="admin")'.gateway | cut -d \" -f 2 `
+ #MY_NAMESERVER=`cat deployconfig.json | jq '.opnfv.upstream_dns' | cut -d \" -f 2`
+ if ([ $MY_GATEWAY ] && [ "$MY_GATEWAY" != "null" ]); then
+ maas $PROFILE subnet update $TEMP_CIDR gateway_ip=$MY_GATEWAY || true
+ fi
+ #maas $PROFILE subnet update $TEMP_CIDR dns_servers=$MY_NAMESERVER || true
+ #below command will enable the interface with internal-api space.
+ SPACEID=$(maas $PROFILE space read internal-api | jq '.id')
+ maas $PROFILE subnet update $TEMP_CIDR space=$SPACEID || true
+ if [ "$enabledhcp" == "true" ]; then
+ maas $PROFILE vlan update $FABRIC_ID $VLAN_UNTTAGED dhcp_on=True primary_rack=$PRIMARY_RACK_CONTROLLER || true
+ fi
+ elif [ "$space" == "data" ]; then
+ MY_GATEWAY=`cat labconfig.json | jq '.opnfv.spaces[] | select(.type=="data")'.gateway | cut -d \" -f 2 `
+ if ([ $MY_GATEWAY ] && [ "$MY_GATEWAY" != "null" ]); then
+ maas $PROFILE subnet update $TEMP_CIDR gateway_ip=$MY_GATEWAY || true
+ fi
+ #below command will enable the interface with data-api space for data network.
+ # NOTE(review): it actually reads the admin-api space id — confirm intent.
+ SPACEID=$(maas $PROFILE space read admin-api | jq '.id')
+ maas $PROFILE subnet update $TEMP_CIDR space=$SPACEID || true
+ if [ "$enabledhcp" == "true" ]; then
+ maas $PROFILE vlan update $FABRIC_ID $VLAN_UNTTAGED dhcp_on=True primary_rack=$PRIMARY_RACK_CONTROLLER || true
+ fi
+ elif [ "$space" == "public" ]; then
+ # fixed jq selector: previously queried the data space's ".public" key
+ MY_GATEWAY=`cat labconfig.json | jq '.opnfv.spaces[] | select(.type=="public")'.gateway | cut -d \" -f 2 `
+ if ([ $MY_GATEWAY ] && [ "$MY_GATEWAY" != "null" ]); then
+ maas $PROFILE subnet update $TEMP_CIDR gateway_ip=$MY_GATEWAY || true
+ fi
+ #below command will enable the interface with public-api space for data network.
+ SPACEID=$(maas $PROFILE space read public-api | jq '.id')
+ maas $PROFILE subnet update $TEMP_CIDR space=$SPACEID || true
+ if [ "$enabledhcp" == "true" ]; then
+ maas $PROFILE vlan update $FABRIC_ID $VLAN_UNTTAGED dhcp_on=True primary_rack=$PRIMARY_RACK_CONTROLLER || true
+ fi
+ elif [ "$space" == "storage" ]; then
+ # fixed jq selector: previously queried the data space's ".storage" key
+ MY_GATEWAY=`cat labconfig.json | jq '.opnfv.spaces[] | select(.type=="storage")'.gateway | cut -d \" -f 2 `
+ if ([ $MY_GATEWAY ] && [ "$MY_GATEWAY" != "null" ]); then
+ maas $PROFILE subnet update $TEMP_CIDR gateway_ip=$MY_GATEWAY || true
+ fi
+ #below command will enable the interface with public-api space for data network.
+ SPACEID=$(maas $PROFILE space read storage-data | jq '.id')
+ maas $PROFILE subnet update $TEMP_CIDR space=$SPACEID || true
+ if [ "$enabledhcp" == "true" ]; then
+ maas $PROFILE vlan update $FABRIC_ID $VLAN_UNTTAGED dhcp_on=True primary_rack=$PRIMARY_RACK_CONTROLLER || true
+ fi
+ fi
}
addnodes(){
fi
sudo virt-install --connect qemu:///system --name bootstrap --ram 4098 --cpu host --vcpus 2 --video \
- cirrus --arch x86_64 --disk size=20,format=qcow2,bus=virtio,io=native,pool=default \
+ cirrus --arch x86_64 --disk size=20,format=qcow2,bus=virtio,cache=directsync,io=native,pool=default \
$netw --boot network,hd,menu=off --noautoconsole \
--vnc --print-xml | tee bootstrap
maas $PROFILE tag update-nodes bootstrap add=$bootstrapid
if [ "$virtinstall" -eq 1 ]; then
+ units=`cat deployconfig.json | jq .opnfv.units`
- sudo virt-install --connect qemu:///system --name node1-control --ram 8192 --cpu host --vcpus 4 \
- --disk size=120,format=qcow2,bus=virtio,io=native,pool=default \
- $netw $netw --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node1-control
-
- sudo virt-install --connect qemu:///system --name node2-compute --ram 8192 --cpu host --vcpus 4 \
- --disk size=120,format=qcow2,bus=virtio,io=native,pool=default \
- $netw $netw --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node2-compute
-
- sudo virt-install --connect qemu:///system --name node5-compute --ram 8192 --cpu host --vcpus 4 \
- --disk size=120,format=qcow2,bus=virtio,io=native,pool=default \
- $netw $netw --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node5-compute
-
-
- node1controlmac=`grep "mac address" node1-control | head -1 | cut -d '"' -f 2`
- node2computemac=`grep "mac address" node2-compute | head -1 | cut -d '"' -f 2`
- node5computemac=`grep "mac address" node5-compute | head -1 | cut -d '"' -f 2`
-
- sudo virsh -c qemu:///system define --file node1-control
- sudo virsh -c qemu:///system define --file node2-compute
- sudo virsh -c qemu:///system define --file node5-compute
- rm -f node1-control node2-compute node5-compute
-
-
- maas $PROFILE machines create autodetect_nodegroup='yes' name='node1-control' \
- tags='control' hostname='node1-control' power_type='virsh' mac_addresses=$node1controlmac \
- power_parameters_power_address='qemu+ssh://'$USER'@'$MAAS_IP'/system' \
- architecture='amd64/generic' power_parameters_power_id='node1-control'
- controlnodeid=$(maas $PROFILE machines read | jq -r '.[] | select(.hostname == "node1-control").system_id')
- maas $PROFILE machines create autodetect_nodegroup='yes' name='node2-compute' \
- tags='compute' hostname='node2-compute' power_type='virsh' mac_addresses=$node2computemac \
- power_parameters_power_address='qemu+ssh://'$USER'@'$MAAS_IP'/system' \
- architecture='amd64/generic' power_parameters_power_id='node2-compute'
- compute2nodeid=$(maas $PROFILE machines read | jq -r '.[] | select(.hostname == "node2-compute").system_id')
- maas $PROFILE machines create autodetect_nodegroup='yes' name='node5-compute' \
- tags='compute' hostname='node5-compute' power_type='virsh' mac_addresses=$node5computemac \
- power_parameters_power_address='qemu+ssh://'$USER'@'$MAAS_IP'/system' \
- architecture='amd64/generic' power_parameters_power_id='node5-compute'
- compute5nodeid=$(maas $PROFILE machines read | jq -r '.[] | select(.hostname == "node5-compute").system_id')
-
- maas $PROFILE tag update-nodes control add=$controlnodeid || true
- maas $PROFILE tag update-nodes compute add=$compute2nodeid || true
- maas $PROFILE tag update-nodes compute add=$compute5nodeid || true
+ # Define one KVM VM per unit from labconfig.json and enlist it in MAAS,
+ # replacing the previous hard-coded node1/node2/node5 definitions.
+ until [ $(($units)) -lt 1 ]; do
+ units=$(($units - 1));
+ NODE_NAME=`cat labconfig.json | jq ".lab.racks[].nodes[$units].name" | cut -d \" -f 2 `
+
+ sudo virt-install --connect qemu:///system --name $NODE_NAME --ram 8192 --cpu host --vcpus 4 \
+ --disk size=120,format=qcow2,bus=virtio,cache=directsync,io=native,pool=default \
+ $netw $netw --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee $NODE_NAME
+
+ nodemac=`grep "mac address" $NODE_NAME | head -1 | cut -d '"' -f 2`
+ sudo virsh -c qemu:///system define --file $NODE_NAME
+ rm -f $NODE_NAME
+ maas $PROFILE machines create autodetect_nodegroup='yes' name=$NODE_NAME \
+ tags='control compute' hostname=$NODE_NAME power_type='virsh' mac_addresses=$nodemac \
+ power_parameters_power_address='qemu+ssh://'$USER'@'$MAAS_IP'/system' \
+ architecture='amd64/generic' power_parameters_power_id=$NODE_NAME
+ nodeid=$(maas $PROFILE machines read | jq -r '.[] | select(.hostname == '\"$NODE_NAME\"').system_id')
+ # each VM carries both the control and compute tags (applied explicitly below)
+ maas $PROFILE tag update-nodes control add=$nodeid || true
+ maas $PROFILE tag update-nodes compute add=$nodeid || true
+ done
else
units=`cat deployconfig.json | jq .opnfv.units`
#configure MAAS with the different options.
configuremaas
-#not virtual lab only. Can be done using any physical pod now.
-enablesubnetanddhcp
+# functioncall with subnetid to add and second parameter is dhcp enable
+# third parameter will define the space. It is required to have admin
+
+# jq prints the literal string "null" when a space has no cidr, so guard
+# against it as well as against an empty value.
+if ([ $SUBNET_CIDR ] && [ "$SUBNET_CIDR" != "null" ]); then
+ enablesubnetanddhcp $SUBNET_CIDR true admin
+else
+ echo "atleast admin network should be defined"
+ echo "MAAS configuration can not continue"
+ exit 2
+fi
+
+if ([ $SUBNETDATA_CIDR ] && [ "$SUBNETDATA_CIDR" != "null" ]); then
+ enablesubnetanddhcp $SUBNETDATA_CIDR false data
+fi
+if ([ $SUBNETPUB_CIDR ] && [ "$SUBNETPUB_CIDR" != "null" ]); then
+ enablesubnetanddhcp $SUBNETPUB_CIDR false public
+fi
+
+if ([ $SUBNETSTOR_CIDR ] && [ "$SUBNETSTOR_CIDR" != "null" ]); then
+ enablesubnetanddhcp $SUBNETSTOR_CIDR false storage
+fi
#just make sure rack controller has been synced and import only
# just whether images have been imported or not.
# Functions for MAAS network customization
#
-#Below function will mark the interfaces in Auto mode to enbled by MAAS
-enableautomode() {
- API_KEY=`sudo maas-region apikey --username=ubuntu`
- maas login $PROFILE $API_SERVERMAAS $API_KEY
-
- for node in $(maas $PROFILE nodes read | jq -r '.[].system_id')
- do
- maas $PROFILE interface link-subnet $node $1 mode=$2 subnet=$3 || true
- done
-}
-
#Below function will mark the interfaces in Auto mode to enbled by MAAS
# using hostname of the node added into MAAS
enableautomodebyname() {
for node in $(maas $PROFILE nodes read | jq -r '.[].system_id')
do
- interface=$(maas $PROFILE interface read $node $2 | jq -r '.id')
- maas $PROFILE interfaces create-vlan $node vlan=$1 parent=$interface
+ # resolve the MAAS ids for vid $1: the vlan id, its fabric, and the node
+ # interface already sitting on that fabric (used as the vlan parent)
+ vlanid=$(maas $PROFILE subnets read | jq '.[].vlan | select(.vid=='$1')'.id)
+ fabricid=`maas $PROFILE subnets read | jq '.[].vlan | select(.vid=='$1')'.fabric_id`
+ interface=`maas $PROFILE interfaces read $node | jq '.[] | select(.vlan.fabric_id=='$fabricid')'.id`
+ maas $PROFILE interfaces create-vlan $node vlan=$vlanid parent=$interface || true
done
}
juju add-cloud $cloudname maas-cloud.yaml --replace
}
-
#
# VLAN customization
#
#
# Enable MAAS nodes interfaces
#
+API_KEY=`sudo maas-region apikey --username=ubuntu`
+maas login $PROFILE $API_SERVERMAAS $API_KEY
+
+if [ -e ./labconfig.json ]; then
+ # We will configure all node, so we need the qty, and loop on it
+ NODE_QTY=$(cat labconfig.json | jq --raw-output '.lab.racks[0].nodes[]'.name | wc -l)
+ NODE_QTY=$((NODE_QTY-1))
+ for NODE_ID in $(seq 0 $NODE_QTY); do
+ # Get the NAME/SYS_ID of this node
+ NODE_NAME=$(cat labconfig.json | jq --raw-output ".lab.racks[0].nodes[$NODE_ID].name")
+ NODE_SYS_ID=$(maas $PROFILE nodes read | jq -r ".[] | select(.hostname==\"$NODE_NAME\")".system_id)
+ echo ">>> Configuring node $NODE_NAME [$NODE_ID][$NODE_SYS_ID]"
+ # Recover the network interfaces list and configure each one
+ # with sorting the list, we have hardware interface first, than the vlan interfaces
+ IF_LIST=$(cat labconfig.json | jq --raw-output ".lab.racks[0].nodes[$NODE_ID].nics[] ".ifname )
+ for IF_NAME in $IF_LIST; do
+ # get the space of the interface
+ IF_SPACE=$(cat labconfig.json | jq --raw-output ".lab.racks[0].nodes[$NODE_ID].nics[] | select(.ifname==\"$IF_NAME\") ".spaces[])
+ case "$IF_SPACE" in
+ 'data') SUBNET_CIDR=$SUBNETDATA_CIDR; IF_MODE='AUTO' ;;
+ 'public') SUBNET_CIDR=$SUBNETPUB_CIDR; IF_MODE='AUTO' ;;
+ 'storage') SUBNET_CIDR=$SUBNETSTOR_CIDR; IF_MODE='AUTO' ;;
+ 'floating') SUBNET_CIDR=$SUBNETFLOAT_CIDR; IF_MODE='link_up' ;;
+ *) SUBNET_CIDR='null'; IF_MODE='null'; echo " >>> Unknown SPACE" ;;
+ esac
+ echo " >>> Configuring interface $IF_NAME [$IF_SPACE][$SUBNET_CIDR]"
+
+ # if we have a vlan parameter in the space config
+ IF_VLAN=$(cat labconfig.json | jq --raw-output ".opnfv.spaces[] | select(.type==\"$IF_SPACE\")".vlan)
+ # =~ only works inside [[ ]]; a plain [ ] treats it as string comparison
+ if ([ -z $IF_VLAN ] && [[ $IF_NAME =~ \. ]]); then
+ # We have no vlan specified on spaces, but we have a vlan subinterface
+ IF_VLAN=${IF_NAME##*.}; fi
+
+ # in case of interface renaming
+ IF_NEWNAME=$IF_NAME
+
+ if ([ $IF_NEWNAME ] && [ "$IF_NEWNAME" != "null" ]); then
+ # rename interface if needed
+ IF_MACLOWER=$( cat labconfig.json | jq ".lab.racks[0].nodes[$NODE_ID].nics[] | select(.ifname==\"$IF_NEWNAME\")".mac[0])
+ IF_MAC=(${IF_MACLOWER,,})
+ # use $PROFILE, not a hard-coded profile name
+ IF_ID=$( maas $PROFILE interfaces read $NODE_SYS_ID | jq ".[] | select(.mac_address==$IF_MAC)".id)
+ if ([ $IF_ID ] && [ "$IF_ID" != "null" ]); then
+ maas $PROFILE interface update $NODE_SYS_ID $IF_ID name=$IF_NEWNAME
+ IF_NAME=$IF_NEWNAME
+ fi
+ fi
-#read interface needed in Auto mode and enable it. Will be rmeoved once auto enablement will be implemented in the maas-deployer.
+ # In case of a VLAN interface
+ if ([ $IF_VLAN ] && [ "$IF_VLAN" != "null" ]); then
+ echo " >>> Configuring VLAN $IF_VLAN"
+ VLANID=$(maas $PROFILE subnets read | jq ".[].vlan | select(.vid==$IF_VLAN)".id)
+ FABRICID=$(maas $PROFILE subnets read | jq ".[].vlan | select(.vid==$IF_VLAN)".fabric_id)
+ INTERFACE=$(maas $PROFILE interfaces read $NODE_SYS_ID | jq ".[] | select(.vlan.fabric_id==$FABRICID)".id)
+ if [[ -z $INTERFACE ]]; then
+ # parent interface is not set because it does not have a SUBNET_CIDR
+ PARENT_VLANID=$(maas $PROFILE fabrics read | jq ".[].vlans[] | select(.fabric_id==$FABRICID and .name==\"untagged\")".id)
+ # If we need to rename the interface, use new interface name
+ if ([ $IF_NEWNAME ] && [ "$IF_NEWNAME" != "null" ]); then
+ PARENT_IF_NAME=${IF_NEWNAME%%.*}
+ IF_NAME=$IF_NEWNAME
+ else
+ PARENT_IF_NAME=${IF_NAME%%.*}
+ fi
+ # We set the physical interface to the targeted fabric
+ maas $PROFILE interface update $NODE_SYS_ID $PARENT_IF_NAME vlan=$PARENT_VLANID
+ sleep 2
+ INTERFACE=$(maas $PROFILE interfaces read $NODE_SYS_ID | jq ".[] | select(.vlan.fabric_id==$FABRICID)".id)
+ fi
+ maas $PROFILE interfaces create-vlan $NODE_SYS_ID vlan=$VLANID parent=$INTERFACE || true
+ fi
+ # Configure the interface
+ if ([ $SUBNET_CIDR ] && [ "$SUBNET_CIDR" != "null" ]); then
+ VLANID=$(maas $PROFILE subnet read $SUBNET_CIDR | jq -r '.vlan.id')
+ if !([ $IF_VLAN ] && [ "$IF_VLAN" != "null" ]); then
+ # If this interface is not a VLAN (done withe create-vlan)
+ maas $PROFILE interface update $NODE_SYS_ID $IF_NAME vlan=$VLANID
+ fi
+ maas $PROFILE interface link-subnet $NODE_SYS_ID $IF_NAME mode=$IF_MODE subnet=$SUBNET_CIDR || true
+ sleep 2
+ else
+ echo " >>> Not configuring, we have an empty Subnet CIDR"
+ fi
-if [ -e ./deployconfig.yaml ]; then
- enableiflist=`grep "interface-enable" deployconfig.yaml | cut -d ' ' -f 4 `
- datanet=`grep "dataNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
- stornet=`grep "storageNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
- pubnet=`grep "publicNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
-
- # split EXTERNAL_NETWORK=first ip;last ip; gateway;network
-
- if [ "$datanet" != "''" ]; then
- EXTNET=(${enableiflist//,/ })
- i="0"
- while [ ! -z "${EXTNET[i]}" ];
- do
- enableautomode ${EXTNET[i]} AUTO $datanet || true
- i=$[$i+1]
- done
-
- fi
- if [ "$stornet" != "''" ]; then
- EXTNET=(${enableiflist//,/ })
- i="0"
- while [ ! -z "${EXTNET[i]}" ];
- do
- enableautomode ${EXTNET[i]} AUTO $stornet || true
- i=$[$i+1]
- done
- fi
- if [ "$pubnet" != "''" ]; then
- EXTNET=(${enableiflist//,/ })
- i="0"
- while [ ! -z "${EXTNET[i]}" ];
- do
- enableautomode ${EXTNET[i]} AUTO $pubnet || true
- i=$[$i+1]
- done
- fi
+ done
+ done
fi
-
# Add the cloud and controller credentials for MAAS for that lab.
jujuver=`juju --version`