-sudo apt-get install bridge-utils openssh-server bzr git virtinst qemu-kvm libvirt-bin juju \
- maas maas-region-controller python-pip python-psutil python-openstackclient \
+sudo apt-get install bridge-utils openssh-server bzr git virtinst qemu-kvm libvirt-bin \
+ maas maas-region-controller juju python-pip python-psutil python-openstackclient \
-sudo pip install --upgrade pip
+#sudo apt-get install snapd -y
+#sudo snap install maas --classic
+#sudo snap install juju --classic
+
+sudo -H pip install --upgrade pip
# first parameter should be "custom" and the second should be either the
# absolute location of the file (including file name) or the url of the file.
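# e.g. an invocation sketch (script and file names here are illustrative):
#   ./maasdeploy.sh custom /home/ubuntu/labconfig.yaml
#   ./maasdeploy.sh custom http://10.0.0.1/labconfig.yaml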
# define the pool and try to start it even if it already exists.
# On a fresh install it may or may not be there.
-sudo adduser $USER libvirtd
+# Some systems have the group "libvirt" and others "libvirtd"; libvirt-bin
+# keeps switching between them, so try adding the user to both.
+
+sudo adduser $USER libvirtd || true
+sudo adduser $USER libvirt || true
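+# Read-only sanity check (safe to keep or drop): confirm the user landed in
+# at least one of the two groups.
+groups $USER | grep -wE 'libvirtd?' || true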
sudo virsh pool-define-as default --type dir --target /var/lib/libvirt/images/ || true
sudo virsh pool-start default || true
sudo virsh pool-autostart default || true
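# Read-only sanity check: the default pool should now show as active and
# autostarted.
sudo virsh pool-list --all || true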
# In case of virtual install set network
if [ "$virtinstall" -eq 1 ]; then
sudo virsh net-dumpxml default > default-net-org.xml
- sudo sed -i '/dhcp/d' default-net-org.xml
- sudo sed -i '/range/d' default-net-org.xml
- sudo virsh net-define default-net-org.xml
+ sed -i '/dhcp/d' default-net-org.xml
+ sed -i '/range/d' default-net-org.xml
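+# The two sed calls strip libvirt's DHCP config from the dumped network XML
+# so that MAAS, not libvirt, serves DHCP; on a stock default network the
+# removed stanza looks roughly like:
+#   <dhcp>
+#     <range start='192.168.122.2' end='192.168.122.254'/>
+#   </dhcp>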
# To avoid problem between apiclient/maas_client and apiclient from google
# we remove the package google-api-python-client from yardstick installer
*) JUJU_SPACE='default'; DHCP='OFF'; echo " >>> Unknown SPACE" ;;
esac
JUJU_SPACE_ID=$(maas $PROFILE spaces read | jq -r ".[] | select(.name==\"$JUJU_SPACE\")".id)
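# Illustrative output of "maas $PROFILE spaces read" that the jq filter above
# selects from (ids and names are made up):
#   [ { "name": "admin", "id": 0, ... }, { "name": "public", "id": 1, ... } ]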
- if ([ $NET_FABRIC_NAME ] && [ $NET_FABRIC_NAME != "null" ]); then
- maas $PROFILE subnet update $SPACE_CIDR space=$JUJU_SPACE_ID
+ JUJU_VLAN_VID=$(maas $PROFILE subnets read | jq -r ".[] | select(.name==\"$SPACE_CIDR\")".vlan.vid)
+ NET_FABRIC_ID=$(maas $PROFILE fabric read $NET_FABRIC_NAME | jq -r ".id")
+ if ([ $NET_FABRIC_ID ] && [ $NET_FABRIC_ID != "null" ]); then
+ if ([ $JUJU_VLAN_VID ] && [ $JUJU_VLAN_VID != "null" ]); then
+ maas $PROFILE vlan update $NET_FABRIC_ID $JUJU_VLAN_VID space=$JUJU_SPACE_ID
+ fi
fi
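# Illustrative shape of the JSON the new lookups walk (values are made up):
#   subnets read -> [ { "name": "10.120.0.0/24", "vlan": { "vid": 0, ... }, ... }, ... ]
#   fabric read  -> { "id": 1, "name": "fabric-1", ... }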
if ([ $type == "admin" ]); then
# If we have a network, we create it
# make sure there is no machine entry in maas
for m in $(maas $PROFILE machines read | jq -r '.[].system_id')
do
if [ "$virtinstall" -eq 1 ]; then
netw=" --network bridge=virbr0,model=virtio"
if [ "$virtinstall" -eq 1 ]; then
netw=" --network bridge=virbr0,model=virtio"
# Get the bridge hosting the remote virsh
brid=$(ssh $VIRSHHOST "ip a l | grep $VIRSHHOST | perl -pe 's/.* (.*)\$/\$1/g'")
netw=" --network bridge=$brid,model=virtio"
--disk size=120,format=qcow2,bus=virtio,cache=directsync,io=native,pool=default \
$netw $netw --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee $NODE_NAME
nodemac=`grep "mac address" $NODE_NAME | head -1 | cut -d '"' -f 2`
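# Illustrative: --print-xml dumps the domain XML, whose first interface block
# contains a line like
#   <mac address="52:54:00:12:34:56"/>
# so cut -d '"' -f 2 extracts the MAC (the address value is made up).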
rm -f $NODE_NAME
maas $PROFILE machines create autodetect_nodegroup='yes' name=$NODE_NAME \
tags='control compute' hostname=$NODE_NAME power_type='virsh' mac_addresses=$nodemac \
units=$(($units - 1));
NODE_NAME=`cat labconfig.json | jq ".lab.racks[].nodes[$units].name" | cut -d \" -f 2 `
MAC_ADDRESS=`cat labconfig.json | jq ".lab.racks[].nodes[$units].nics[] | select(.spaces[]==\"admin\").mac"[0] | cut -d \" -f 2 `
POWER_TYPE=`cat labconfig.json | jq ".lab.racks[].nodes[$units].power.type" | cut -d \" -f 2 `
POWER_IP=`cat labconfig.json | jq ".lab.racks[].nodes[$units].power.address" | cut -d \" -f 2 `
POWER_USER=`cat labconfig.json | jq ".lab.racks[].nodes[$units].power.user" | cut -d \" -f 2 `
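# Illustrative fragment of labconfig.json matching the jq paths above (all
# values are made up):
#   { "lab": { "racks": [ { "nodes": [ {
#       "name": "node1",
#       "nics": [ { "ifname": "eth0", "spaces": ["admin"], "mac": ["38:ea:a7:00:00:01"] } ],
#       "power": { "type": "ipmi", "address": "10.2.0.10", "user": "admin", "pass": "secret" }
#     } ] } ] } }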
maas $PROFILE machines create autodetect_nodegroup='yes' name=$NODE_NAME \
hostname=$NODE_NAME power_type=$POWER_TYPE power_parameters_power_address=$POWER_IP \
power_parameters_power_user=$POWER_USER power_parameters_power_pass=$POWER_PASS mac_addresses=$MAC_ADDRESS \
# make sure nodes are added into MAAS and none of them is in commissioning state
while [ "$(maas $PROFILE nodes read | grep Commissioning )" ];
do
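    # The grep keys off the "Commissioning" status string in the nodes read
    # output; the elided loop body is assumed to sleep between polls, e.g.:
    #   sleep 60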
# rename interface if needed
IF_MACLOWER=$( cat labconfig.json | jq ".lab.racks[0].nodes[$NODE_ID].nics[] | select(.ifname==\"$IF_NEWNAME\")".mac[0])
IF_MAC=(${IF_MACLOWER,,})
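# ${IF_MACLOWER,,} is bash case conversion: it lowercases the MAC pulled from
# labconfig.json above, e.g. "00:1E:67:AA:BB:CC" -> "00:1e:67:aa:bb:cc", so it
# can match the lowercase mac_address field in the interfaces read output below.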
- IF_ID=$( maas ubuntu interfaces read $NODE_SYS_ID | jq ".[] | select(.mac_address==$IF_MAC)".id)
- maas $PROFILE interface update $NODE_SYS_ID $IF_ID name=$IF_NEWNAME
+ IF_ID=$( maas $PROFILE interfaces read $NODE_SYS_ID | jq ".[] | select(.mac_address==$IF_MAC)".id)
+ if ([ $IF_ID ] && [ "$IF_ID" != "null" ]); then
+ maas $PROFILE interface update $NODE_SYS_ID $IF_ID name=$IF_NEWNAME
+ fi
fi
# Configure the interface
if ([ $SUBNET_CIDR ] && [ "$SUBNET_CIDR" != "null" ]); then
VLANID=$(maas $PROFILE subnet read $SUBNET_CIDR | jq -r '.vlan.id')
if !([ $IF_VLAN ] && [ "$IF_VLAN" != "null" ]); then
# If this interface is not a VLAN (done with create-vlan)