echo "====== prepare cluster and pxe ======"
ssh $SSH_PARAS $DAISY_IP "python ${REMOTE_SPACE}/deploy/tempest.py --dha $DHA --network $NETWORK --cluster 'yes'"
+
+function get_mac_addresses_for_virtual()
+{
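+    # Collect each virtual node's MAC addresses from libvirt and append them
+    # to a copy of the DHA config, then upload the result to the Daisy server.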
+    tmpfile=$(mktemp XXXXXXXX.yml)
+    cp $DHA_CONF $tmpfile
+
+    for ((i=0;i<${#VM_MULTINODE[@]};i++));do
+        name=${VM_MULTINODE[$i]}
+        macs=$(virsh dumpxml $name | grep "<mac" | awk -F "'" '{print $2}' | tr "\n" " ")
+        line=$(awk "(/name/&&/$name/){print NR}" $tmpfile)
+        sed -i "${line}a\ mac_addresses:" $tmpfile
+        for mac in $macs; do
+            line=$((line + 1))
+            sed -i "${line}a\ - '$mac'" $tmpfile
+        done
+    done
+    scp -q $tmpfile root@$DAISY_IP:$DHA
+    rm $tmpfile
+}
+
+
echo "====== create and find node ======"
if [ $IS_BARE == 0 ];then
if [ $TARGET_HOSTS_NUM == 1 ];then
done
fi
sleep 20
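+    # multi-node virtual deploy: record each VM's MAC addresses in the DHA config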
+    if [ $TARGET_HOSTS_NUM -ne 1 ]; then
+        get_mac_addresses_for_virtual
+    fi
else
for i in $(seq 106 110); do
ipmitool -I lanplus -H 192.168.1.$i -U zteroot -P superuser -R 1 chassis bootdev pxe
import os
import paramiko
import scp
+import tempfile
import time
+import yaml
from utils import (
WORKSPACE,
net_file=path_join(self.remote_dir, self.net_file_name))
self.ssh_run(cmd, check=True)
+    def copy_new_deploy_config(self, data):
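+        """Upload the updated deploy config to the Daisy server."""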
+        fd, conf_file = tempfile.mkstemp()
+        try:
+            with os.fdopen(fd, 'w') as fh:
+                fh.write(yaml.safe_dump(data))
+            self.scp_put(conf_file,
+                         path_join(self.remote_dir, self.deploy_file_name))
+        finally:
+            os.remove(conf_file)
+
def prepare_host_and_pxe(self):
LI('Prepare host and PXE')
cmd = "python {script} --dha {deploy_file} --network {net_file} --host \'yes\' --isbare {is_bare} --scenario {scenario}".format(
reboot_vm,
delete_vm_and_disk,
create_virtual_network,
- delete_virtual_network
+ delete_virtual_network,
+ get_vm_mac_addresses
)
from utils import (
WORKSPACE,
create_virtual_disk(ceph_disk_file, ceph_size)
disks.append(ceph_disk_file)
- create_vm(template, name, disks)
+ return create_vm(template, name, disks)
def create_nodes(self):
# TODO: support virtNetTemplatePath in deploy.yml
self._daisy_keepalived_net = net_name
for node in self.deploy_struct['hosts']:
- self.create_virtual_node(node)
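+            # record the VM's MAC addresses so discovered hosts can be matched to node names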
+            domain = self.create_virtual_node(node)
+            node['mac_addresses'] = get_vm_mac_addresses(domain)
time.sleep(20)
def reboot_nodes(self, boot_devs=None):
def deploy(self, deploy_file, net_file):
self.server.prepare_cluster(deploy_file, net_file)
self.create_nodes()
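+        # re-upload the deploy config now that it contains the VMs' MAC addresses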
+        self.server.copy_new_deploy_config(self.deploy_struct)
self.server.prepare_host_and_pxe()
LI('Begin Daisy virtual-deploy os and openstack')
self.reboot_nodes()
return map
+@decorator_mk('hosts')
+def mac_address(host=None):
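+    """Map each host name to its MAC address list from the deploy config."""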
+    mac_addresses = host.get('mac_addresses', [])
+    map = {host['name']: mac_addresses}
+    return map
+
+
def network_config_parse(s, dha_file):
network_map = network(s)
vip = s.get('internal_vip')
data = init(dha_file)
ceph_disk_name = data.get('ceph_disk_name')
hosts_name = dha_config_parse(data, dha_file)
+ mac_address_map = mac_address(data)
data = init(network_file)
network_map, vip, interface_map = network_config_parse(data, network_file)
- return interface_map, hosts_name, network_map, vip, ceph_disk_name
+ return interface_map, hosts_name, network_map, vip, ceph_disk_name, mac_address_map
def parse(conf, args):
if domain.create() < 0:
err_exit('Failed to start VM %s' % template)
domain.setAutostart(1)
+ conn.close()
LI('VM %s is started' % domain.name())
- return
+ return domain
def reboot_vm(vm_name, boot_devs=None):
vm.destroy()
LI('Destroy VM %s' % vm_name)
- # root = ET.fromstring(vm.XMLDesc())
- temp_file = path_join(WORKSPACE, 'tmp.xml')
- commands.getoutput('virsh dumpxml %s > %s' % (vm_name, temp_file))
- tree = ET.parse(temp_file)
- root = tree.getroot()
+ root = ET.fromstring(vm.XMLDesc())
LI('Modify the boot order %s' % boot_devs)
modify_vm_boot_order(root, boot_devs)
- tree.write(temp_file)
LI('Re-define and start the VM %s' % vm_name)
vm.undefine()
- vm = conn.defineXML(commands.getoutput('cat %s' % temp_file))
+ vm = conn.defineXML(ET.tostring(root))
vm.create()
vm.setAutostart(1)
else:
conn.close()
if not result:
LI('Network %s is not found' % name)
+
+
+def get_vm_mac_addresses(domain):
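+    """Return the MAC addresses of all interfaces defined on the domain."""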
+    root = ET.fromstring(domain.XMLDesc())
+    macs = root.findall('./devices/interface/mac')
+    return [mac.attrib['address'] for mac in macs]
print("get config...")
conf = cfg.ConfigOpts()
parse(conf, sys.argv[1:])
- host_interface_map, hosts_name, network_map, vip, ceph_disk_name = \
+ host_interface_map, hosts_name, network_map, vip, ceph_disk_name, mac_address_map = \
get_conf.config(conf['dha'], conf['network'])
if conf['cluster'] and conf['cluster'] == 'yes':
print("add cluster...")
hosts_info = get_hosts()
cluster_info = get_cluster()
cluster_id = cluster_info.id
- add_hosts_interface(cluster_id, hosts_info, hosts_name,
+ add_hosts_interface(cluster_id, hosts_info, hosts_name, mac_address_map,
host_interface_map, vip, isbare)
if len(hosts_name) == 1:
protocol_type = 'LVM'
return cluster_info
-def add_hosts_interface(cluster_id, hosts_info, hosts_name, host_interface_map,
+def add_hosts_interface(cluster_id, hosts_info, hosts_name, mac_address_map,
+ host_interface_map,
vip, isbare):
for host_name, host in zip(hosts_name, hosts_info):
host = host.to_dict()
if interface_name in host_interface_map:
interface['assigned_networks'] = \
host_interface_map[interface_name]
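+            # if the DHA config lists MAC addresses, use them to identify which node this host is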
+            if mac_address_map:
+                for nodename in mac_address_map:
+                    if interface['mac'] in mac_address_map[nodename]:
+                        host_name = nodename
pathlist = os.listdir(iso_path)
for filename in pathlist:
if filename.endswith('iso'):
<domain type='kvm' id='4'>
<name>controller</name>
- <memory unit='KiB'>8388608</memory>
- <currentMemory unit='KiB'>8388608</currentMemory>
+ <memory unit='KiB'>12582912</memory>
+ <currentMemory unit='KiB'>12582912</currentMemory>
<vcpu placement='static'>4</vcpu>
<resource>
<partition>/machine</partition>