implement deploy on virtual pod 53/25153/31
author Yao Lu <lu.yao135@zte.com.cn>
Tue, 29 Nov 2016 14:39:37 +0000 (22:39 +0800)
committer zhouya <zhou.ya@zte.com.cn>
Tue, 20 Dec 2016 07:27:28 +0000 (15:27 +0800)
Change-Id: I600e21f0d36f37c75cac4ace5f7225e32ab93d56
Signed-off-by: Yao Lu <lu.yao135@zte.com.cn>
12 files changed:
ci/deploy/deploy.sh
deploy/check_openstack_progress.sh [new file with mode: 0755]
deploy/check_os_progress.sh [new file with mode: 0755]
deploy/config/vm_environment/zte-virtual1/deploy.yml
deploy/config/vm_environment/zte-virtual1/network.yml
deploy/daisy.conf [new file with mode: 0644]
deploy/get_conf.py
deploy/get_para_from_deploy.py [new file with mode: 0755]
deploy/tempest.py
deploy/trustme.sh [new file with mode: 0755]
templates/virtual_environment/networks/daisy.xml [moved from templates/virtual_environment/networks/daisy1.xml with 100% similarity]
templates/virtual_environment/networks/os-all_in_one.xml [moved from templates/virtual_environment/networks/daisy2.xml with 100% similarity]

diff --git a/ci/deploy/deploy.sh b/ci/deploy/deploy.sh
index 6d7addd..a2d1849 100755 (executable)
 ##############################################################################
 #daisy host discover
 ######exit before finish test#######
-exit 0
+exit 0
 
 ##########TODO after test##########
-DHA=$1
-NETWORK=$2
-tempest_path=$WORKSPACE/deploy
+DHA=$WORKSPACE/$1
+NETWORK=$WORKSPACE/$2
+deploy_path=$WORKSPACE/deploy
+create_qcow2_path=$WORKSPACE/tools
+net_daisy1=$WORKSPACE/templates/virtual_environment/networks/daisy.xml
+net_daisy2=$WORKSPACE/templates/virtual_environment/networks/os-all_in_one.xml
+pod_daisy=$WORKSPACE/templates/virtual_environment/vms/daisy.xml
+pod_all_in_one=$WORKSPACE/templates/virtual_environment/vms/all_in_one.xml
 
-echo "====== clean && install daisy==========="
-.$WORKSPACE/opnfv.bin  clean
-rc=$?
-if [ $rc -ne 0 ]; then
-    echo "daisy clean failed"
-    exit 1
-else
-    echo "daisy clean successfully"
-fi
-.$WORKSPACE/opnfv.bin  install
+parameter_from_deploy=`python $WORKSPACE/deploy/get_para_from_deploy.py --dha $DHA`
+
+daisyserver_size=`echo $parameter_from_deploy | cut -d " " -f 1`
+controller_node_size=`echo $parameter_from_deploy | cut -d " " -f 2`
+compute_node_size=`echo $parameter_from_deploy | cut -d " " -f 3`
+daisy_passwd=`echo $parameter_from_deploy | cut -d " " -f 4`
+daisy_ip=`echo $parameter_from_deploy | cut -d " " -f 5`
+daisy_gateway=`echo $parameter_from_deploy | cut -d " " -f 6`
+
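+#run a command on the daisy VM over ssh (password-less login is set up later by trustme.sh)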
+function execute_on_jumpserver
+{
+    ssh $1 -o UserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no $2
+}
+
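+#define and start a libvirt network ($1: network xml, $2: network name) and a vm ($3: vm xml, $4: vm name)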
+function create_node
+{
+    virsh net-define $1
+    virsh net-autostart $2
+    virsh net-start $2
+    virsh define $3
+    virsh start $4
+}
+
+#update a key = value config option in a conf or ini file
+function update_config
+{
+    local file=$1
+    local key=$2
+    local value=$3
+
+    [ ! -e $file ] && return
+
+    #echo update key $key to value $value in file $file ...
+    local exist=`grep "^[[:space:]]*[^#]" $file | grep -c "$key[[:space:]]*=[[:space:]]*.*"`
+    #note: a comment line must start with a # (leading whitespace allowed)
+    local comment=`grep -c "^[[:space:]]*#[[:space:]]*$key[[:space:]]*=[[:space:]]*.*"  $file`
+
+    if [[ $value == "#" ]];then
+        if [ $exist -gt 0 ];then
+            sed  -i "/^[^#]/s/$key[[:space:]]*=/\#$key=/" $file
+        fi
+        return
+    fi
+
+    if [ $exist -gt 0 ];then
+        #an effective (uncommented) configuration line exists, so update its value directly
+        sed  -i "/^[^#]/s#$key[[:space:]]*=.*#$key=$value#" $file
+
+    elif [ $comment -gt 0 ];then
+        #the configuration line is commented out, so uncomment it and update the value
+        sed -i "s@^[[:space:]]*#[[:space:]]*$key[[:space:]]*=[[:space:]]*.*@$key=$value@" $file
+    else
+        #otherwise append a new configuration line at the end
+        echo "$key=$value" >> $file
+    fi
+}
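+#usage example: update_config ./daisy.conf build_pxe no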
+
+echo "=======create daisy node================"
+$create_qcow2_path/daisy-img-modify.sh -c $create_qcow2_path/centos-img-modify.sh -a $daisy_ip -g $daisy_gateway -s $daisyserver_size
+#qemu-img resize centos7.qcow2 100G
+create_node $net_daisy1 daisy1 $pod_daisy daisy
+sleep 20
+
+echo "====== install daisy==========="
+$deploy_path/trustme.sh $daisy_ip $daisy_passwd
+scp -r $WORKSPACE root@$daisy_ip:/home
+
+execute_on_jumpserver $daisy_ip "mkdir -p /home/daisy_install"
+update_config $WORKSPACE/deploy/daisy.conf daisy_management_ip $daisy_ip
+scp $WORKSPACE/deploy/daisy.conf root@$daisy_ip:/home/daisy_install
+execute_on_jumpserver $daisy_ip "$WORKSPACE/opnfv.bin  install"
 rc=$?
 if [ $rc -ne 0 ]; then
     echo "daisy install failed"
@@ -35,29 +101,28 @@ else
     echo "daisy install successfully"
 fi
 
-source ~/daisyrc_admin
-
-echo "======prepare install openstack==========="
-python $tempest_path/tempest.py --dha $DHA --network $NETWORK
-
-echo "======daisy install kolla(openstack)==========="
-cluster_id=`daisy cluster-list | awk -F "|" '{print $2}' | sed -n '4p'`
-daisy install $cluster_id
-echo "check installing process..."
-var=1
-while [ $var -eq 1 ]; do
-    echo "loop for judge openstack installing  progress..."
-    openstack_install_active=`daisy host-list --cluster-id $cluster_id | awk -F "|" '{print $12}' | grep -c "active" `
-    openstack_install_failed=`daisy host-list --cluster-id $cluster_id | awk -F "|" '{print $12}' | grep -c "install-failed" `
-    if [ $openstack_install_active -eq 1 ]; then
-        echo "openstack installing successful ..."
-        break
-    elif [ $openstack_install_failed -gt 0 ]; then
-        echo "openstack installing have failed..."
-        tail -n 200 /var/log/daisy/kolla_$cluster_id*
-        exit 1
-    else
-        echo " openstack in installing , please waiting ..."
-    fi
-done
+echo "====== add relate config of kolla==========="
+execute_on_jumpserver $daisy_ip "mkdir -p /etc/kolla/config/nova"
+execute_on_jumpserver $daisy_ip "echo -e "[libvirt]\nvirt_type=qemu" > /etc/kolla/config/nova/nova-compute.conf"
+
+echo "===prepare cluster and pxe==="
+execute_on_jumpserver $daisy_ip "python $WORKSPACE/deploy/tempest.py --dha $DHA --network $NETWORK --cluster "yes""
+
+echo "=====create all-in-one node======"
+qemu-img create -f qcow2 $WORKSPACE/../qemu/vms/all_in_one.qcow2 200G
+create_node $net_daisy2 daisy2 $pod_all_in_one all_in_one
+sleep 20
+
+echo "======prepare host and pxe==========="
+execute_on_jumpserver $daisy_ip "python $WORKSPACE/deploy/tempest.py  --dha $DHA --network $NETWORK --host "yes""
+
+echo "======daisy deploy os and openstack==========="
+virsh destroy all_in_one
+virsh start all_in_one
+
+echo "===========check install progress==========="
+execute_on_jumpserver $daisy_ip "$WORKSPACE/deploy/check_os_progress.sh"
+virsh reboot all_in_one
+execute_on_jumpserver $daisy_ip "$WORKSPACE/deploy/check_openstack_progress.sh"
+
 exit 0
diff --git a/deploy/check_openstack_progress.sh b/deploy/check_openstack_progress.sh
new file mode 100755 (executable)
index 0000000..c1c88eb
--- /dev/null
@@ -0,0 +1,20 @@
+#!/bin/bash
+source /root/daisyrc_admin
+echo "check openstack installing progress..."
+cluster_id=`daisy cluster-list | awk -F "|" '{print $2}' | sed -n '4p'`
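+#column 12 of 'daisy host-list' is the openstack (kolla) install status, column 11 its progress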
+while true; do
+    openstack_install_active=`daisy host-list --cluster-id $cluster_id | awk -F "|" '{print $12}' | grep -c "active" `
+    openstack_install_failed=`daisy host-list --cluster-id $cluster_id | awk -F "|" '{print $12}' | grep -c "install-failed" `
+    if [ $openstack_install_active -eq 1 ]; then
+        echo "openstack installing successful ..."
+        break
+    elif [ $openstack_install_failed -gt 0 ]; then
+        echo "openstack installing have failed..."
+        tail -n 200 /var/log/daisy/kolla_$cluster_id*
+        exit 1
+    else
+        progress=`daisy host-list --cluster-id $cluster_id |grep DISCOVERY_SUCCESSFUL |awk -F "|" '{print $11}'|sed s/[[:space:]]//g`
+        echo " openstack in installing , progress is $progress%"
+        sleep 30
+    fi
+done
diff --git a/deploy/check_os_progress.sh b/deploy/check_os_progress.sh
new file mode 100755 (executable)
index 0000000..bf44fe2
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+source /root/daisyrc_admin
+cluster_id=`daisy cluster-list | awk -F "|" '{print $2}' | sed -n '4p'`
+daisy install $cluster_id --skip-pxe-ipmi true
+echo "check os installing progress..."
+while true; do
+    os_install_active=`daisy host-list --cluster-id $cluster_id | awk -F "|" '{print $8}' | grep -c "active" `
+    os_install_failed=`daisy host-list --cluster-id $cluster_id | awk -F "|" '{print $8}' | grep -c "install-failed" `
+    if [ $os_install_active -eq 1 ]; then
+        echo "os installing successful ..."
+        break
+    elif [ $os_install_failed -gt 0 ]; then
+        echo "os installing have failed..."
+        exit 1
+    else
+        progress=`daisy host-list --cluster-id $cluster_id |grep DISCOVERY_SUCCESSFUL |awk -F "|" '{print $7}'|sed s/[[:space:]]//g`
+        echo "os in installing, the progress is $progress%"
+        sleep 10
+    fi
+done
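+#os installation finished, so the dhcp service used during the pxe install is no longer needed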
+systemctl disable dhcpd
+systemctl stop dhcpd
diff --git a/deploy/config/vm_environment/zte-virtual1/deploy.yml b/deploy/config/vm_environment/zte-virtual1/deploy.yml
index e12a981..78c3201 100644 (file)
@@ -1,22 +1,12 @@
 hosts:
-- name: 'Node5'
+- name: 'all_in_one'
   roles:
     - 'CONTROLLER_LB'
     - 'COMPUTER'
-  ip: '192.168.122.152'
-  password: 'ossdbg1'
-  interface:
-    - phynic: 'ens3'
-      logic:
-        - name: 'MANAGEMENT'
-          ip: '192.168.122.152'
-        - name: 'PUBLICAPI'
-          ip:
-        - name: 'physnet1'
-          ip:
-        - name: 'STORAGE'
-          ip:
-    - phynic: 'ens8'
-      logic:
-        - name: 'EXTERNAL'
-          ip:
+disks:
+  daisy: 100G
+  controller: 100G
+  compute: 100G
+daisy_passwd: 'r00tme'
+daisy_ip: '10.20.11.2'
+daisy_gateway: '10.20.11.1'
diff --git a/deploy/config/vm_environment/zte-virtual1/network.yml b/deploy/config/vm_environment/zte-virtual1/network.yml
index ea08117..3686388 100644 (file)
@@ -14,37 +14,46 @@ network-config-metadata:
   created: 'Mon Oct 31 2016'
   comment:
 networks:
-  - cidr: '192.168.122.0/24'
-    gateway: '192.168.122.1'
+  - cidr: '10.20.11.0/24'
+    gateway: '10.20.11.1'
     ip_ranges:
-    - 'start': '192.168.122.1'
-      'end': '192.168.122.254'
+    - 'start': '10.20.11.3'
+      'end': '10.20.11.10'
     name: 'MANAGEMENT'
-  - cidr: '192.168.122.0/24'
-    gateway: '192.168.122.1'
+  - cidr: '10.20.11.0/24'
+    gateway: '10.20.11.1'
     ip_ranges:
-    - start: '192.168.122.1'
-      end: '192.168.122.254'
+    - start: '10.20.11.3'
+      end: '10.20.11.10'
     name: 'STORAGE'
-  - cidr: '192.168.2.0/24'
-    gateway: '192.168.2.1'
+  - cidr: '172.10.101.0/24'
+    gateway: '172.10.101.1'
     ip_ranges:
-    - 'start': '192.168.2.1'
-      'end': '192.168.2.254'
+    - 'start': '172.10.101.1'
+      'end': '172.10.101.10'
     'name': 'EXTERNAL'
-  - cidr: '192.168.122.0/24'
-    gateway: '192.168.122.1'
+  - cidr: '10.20.11.0/24'
+    gateway: '10.20.11.1'
     ip_ranges:
-    - 'start': '192.168.122.1'
-      'end': '192.168.122.254'
+    - 'start': '10.20.11.3'
+      'end': '10.20.11.10'
     name: 'PUBLICAPI'
-  - cidr: '192.168.122.0/24'
-    gateway: '192.168.122.1'
+  - cidr: '10.20.11.0/24'
+    gateway: '10.20.11.1'
     ip_ranges:
-    - 'start': '192.168.122.1'
-      'end': '192.168.122.254'
-    name: 'physnet1'
-
-internal_vip: '192.168.122.144'
-
-public_vip: '192.168.122.144'
+    - 'start': '10.20.11.3'
+      'end': '10.20.11.10'
+    name: 'TENANT'
+interfaces:
+  - name: 'EXTERNAL'
+    interface: 'ens8'
+  - name: 'MANAGEMENT'
+    interface: 'ens3'
+  - name: 'PUBLICAPI'
+    interface: 'ens3'
+  - name: 'STORAGE'
+    interface: 'ens3'
+  - name: 'TENANT'
+    interface: 'ens3'
+internal_vip: '10.20.11.11'
+public_vip: '10.20.11.11'
diff --git a/deploy/daisy.conf b/deploy/daisy.conf
new file mode 100644 (file)
index 0000000..f8ce053
--- /dev/null
@@ -0,0 +1,34 @@
+[DEFAULT]
+#The management ip of daisy
+#This option is required when Daisy is installed in a virtual machine.
+daisy_management_ip=
+
+[BACKEND]
+#Default backend types of daisy, including tecs, zenic, proton, kolla.
+#If you want to create a cluster with more than one backend,
+#all backend names should be provided for this configuration item,
+#such as, default_backend_types=tecs,zenic,proton,kolla.
+default_backend_types=kolla
+
+[OS]
+#Default os install types of daisy
+os_install_type=pxe
+
+[PXE]
+#Set to 'yes' if you want to build a PXE server, otherwise to 'no'.
+build_pxe=no
+
+#The nic on which to build the PXE server.
+eth_name=
+
+#The ip value of PXE server
+ip_address=99.99.1.5
+
+#the net mask of PXE server
+net_mask=255.255.255.0
+
+#The start value of PXE client ip range
+client_ip_begin=99.99.1.50
+
+#The end value of PXE client ip range
+client_ip_end=99.99.1.150
diff --git a/deploy/get_conf.py b/deploy/get_conf.py
index 5ce9806..eaac8b1 100755 (executable)
@@ -23,7 +23,9 @@ def decorator_mk(types):
             result = {}
             for item in item_list:
                 ret = func(item)
-                if ret:
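+                # if this key already exists, append to its list instead of overwriting it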
+                if ret.keys()[0] in result:
+                    result[ret.keys()[0]].append(ret.values()[0][0])
+                else:
                     result.update(ret)
             return result
         return wrapter
@@ -33,20 +35,24 @@ def decorator_mk(types):
 @decorator_mk('networks')
 def network(network=None):
     net_plane = network.get('name', '')
+    if net_plane == "TENANT":
+        net_plane = "physnet1"
     network.pop('name')
     map = {}
     map[net_plane] = network
     return map
 
 
-@decorator_mk('hosts')
-def interface(host=None):
-    interface = host.get('interface', '')
-    map = {}
-    for k in interface:
-        for v in k['logic']:
-            map[v['name']] = {'ip': v['ip'], 'phynic': k['phynic']}
-    return map
+@decorator_mk('interfaces')
+def interface(interface=None):
+    net_name = interface.get('name', '')
+    if net_name == "TENANT":
+        net_name = "physnet1"
+    interface_name = interface.get('interface', '')
+    map2 = {}
+    map = {'ip': '', 'name': net_name}
+    map2[interface_name] = [map]
+    return map2
 
 
 @decorator_mk('hosts')
@@ -70,23 +76,27 @@ def host(host=None):
 def network_config_parse(s, dha_file):
     network_map = network(s)
     vip = s.get('internal_vip')
-    return network_map, vip
+    interface_map = interface(s)
+    return network_map, vip, interface_map
 
 
 def dha_config_parse(s, dha_file):
-    host_interface_map = interface(s)
     host_role_map = role(s)
-    host_ip_passwd_map = host(s)
-    return host_interface_map, host_role_map, host_ip_passwd_map
+    hosts_name = []
+    for name in host_role_map:
+        hosts_name.append(name)
+    return hosts_name
 
 
 def config(dha_file, network_file):
     data = init(dha_file)
-    host_interface_map, host_role_map, host_ip_passwd_map = \
-        dha_config_parse(data, dha_file)
+    hosts_name = dha_config_parse(data, dha_file)
     data = init(network_file)
-    network_map, vip = network_config_parse(data, network_file)
-    for k in host_interface_map:
-        host_interface_map[k].update(network_map[k])
-    return host_interface_map, host_role_map, \
-        host_ip_passwd_map, network_map, vip
+    network_map, vip, interface_map = network_config_parse(data, network_file)
+    for interface_name in interface_map:
+        for name in interface_map[interface_name]:
+            if name.get('name', None) == 'MANAGEMENT':
+                name['ip'] = network_map.get(
+                    'MANAGEMENT', None).get(
+                    'ip_ranges', None)[0].get('start', None)
+    return interface_map, hosts_name, network_map, vip
diff --git a/deploy/get_para_from_deploy.py b/deploy/get_para_from_deploy.py
new file mode 100755 (executable)
index 0000000..c0e08c5
--- /dev/null
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2016 ZTE Corporation and others.
+# hu.zhijiang@zte.com.cn
+# lu.yao135@zte.com.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import get_conf
+from oslo_config import cfg
+import sys
+
+_CLI_OPTS = [
+    cfg.StrOpt('dha',
+               help='The dha file path'),
+]
+
+
+def parse(conf, args):
+    conf.register_cli_opts(_CLI_OPTS)
+    conf(args=args)
+
+
+def get_yml_para(dha_file):
+    data = get_conf.init(dha_file)
+    disks = data.get("disks", 0)
+    daisyserver_size = disks.get("daisy", 0)
+    controller_node_size = disks.get("controller", 0)
+    compute_node_size = disks.get("compute", 0)
+    daisy_passwd = data.get("daisy_passwd", "")
+    daisy_ip = data.get("daisy_ip", "")
+    daisy_gateway = data.get("daisy_gateway", "")
+    return daisyserver_size, controller_node_size,\
+        compute_node_size, daisy_passwd, daisy_ip, daisy_gateway
+
+
+def get_conf_from_deploy():
+    conf = cfg.ConfigOpts()
+    parse(conf, sys.argv[1:])
+    daisyserver_size, controller_node_size, compute_node_size,\
+        daisy_passwd, daisy_ip, daisy_gateway = get_yml_para(conf['dha'])
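+    # values are printed in the order deploy.sh parses them (cut -d " " -f 1..6)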
+    print daisyserver_size
+    print controller_node_size
+    print compute_node_size
+    print daisy_passwd
+    print daisy_ip
+    print daisy_gateway
+
+if __name__ == "__main__":
+    get_conf_from_deploy()
diff --git a/deploy/tempest.py b/deploy/tempest.py
index 011f1e5..1efe159 100755 (executable)
@@ -14,12 +14,12 @@ from daisyclient.v1 import client as daisy_client
 import get_conf
 import traceback
 import time
-import subprocess
 
 daisy_version = 1.0
 daisy_endpoint = "http://127.0.0.1:19292"
 client = daisy_client.Client(version=daisy_version, endpoint=daisy_endpoint)
-
+iso_path = "/var/lib/daisy/kolla/CentOS-7-x86_64-DVD-1511.iso"
+deployment_interface = "ens3"
 cluster_name = "clustertest"
 
 _CLI_OPTS = [
@@ -27,6 +27,10 @@ _CLI_OPTS = [
                help='The dha file path'),
     cfg.StrOpt('network',
                help='The network file path'),
+    cfg.StrOpt('cluster',
+               help='Config cluster'),
+    cfg.StrOpt('host',
+               help='Config host'),
 ]
 
 
@@ -42,62 +46,56 @@ def print_bar(msg):
     print ("--------------------------------------------")
 
 
-def foo():
+def prepare_install():
     try:
         print("get config...")
         conf = cfg.ConfigOpts()
         parse(conf, sys.argv[1:])
-        host_interface_map, host_role_map, \
-            host_ip_passwd_map, network_map, vip = \
+        host_interface_map, hosts_name, network_map, vip = \
             get_conf.config(conf['dha'], conf['network'])
-        print("clean deploy host...")
-        clean_deploy_host(host_ip_passwd_map)
-        print("discover host...")
-        discover_host(host_ip_passwd_map)
-        print("add cluster...")
-        cluster_meta = {'name': cluster_name, 'description': ''}
-        clusters_info = client.clusters.add(**cluster_meta)
-        cluster_id = clusters_info.id
-        print("cluster_id=%s." % cluster_id)
-        print("update network...")
-        update_network(cluster_id, network_map)
-        print("update hosts interface...")
-        hosts_info = get_hosts()
-        add_hosts_interface(cluster_id, hosts_info, host_interface_map,
-                            host_role_map, vip)
+        if conf['cluster'] and conf['cluster'] == 'yes':
+            print("add cluster...")
+            cluster_meta = {'name': cluster_name, 'description': '',
+                            'target_systems': 'os+kolla'}
+            clusters_info = client.clusters.add(**cluster_meta)
+            cluster_id = clusters_info.id
+            print("cluster_id=%s." % cluster_id)
+            print("update network...")
+            update_network(cluster_id, network_map)
+            print("build pxe...")
+            build_pxe_for_discover(cluster_id)
+        elif conf['host'] and conf['host'] == 'yes':
+            print("discover host...")
+            discover_host(hosts_name)
+            print("update hosts interface...")
+            hosts_info = get_hosts()
+            cluster_info = get_cluster()
+            cluster_id = cluster_info.id
+            add_hosts_interface(cluster_id, hosts_info,
+                                host_interface_map, vip)
+            build_pxe_for_os(cluster_id)
     except Exception:
         print("Deploy failed!!!.%s." % traceback.format_exc())
     else:
         print_bar("Everything is done!")
 
 
-def clean_deploy_host(host_ip_passwd_map):
-    for host_ip_passwd in host_ip_passwd_map:
-        command = 'sshpass -p %s ssh %s -o UserKnownHostsFile=/dev/null \
-                  -oStrictHostKeyChecking=no \
-                  "/home/daisy/forDel/tools/cleanup-containers"' % \
-                  (host_ip_passwd['passwd'], host_ip_passwd['ip'])
-        subprocess.call(command,
-                        shell=True,
-                        stdout=open('/dev/null', 'w'),
-                        stderr=subprocess.STDOUT)
-        command = 'sshpass -p %s ssh %s -o UserKnownHostsFile=/dev/null \
-                  -oStrictHostKeyChecking=no \
-                  "/home/daisy/forDel/tools/cleanup-images"' % \
-                  (host_ip_passwd['passwd'], host_ip_passwd['ip'])
-        subprocess.call(command,
-                        shell=True,
-                        stdout=open('/dev/null', 'w'),
-                        stderr=subprocess.STDOUT)
-
-
-def discover_host(host_ip_passwd_map):
-    for host_ip_passwd in host_ip_passwd_map:
-        client.hosts.add_discover_host(**host_ip_passwd)
-        client.hosts.discover_host()
+def build_pxe_for_discover(cluster_id):
+    cluster_meta = {'cluster_id': cluster_id,
+                    'deployment_interface': deployment_interface}
+    client.install.install(**cluster_meta)
+
+
+def build_pxe_for_os(cluster_id):
+    cluster_meta = {'cluster_id': cluster_id,
+                    'pxe_only': "true"}
+    client.install.install(**cluster_meta)
+
+
+def discover_host(hosts_name):
     while True:
         hosts_info = get_hosts()
-        if len(hosts_info) == len(host_ip_passwd_map):
+        if len(hosts_info) == len(hosts_name):
             print('discover hosts success!')
             break
         else:
@@ -126,23 +124,30 @@ def get_hosts():
     return hosts_info
 
 
+def get_cluster():
+    cluster_list_generator = client.clusters.list()
+    cluster_list = [cluster for cluster in cluster_list_generator]
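+    # a single cluster is expected in this deployment, so the last one found is returned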
+    for cluster in cluster_list:
+        cluster_info = client.clusters.get(cluster.id)
+    return cluster_info
+
+
 def add_hosts_interface(cluster_id, hosts_info, host_interface_map,
-                        host_role_map, vip):
+                        vip):
     for host in hosts_info:
         host = host.to_dict()
         host['cluster'] = cluster_id
-        host_name = host['name']
         for interface in host['interfaces']:
             interface_name = interface['name']
             interface['assigned_networks'] = \
-                host_interface_map[host_name][interface_name]
+                host_interface_map[interface_name]
+        host['os_version'] = iso_path
         client.hosts.update(host['id'], **host)
         print("update role...")
-        add_host_role(cluster_id, host['id'], host['name'],
-                      host_role_map, vip)
+        add_host_role(cluster_id, host['id'], host['name'], vip)
 
 
-def add_host_role(cluster_id, host_id, host_name, host_role_map, vip):
+def add_host_role(cluster_id, host_id, host_name, vip):
     role_meta = {'filters': {'cluster_id': cluster_id}}
     role_list_generator = client.roles.list(**role_meta)
     role_list = [role for role in role_list_generator]
@@ -150,15 +155,13 @@ def add_host_role(cluster_id, host_id, host_name, host_role_map, vip):
                   role.name == "CONTROLLER_LB"][0]
     computer_role_id = [role.id for role in role_list if
                         role.name == "COMPUTER"][0]
-    if "CONTROLLER_LB" in host_role_map[host_name]:
-        role_lb_update_meta = {'nodes': [host_id],
-                               'cluster_id': cluster_id, 'vip': vip}
-        client.roles.update(lb_role_id, **role_lb_update_meta)
-    if "COMPUTER" in host_role_map[host_name]:
-        role_computer_update_meta = {'nodes': [host_id],
-                                     'cluster_id': cluster_id}
-        client.roles.update(computer_role_id, **role_computer_update_meta)
+    role_lb_update_meta = {'nodes': [host_id],
+                           'cluster_id': cluster_id, 'vip': vip}
+    client.roles.update(lb_role_id, **role_lb_update_meta)
+    role_computer_update_meta = {'nodes': [host_id],
+                                 'cluster_id': cluster_id}
+    client.roles.update(computer_role_id, **role_computer_update_meta)
 
 
 if __name__ == "__main__":
-    foo()
+    prepare_install()
diff --git a/deploy/trustme.sh b/deploy/trustme.sh
new file mode 100755 (executable)
index 0000000..eedda5c
--- /dev/null
@@ -0,0 +1,88 @@
+#!/bin/sh
+#make this host trusted by the remote host so that ssh needs no password
+
+#check parameters legality
+logfile=/var/log/trustme.log
+function print_log
+{
+   local promt="$1"
+   echo -e "$promt"
+   echo -e "`date -d today +"%Y-%m-%d %H:%M:%S"`  $promt" >> $logfile
+}
+ip=$1
+passwd=$2
+if [ -z $passwd ]; then
+  print_log "Usage: `basename $0` ipaddr passwd"
+  exit 1
+fi
+
+rpm -qi sshpass >/dev/null
+if [ $? != 0 ]; then
+  print_log "Please install sshpass first"
+  exit 1
+fi
+
+#ping other host
+unreachable=`ping $ip -c 1 -W 3 | grep -c "100% packet loss"`
+if [ $unreachable -eq 1 ]; then
+  print_log "host $ip is unreachable"
+  exit 1
+fi
+
+#generate ssh pubkey
+if [ ! -e ~/.ssh/id_dsa.pub ]; then
+  print_log "generating ssh public key ..."
+  ssh-keygen -t dsa -f /root/.ssh/id_dsa -N "" <<EOF
+n
+EOF
+  if [ $? != 0 ]; then
+    print_log "ssh-keygen failed"
+    exit 1
+  fi
+fi
+
+#clear old public key
+user=`whoami`
+host=`hostname`
+keyend="$user@$host"
+print_log "my keyend = $keyend"
+cmd="sed '/$keyend$/d'  -i ~/.ssh/authorized_keys"
+print_log "clear my old pub key on $local_host ..."
+ssh-keygen -f "/root/.ssh/known_hosts" -R $ip
+if [ $? != 0 ]; then
+    print_log "delete pub key of $ip from known_hosts failed"
+    exit 1
+fi
+sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "mkdir -p ~/.ssh && touch ~/.ssh/authorized_keys"
+if [ $? != 0 ]; then
+    print_log "ssh $ip to create file authorized_keys failed"
+    exit 1
+fi
+sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "$cmd"
+if [ $? != 0 ]; then
+    print_log "ssh $ip to edit authorized_keys failed"
+    exit 1
+fi
+#copy new public key
+print_log "copy my public key to $ip ..."
+tmpfile=/tmp/`hostname`.key.pub
+sshpass -p $passwd scp -o StrictHostKeyChecking=no ~/.ssh/id_dsa.pub  $ip:$tmpfile
+if [ $? != 0 ]; then
+    print_log "scp file to $ip failed"
+    exit 1
+fi
+#copy public key to authorized_keys
+print_log "on $ip, append my public key to ~/.ssh/authorized_keys ..."
+sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "cat $tmpfile >> ~/.ssh/authorized_keys"
+if [ $? != 0 ]; then
+    print_log "ssh $ip to add public key for authorized_keys failed"
+    exit 1
+fi
+print_log "rm tmp file $ip:$tmpfile"
+sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "rm $tmpfile" 
+if [ $? != 0 ]; then
+    print_log "ssh $ip to delete tmp file failed"
+    exit 1
+fi
+print_log "trustme ok!"
+