add public vip for external access
author    baigk <baiguoku@huawei.com>
Tue, 22 Sep 2015 14:57:22 +0000 (22:57 +0800)
committer baigk <baiguoku@huawei.com>
Tue, 22 Sep 2015 15:35:03 +0000 (23:35 +0800)
JIRA: COMPASS-69

Change-Id: I7c2b6a026d2fb002174aa5f0a619d9fe6982e528
Signed-off-by: baigk <baiguoku@huawei.com>
12 files changed:
deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh
deploy/adapters/ansible/roles/dashboard/tasks/main.yml
deploy/adapters/ansible/roles/dashboard/templates/ports.j2 [new file with mode: 0644]
deploy/adapters/ansible/roles/ha/files/notify.sh [deleted file]
deploy/adapters/ansible/roles/ha/tasks/main.yml
deploy/adapters/ansible/roles/ha/templates/failover.j2 [deleted file]
deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
deploy/adapters/ansible/roles/ha/templates/keepalived.conf
deploy/adapters/ansible/roles/keystone/templates/keystone_init
deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml
deploy/conf/baremetal_cluster_sh.yml [new file with mode: 0644]
deploy/host_vm.sh [deleted file]

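The commit wires a public VIP into the HA layer: keepalived now manages the address, haproxy binds the public-facing service ports on it, and the Keystone endpoint registrations point their publicurl at it while the internal and admin URLs stay on the internal VIP. A minimal external smoke test, assuming <public_vip> stands for the rendered network_cfg.public_vip.ip and that the ports below match the haproxy binds added in this commit:

    # Horizon through the new proxy-dashboard listener on port 80
    curl -sI "http://<public_vip>/" | head -n1
    # API ports newly bound on the public VIP: glance-api, nova-api, nova-metadata, cinder-api
    for port in 9292 8774 8775 8776; do
        curl -s -o /dev/null -w "$port => HTTP %{http_code}\n" "http://<public_vip>:$port/"
    done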
diff --git a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh
index abe4d06..bc92bac 100644 (file)
@@ -2,5 +2,5 @@ keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-role-add --user=cinder --tenant=service --role=admin
 
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=cinder --type=volume --description="OpenStack Block Storage"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ public_vip.ip }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s
 
diff --git a/deploy/adapters/ansible/roles/dashboard/tasks/main.yml b/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
index 2cad117..9206fda 100644 (file)
@@ -5,6 +5,9 @@
   action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
   with_items: packages | union(packages_noarch)
 
+- name: set apache2 config
+  template: src=ports.j2 dest=/etc/apache2/ports.conf backup=yes
+
 - name: remove ubuntu theme
   action: "{{ ansible_pkg_mgr }} name=openstack-dashboard-ubuntu-theme state=absent"
 
diff --git a/deploy/adapters/ansible/roles/dashboard/templates/ports.j2 b/deploy/adapters/ansible/roles/dashboard/templates/ports.j2
new file mode 100644 (file)
index 0000000..0bfa042
--- /dev/null
@@ -0,0 +1,15 @@
+# if you just change the port or add more ports here, you will likely also
+# have to change the VirtualHost statement in
+# /etc/apache2/sites-enabled/000-default.conf
+
+Listen {{ internal_ip }}:80
+
+<IfModule ssl_module>
+    Listen 443
+</IfModule>
+
+<IfModule mod_gnutls.c>
+    Listen 443
+</IfModule>
+
+# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
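Pinning apache to {{ internal_ip }}:80 avoids a port conflict on the controllers: haproxy can then bind port 80 on the public VIP (see the proxy-dashboard listener added to haproxy.cfg below) and forward dashboard traffic to apache on each host's internal address. A quick way to confirm the split on a controller, assuming ss is available (netstat -lntp works as well):

    # apache should listen on the internal address only; the VIP:80 socket belongs to haproxy
    ss -lntp | grep ':80 '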
diff --git a/deploy/adapters/ansible/roles/ha/files/notify.sh b/deploy/adapters/ansible/roles/ha/files/notify.sh
deleted file mode 100644 (file)
index 5edffe8..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-python /usr/local/bin/failover.py $1
-mysql -uroot -e"flush hosts"
-service mysql restart
diff --git a/deploy/adapters/ansible/roles/ha/tasks/main.yml b/deploy/adapters/ansible/roles/ha/tasks/main.yml
index edd5e6d..668f684 100644 (file)
 - name: copy galera_chk file
   copy: src=galera_chk dest=/usr/local/bin/galera_chk mode=0777
 
-- name: copy notify file
-  copy: src=notify.sh dest=/usr/local/bin/notify.sh mode=0777
-
-- name: copy notify template file
-  template: src=failover.j2 dest=/usr/local/bin/failover.py mode=0777
-
 - name: add network service
   lineinfile: dest=/etc/services state=present
               line="mysqlchk          9200/tcp"
diff --git a/deploy/adapters/ansible/roles/ha/templates/failover.j2 b/deploy/adapters/ansible/roles/ha/templates/failover.j2
deleted file mode 100644 (file)
index 3b08cf2..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-import ConfigParser, os, socket
-import logging as LOG
-import pxssh
-import sys
-import re
-
-LOG_FILE="/var/log/mysql_failover"
-try:
-    os.remove(LOG_FILE)
-except:
-    pass
-
-LOG.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename=LOG_FILE,level=LOG.DEBUG)
-ha_vip = {{ internal_vip.ip }}
-LOG.info("ha_vip: %s" % ha_vip)
-
-#ha_vip = "10.1.0.50"
-galera_path = '/etc/mysql/conf.d/wsrep.cnf'
-pattern = re.compile(r"gcomm://(?P<prev_ip>.*)")
-
-def ssh_get_hostname(ip):
-    try:
-        s = pxssh.pxssh()
-        s.login("%s" % ip, "root", "root")
-        s.sendline('hostname')   # run a command
-        s.prompt()             # match the prompt
-        result = s.before.strip()      # print everything before the prompt.
-        return result.split(os.linesep)[1]
-    except pxssh.ExceptionPxssh as e:
-        LOG.error("pxssh failed on login.")
-        raise
-
-def failover(mode):
-    config = ConfigParser.ConfigParser()
-    config.optionxform = str
-    config.readfp(open(galera_path))
-    wsrep_cluster_address = config.get("mysqld", "wsrep_cluster_address")
-    wsrep_cluster_address = pattern.match(wsrep_cluster_address).groupdict()["prev_ip"]
-
-    LOG.info("old wsrep_cluster_address = %s" % wsrep_cluster_address)
-
-    if mode == "master":
-        # refresh wsrep_cluster_address to null
-        LOG.info("I'm being master, set wsrep_cluster_address to null")
-        wsrep_cluster_address = ""
-
-    elif mode == "backup":
-        # refresh wsrep_cluster_address to master int ip
-        hostname = ssh_get_hostname(ha_vip)
-        wsrep_cluster_address = socket.gethostbyname(hostname)
-        LOG.info("I'm being slave, set wsrep_cluster_address to master internal ip")
-
-    LOG.info("new wsrep_cluster_address = %s" % wsrep_cluster_address)
-    wsrep_cluster_address  = "gcomm://%s" % wsrep_cluster_address
-    config.set("mysqld", "wsrep_cluster_address", wsrep_cluster_address)
-    with open(galera_path, 'wb') as fp:
-        #config.write(sys.stdout)
-        config.write(fp)
-
-    os.system("service mysql restart")
-    LOG.info("failover success!!!")
-
-if __name__ == "__main__":
-    LOG.debug("call me: %s" % sys.argv)
-    failover(sys.argv[1])
diff --git a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
index f1a2312..8f026fa 100644 (file)
@@ -36,6 +36,7 @@ listen  proxy-glance_registry_cluster
 
 listen  proxy-glance_api_cluster
     bind {{ internal_vip.ip }}:9292
+    bind {{ public_vip.ip }}:9292
     option tcpka
     option httpchk
     option tcplog
@@ -94,6 +95,7 @@ listen  proxy-keystone_public_internal_cluster
 
 listen  proxy-nova_compute_api_cluster
     bind {{ internal_vip.ip }}:8774
+    bind {{ public_vip.ip }}:8774
     mode tcp
     option httpchk
     option tcplog
@@ -104,6 +106,7 @@ listen  proxy-nova_compute_api_cluster
 
 listen  proxy-nova_metadata_api_cluster
     bind {{ internal_vip.ip }}:8775
+    bind {{ public_vip.ip }}:8775
     option tcpka
     option tcplog
     balance source
@@ -113,6 +116,7 @@ listen  proxy-nova_metadata_api_cluster
 
 listen  proxy-cinder_api_cluster
     bind {{ internal_vip.ip }}:8776
+    bind {{ public_vip.ip }}:8776
     mode tcp
     option httpchk
     option tcplog
@@ -121,6 +125,16 @@ listen  proxy-cinder_api_cluster
     server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5
 {% endfor %}
 
+listen  proxy-dashboard
+    bind {{ public_vip.ip }}:80
+    option tcpka
+    option httpchk
+    option tcplog
+    balance source
+{% for host,ip in haproxy_hosts.items() %}
+    server {{ host }} {{ ip }}:80 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
 listen stats
     mode http
     bind 0.0.0.0:8888
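With the extra bind lines and the new proxy-dashboard listener, haproxy terminates all public traffic on the public VIP and balances it across the controllers' internal addresses. A sketch of a post-change check on a controller, assuming the rendered config lives at the usual /etc/haproxy/haproxy.cfg:

    # Validate the rendered configuration before reloading haproxy
    haproxy -c -f /etc/haproxy/haproxy.cfg
    # Confirm the public VIP ports are bound (only on the node currently holding the VIP)
    ss -lnt | grep -E ':(80|9292|8774|8775|8776) '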
diff --git a/deploy/adapters/ansible/roles/ha/templates/keepalived.conf b/deploy/adapters/ansible/roles/ha/templates/keepalived.conf
index f1e6db5..a2e008a 100644 (file)
@@ -19,30 +19,24 @@ vrrp_instance internal_vip {
     virtual_ipaddress {
         {{ internal_vip.ip }}/{{ internal_vip.netmask }}  dev {{ internal_vip.interface }}
     }
+}
 
-    notify_master "/usr/local/bin/notify.sh master"
-    notify_backup "/usr/local/bin/notify.sh backup"
+vrrp_instance public_vip {
+    interface {{ network_cfg.public_vip.interface }}
+    virtual_router_id {{ vrouter_id_public }}
+    state BACKUP
+    nopreempt
+    preempt_delay 30
+    advert_int 1
+    priority 100
 
-}
+    authentication {
+        auth_type PASS
+        auth_pass 4321
+    }
 
-#vrrp_instance public_vip {
-#    interface {{ network_cfg.public_vip.interface }}
-#    virtual_router_id {{ vrouter_id_public }}
-#    state BACKUP
-#    nopreempt
-#    preempt_delay 30
-#    advert_int 1
-#    priority 100
-#
-#    authentication {
-#        auth_type PASS
-#        auth_pass 4321
-#    }
-#
-#    virtual_ipaddress {
-#        {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }}  dev {{ network_cfg.public_vip.interface }}
-#    }
-#
-#}
-#
-#    notify_backup "/usr/local/bin/notify.sh backup"
+    virtual_ipaddress {
+        {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }}  dev {{ network_cfg.public_vip.interface }}
+    }
+
+}
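The previously commented-out public_vip vrrp_instance is enabled, and the notify_master/notify_backup hooks are dropped along with notify.sh and failover.j2, so keepalived now only moves the two VIPs and no longer drives MySQL failover. A quick check on the node expected to hold MASTER state, with <internal_vip> and <public_vip> as placeholders for the rendered addresses:

    # Both VIPs should be plumbed on the active node
    ip addr show | grep -E '<internal_vip>|<public_vip>'
    service keepalived status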
diff --git a/deploy/adapters/ansible/roles/keystone/templates/keystone_init b/deploy/adapters/ansible/roles/keystone/templates/keystone_init
index d9cc65a..c7e2232 100644 (file)
@@ -22,7 +22,7 @@ keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}
 
 # regist keystone
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=keystone --type=identity --description="OpenStack Identity"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ internal_vip.ip }}:5000/v2.0 --internalurl=http://{{ internal_vip.ip }}:5000/v2.0 --adminurl=http://{{ internal_vip.ip }}:35357/v2.0
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ public_vip.ip }}:5000/v2.0 --internalurl=http://{{ internal_vip.ip }}:5000/v2.0 --adminurl=http://{{ internal_vip.ip }}:35357/v2.0
 
 # Create a glance user that the Image Service can use to authenticate with the Identity service
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name=glance --pass={{ GLANCE_PASS }} --email=glance@example.com
@@ -30,7 +30,7 @@ keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}
 
 #Register the Image Service with the Identity service so that other OpenStack services can locate it
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=glance --type=image --description="OpenStack Image Service"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ internal_vip.ip }}:9292 --internalurl=http://{{ internal_vip.ip }}:9292 --adminurl=http://{{ internal_vip.ip }}:9292
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ public_vip.ip }}:9292 --internalurl=http://{{ internal_vip.ip }}:9292 --adminurl=http://{{ internal_vip.ip }}:9292
 
 #Create a nova user that Compute uses to authenticate with the Identity Service
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name=nova --pass={{ NOVA_PASS }} --email=nova@example.com
@@ -38,10 +38,10 @@ keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}
 
 # register Compute with the Identity Service so that other OpenStack services can locate it
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=nova --type=compute --description="OpenStack Compute"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ public_vip.ip }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s
 
 # register netron user, role and service
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name neutron --pass {{ NEUTRON_PASS }} --email neutron@example.com
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-role-add --user neutron --tenant service --role admin
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name neutron --type network --description "OpenStack Networking"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ internal_vip.ip }}:9696 --adminurl http://{{ internal_vip.ip }}:9696 --internalurl http://{{ internal_vip.ip }}:9696
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ public_vip.ip }}:9696 --adminurl http://{{ internal_vip.ip }}:9696 --internalurl http://{{ internal_vip.ip }}:9696
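Each service's publicurl now advertises the public VIP while internalurl and adminurl stay on the internal VIP. A verification sketch using the same keystone CLI the script relies on, with the token, VIP addresses, and grep pattern as placeholders:

    keystone --os-token=<ADMIN_TOKEN> --os-endpoint="http://<internal_vip>:35357/v2.0" endpoint-list \
        | grep '<public_vip>'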
diff --git a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml
index 991e33c..26758f5 100644 (file)
@@ -9,10 +9,12 @@
   notify:
     - restart neutron control services
 
-- name: restart neutron-server
-  service: name=neutron-server  state=restarted enabled=yes
-  delegate_to: "{{ item }}"
-  run_once: True
-  with_items: groups['controller']
+- name: restart first neutron-server
+  service: name=neutron-server state=restarted enabled=yes
+  when: inventory_hostname == groups['controller'][0]
+
+- name: restart other neutron-server
+  service: name=neutron-server state=restarted enabled=yes
+  when: inventory_hostname != groups['controller'][0]
 
 - meta: flush_handlers
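The single delegated run_once restart is replaced by two host-scoped tasks, so neutron-server on the first controller is restarted by its own task and the remaining controllers follow in the second one. A hedged ad-hoc check that all controllers came back, assuming the playbook's controller group and an inventory path of your own:

    ansible -i <inventory> controller -m shell -a 'service neutron-server status'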
diff --git a/deploy/conf/baremetal_cluster_sh.yml b/deploy/conf/baremetal_cluster_sh.yml
new file mode 100644 (file)
index 0000000..1078cb5
--- /dev/null
@@ -0,0 +1,43 @@
+
+TYPE: baremetal
+FLAVOR: cluster
+POWER_TOOL: ipmitool
+
+ipmiUser: root
+ipmiPass: Huawei@123
+
+hosts:
+  - name: host1
+    mac: 'D8:49:0B:DA:2A:28'
+    ipmiUser: root
+    ipmiPass: Huawei@123
+    ipmiIp: 192.168.2.145
+    roles:
+      - controller
+      - ha
+
+  - name: host2
+    mac: 'D8:49:0B:DA:5B:5D'
+    ipmiIp: 192.168.2.155
+    roles:
+      - controller
+      - ha
+
+  - name: host3
+    mac: 'D8:49:0B:DA:5A:B7'
+    ipmiIp: 192.168.2.165
+    roles:
+      - controller
+      - ha
+
+  - name: host4
+    mac: 'D8:49:0B:DA:58:99'
+    ipmiIp: 192.168.2.175
+    roles:
+      - compute
+
+  - name: host5
+    mac: 'D8:49:0B:DA:56:85'
+    ipmiIp: 192.168.2.185
+    roles:
+      - compute
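The new baremetal config describes a five-node cluster (three controller+ha nodes, two compute nodes) powered through ipmitool with global or per-host IPMI credentials. A quick sanity check of the credentials before deploying, taking host1's values straight from the file:

    ipmitool -I lanplus -H 192.168.2.145 -U root -P 'Huawei@123' chassis power status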
diff --git a/deploy/host_vm.sh b/deploy/host_vm.sh
deleted file mode 100644 (file)
index 0754b1f..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-host_vm_dir=$WORK_DIR/vm
-function tear_down_machines() {
-    for i in $HOSTNAMES; do
-        sudo virsh destroy $i
-        sudo virsh undefine $i
-        rm -rf $host_vm_dir/$i
-    done
-}
-
-function reboot_hosts() {
-    log_warn "reboot_hosts do nothing"
-}
-
-function launch_host_vms() {
-    old_ifs=$IFS
-    IFS=,
-    tear_down_machines
-    #function_bod
-    mac_array=($machines)
-    log_info "bringing up pxe boot vms"
-    i=0
-    for host in $HOSTNAMES; do
-        log_info "creating vm disk for instance $host"
-        vm_dir=$host_vm_dir/$host
-        mkdir -p $vm_dir
-        sudo qemu-img create -f raw $vm_dir/disk.img ${VIRT_DISK}
-        # create vm xml
-        sed -e "s/REPLACE_MEM/$VIRT_MEM/g" \
-          -e "s/REPLACE_CPU/$VIRT_CPUS/g" \
-          -e "s/REPLACE_NAME/$host/g" \
-          -e "s#REPLACE_IMAGE#$vm_dir/disk.img#g" \
-          -e "s/REPLACE_BOOT_MAC/${mac_array[i]}/g" \
-          -e "s/REPLACE_BRIDGE_MGMT/br_install/g" \
-          -e "s/REPLACE_BRIDGE_TENANT/br_install/g" \
-          -e "s/REPLACE_BRIDGE_PUBLIC/br_install/g" \
-          -e "s/REPLACE_BRIDGE_STORAGE/br_install/g" \
-          $COMPASS_DIR/deploy/template/vm/host.xml\
-          > $vm_dir/libvirt.xml
-
-        sudo virsh define $vm_dir/libvirt.xml
-        sudo virsh start $host
-        let i=i+1
-    done
-    IFS=$old_ifs
-}
-
-function get_host_macs() {
-    local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
-    local mac_generator=${COMPASS_DIR}/deploy/mac_generator.sh
-    local machines=
-
-    chmod +x $mac_generator
-    mac_array=`$mac_generator $VIRT_NUMBER`
-    machines=`echo $mac_array|sed 's/ /,/g'`
-
-    echo "test: true" >> $config_file
-    echo "pxe_boot_macs: [${machines}]" >> $config_file
-
-    echo $machines
-}
-