xci: Add ansible files for sandbox ha flavor 21/32221/6
author	Fatih Degirmenci <fatih.degirmenci@ericsson.com>
Wed, 29 Mar 2017 13:43:00 +0000 (15:43 +0200)
committer	Fatih Degirmenci <fatih.degirmenci@ericsson.com>
Fri, 31 Mar 2017 08:06:55 +0000 (08:06 +0000)
This patch adds the ha target host configuration playbook, inventory, variable
files and so on, to use until we have time to fix things more properly and the
missing functionality becomes available upstream. This approach will result in
some duplication, but it is something we can live with for now.

The playbook to configure target hosts will be the same as the one we will
have for the noha flavor, but different for the mini flavor.

The user variables and openstack user configuration will differ between
flavors, since they hold deployment-specific configuration.

Change-Id: I4e2b375b9f8f6bd5f8c5da91a522b78d61a58125
Signed-off-by: Fatih Degirmenci <fatih.degirmenci@ericsson.com>
prototypes/xci/file/ha/configure-targethosts.yml [new file with mode: 0644]
prototypes/xci/file/ha/flavor-vars.yml
prototypes/xci/file/ha/inventory
prototypes/xci/file/ha/openstack_user_config.yml
prototypes/xci/file/ha/user_variables.yml [new file with mode: 0644]

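For reference, a minimal sketch of how these files are meant to be consumed
(the invocation below is illustrative, not part of this patch; the xci scripts
are expected to copy the flavor files to wherever openstack-ansible looks for
them on the deployment host):

    # run the target host configuration playbook against the flavor inventory
    ansible-playbook -i inventory configure-targethosts.yml
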
diff --git a/prototypes/xci/file/ha/configure-targethosts.yml b/prototypes/xci/file/ha/configure-targethosts.yml
new file mode 100644 (file)
index 0000000..6dc147f
--- /dev/null
@@ -0,0 +1,36 @@
+---
+- hosts: all
+  remote_user: root
+  tasks:
+    - name: add public key to host
+      copy:
+        src: ../file/authorized_keys
+        dest: /root/.ssh/authorized_keys
+    - name: configure modules
+      copy:
+        src: ../file/modules
+        dest: /etc/modules
+
+- hosts: controller
+  remote_user: root
+  vars_files:
+    - ../var/{{ ansible_os_family }}.yml
+    - ../var/flavor-vars.yml
+  roles:
+    # TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+
+- hosts: compute
+  remote_user: root
+  vars_files:
+    - ../var/{{ ansible_os_family }}.yml
+    - ../var/flavor-vars.yml
+  roles:
+    # TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+
+- hosts: compute01
+  remote_user: root
+  # TODO: this role is for configuring NFS on xenial and needs adjustment for other distros
+  roles:
+    - role: configure-nfs
diff --git a/prototypes/xci/file/ha/flavor-vars.yml b/prototypes/xci/file/ha/flavor-vars.yml
index e69de29..3cd1d62 100644 (file)
@@ -0,0 +1,37 @@
+---
+host_info: {
+    'opnfv': {
+        'MGMT_IP': '172.29.236.10',
+        'VLAN_IP': '192.168.122.2',
+        'STORAGE_IP': '172.29.244.10'
+    },
+    'controller00': {
+        'MGMT_IP': '172.29.236.11',
+        'VLAN_IP': '192.168.122.3',
+        'STORAGE_IP': '172.29.244.11'
+    },
+    'controller01': {
+        'MGMT_IP': '172.29.236.12',
+        'VLAN_IP': '192.168.122.4',
+        'STORAGE_IP': '172.29.244.12'
+    },
+    'controller02': {
+        'MGMT_IP': '172.29.236.13',
+        'VLAN_IP': '192.168.122.5',
+        'STORAGE_IP': '172.29.244.13'
+    },
+    'compute00': {
+        'MGMT_IP': '172.29.236.14',
+        'VLAN_IP': '192.168.122.6',
+        'STORAGE_IP': '172.29.244.14',
+        'VLAN_IP_SECOND': '173.29.241.1',
+        'VXLAN_IP': '172.29.240.14'
+    },
+    'compute01': {
+        'MGMT_IP': '172.29.236.15',
+        'VLAN_IP': '192.168.122.7',
+        'STORAGE_IP': '172.29.244.15',
+        'VLAN_IP_SECOND': '173.29.241.2',
+        'VXLAN_IP': '172.29.240.15'
+    }
+}
diff --git a/prototypes/xci/file/ha/inventory b/prototypes/xci/file/ha/inventory
index e69de29..94b1d07 100644 (file)
@@ -0,0 +1,11 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+controller01 ansible_ssh_host=192.168.122.4
+controller02 ansible_ssh_host=192.168.122.5
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.6
+compute01 ansible_ssh_host=192.168.122.7
diff --git a/prototypes/xci/file/ha/openstack_user_config.yml b/prototypes/xci/file/ha/openstack_user_config.yml
index e69de29..43e88c0 100644 (file)
@@ -0,0 +1,278 @@
+---
+cidr_networks:
+  container: 172.29.236.0/22
+  tunnel: 172.29.240.0/22
+  storage: 172.29.244.0/22
+
+used_ips:
+  - "172.29.236.1,172.29.236.50"
+  - "172.29.240.1,172.29.240.50"
+  - "172.29.244.1,172.29.244.50"
+  - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+  internal_lb_vip_address: 172.29.236.222
+  external_lb_vip_address: 192.168.122.220
+  tunnel_bridge: "br-vxlan"
+  management_bridge: "br-mgmt"
+  provider_networks:
+    - network:
+        container_bridge: "br-mgmt"
+        container_type: "veth"
+        container_interface: "eth1"
+        ip_from_q: "container"
+        type: "raw"
+        group_binds:
+          - all_containers
+          - hosts
+        is_container_address: true
+        is_ssh_address: true
+    - network:
+        container_bridge: "br-vxlan"
+        container_type: "veth"
+        container_interface: "eth10"
+        ip_from_q: "tunnel"
+        type: "vxlan"
+        range: "1:1000"
+        net_name: "vxlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth12"
+        host_bind_override: "eth12"
+        type: "flat"
+        net_name: "flat"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth11"
+        type: "vlan"
+        range: "1:1"
+        net_name: "vlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-storage"
+        container_type: "veth"
+        container_interface: "eth2"
+        ip_from_q: "storage"
+        type: "raw"
+        group_binds:
+          - glance_api
+          - cinder_api
+          - cinder_volume
+          - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# rsyslog server
+# log_hosts:
+# log1:
+#  ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# cinder api services
+storage-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.15"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+  controller01:
+    ip: 172.29.236.12
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.15"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+  controller02:
+    ip: 172.29.236.13
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.15"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# heat
+orchestration_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# horizon
+dashboard_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# neutron server, agents (L3, etc)
+network_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# ceilometer (telemetry API)
+metering-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# aodh (telemetry alarm service)
+metering-alarm_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# gnocchi (telemetry metrics storage)
+metrics_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# nova hypervisors
+compute_hosts:
+  compute00:
+    ip: 172.29.236.14
+  compute01:
+    ip: 172.29.236.15
+
+# ceilometer compute agent (telemetry)
+metering-compute_hosts:
+  compute00:
+    ip: 172.29.236.14
+  compute01:
+    ip: 172.29.236.15
+# cinder volume hosts (LVM-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        lvm:
+          volume_group: cinder-volumes
+          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+          volume_backend_name: LVM_iSCSI
+          iscsi_ip_address: "172.29.244.11"
+  controller01:
+    ip: 172.29.236.12
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        lvm:
+          volume_group: cinder-volumes
+          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+          volume_backend_name: LVM_iSCSI
+          iscsi_ip_address: "172.29.244.12"
+  controller02:
+    ip: 172.29.236.13
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        lvm:
+          volume_group: cinder-volumes
+          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+          volume_backend_name: LVM_iSCSI
+          iscsi_ip_address: "172.29.244.13"
diff --git a/prototypes/xci/file/ha/user_variables.yml b/prototypes/xci/file/ha/user_variables.yml
new file mode 100644 (file)
index 0000000..65cbcc1
--- /dev/null
@@ -0,0 +1,27 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
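
For reference, the keepalived VIP CIDRs above deliberately match the
internal_lb_vip_address and external_lb_vip_address set in
openstack_user_config.yml. A quick post-deployment sanity check (illustrative
commands, assuming keepalived has brought the VIPs up on the active
controller) could be:

    # the internal VIP should be bound on br-mgmt, the external one on br-vlan
    ip addr show br-mgmt | grep 172.29.236.222
    ip addr show br-vlan | grep 192.168.122.220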