Bring in the files from the base scenario into os-nosdn-osm
releng-xci-scenarios.git: scenarios/os-nosdn-osm/role/os-nosdn-osm/files/mini/openstack_user_config.yml
diff --git a/scenarios/os-nosdn-osm/role/os-nosdn-osm/files/mini/openstack_user_config.yml b/scenarios/os-nosdn-osm/role/os-nosdn-osm/files/mini/openstack_user_config.yml
new file mode 100644
index 0000000..86b87c1
--- /dev/null
+++ b/scenarios/os-nosdn-osm/role/os-nosdn-osm/files/mini/openstack_user_config.yml
@@ -0,0 +1,170 @@
+---
+cidr_networks:
+  container: 172.29.236.0/22
+  tunnel: 172.29.240.0/22
+  storage: 172.29.244.0/22
+
+used_ips:
+  - "172.29.236.1,172.29.236.50"
+  - "172.29.240.1,172.29.240.50"
+  - "172.29.244.1,172.29.244.50"
+  - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+  internal_lb_vip_address: 172.29.236.11
+  external_lb_vip_address: 192.168.122.3
+  tunnel_bridge: "br-vxlan"
+  management_bridge: "br-mgmt"
+  provider_networks:
+    - network:
+        container_bridge: "br-mgmt"
+        container_type: "veth"
+        container_interface: "eth1"
+        ip_from_q: "container"
+        type: "raw"
+        group_binds:
+          - all_containers
+          - hosts
+        is_container_address: true
+        is_ssh_address: true
+    - network:
+        container_bridge: "br-vxlan"
+        container_type: "veth"
+        container_interface: "eth10"
+        ip_from_q: "tunnel"
+        type: "vxlan"
+        range: "1:1000"
+        net_name: "vxlan"
+        group_binds:
+          - neutron_openvswitch_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth12"
+        host_bind_override: "eth12"
+        type: "flat"
+        net_name: "flat"
+        group_binds:
+          - neutron_openvswitch_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth11"
+        type: "vlan"
+        range: "1:1"
+        net_name: "vlan"
+        group_binds:
+          - neutron_openvswitch_agent
+    - network:
+        container_bridge: "br-storage"
+        container_type: "veth"
+        container_interface: "eth2"
+        ip_from_q: "storage"
+        type: "raw"
+        group_binds:
+          - glance_api
+          - cinder_api
+          - cinder_volume
+          - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+#  ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.12"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+  compute00:
+    ip: 172.29.236.12
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.12"
+              share: "/volumes"
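
A note on prerequisites: the provider_networks section above only maps container veth interfaces (eth1, eth10, eth11, eth12, eth2) onto host bridges. The bridges themselves (br-mgmt, br-vxlan, br-vlan, br-storage) must already exist on every host before OpenStack-Ansible runs; in XCI they are created by the scenario's host network configuration, not by this file. A minimal sketch of what that host plumbing could look like, assuming a netplan-managed Ubuntu host and a hypothetical uplink named ens3 (illustrative only, not part of this commit):

# /etc/netplan/01-osa-bridges.yaml (hypothetical sketch)
network:
  version: 2
  ethernets:
    ens3: {}                         # assumed uplink; real name varies per host
  bridges:
    br-mgmt:                         # management_bridge named in global_overrides
      interfaces: [ens3]
      addresses: [172.29.236.11/22]  # controller00's "container" address above
    br-vxlan: {}                     # tunnel_bridge; carries a "tunnel" range IP
    br-vlan: {}                      # provider bridge; typically carries no IP
    br-storage: {}                   # carries a "storage" range IP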
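As the in-file comments say twice, the glance NFS client settings and the cinder backend are repeated under each infra host only to illustrate that each container could have different storage targets. A minimal sketch of the global alternative those comments mention, assuming the standard OpenStack-Ansible user_variables.yml override file (all values are copied from this commit; only the placement changes, so the per-host limit_container_types filter is no longer needed):

# user_variables.yml (sketch): one definition for all glance containers
glance_nfs_client:
  - server: "172.29.244.12"
    remote_path: "/images"
    local_path: "/var/lib/glance/images"
    type: "nfs"
    options: "_netdev,auto"

# ...and one backend definition shared by all cinder_volume hosts
cinder_backends:
  nfs_volume:
    volume_backend_name: NFS_VOLUME1
    volume_driver: cinder.volume.drivers.nfs.NfsDriver
    nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
    nfs_shares_config: /etc/cinder/nfs_shares
    shares:
      - ip: "172.29.244.12"
        share: "/volumes"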
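For reference, the shares list is what the cinder role uses to populate the file named by nfs_shares_config, which the NFS driver then reads one export per line. With the values in this commit, the rendered /etc/cinder/nfs_shares on the cinder_volume container would be expected to contain a single entry (a sketch, assuming the default rendering):

172.29.244.12:/volumes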