-salt 'ctl01*' cmd.run ". /root/keystonercv3; openstack compute service list; openstack network agent list; openstack stack list; openstack volume list"
+salt 'ctl01*' cmd.run ". /root/keystonercv3; openstack compute service list; openstack network agent list; openstack stack list; openstack volume service list"
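+# 'openstack volume service list' checks the cinder services themselves; plain 'volume list' only enumerates volumes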
salt 'ctl01*' cmd.run ". /root/keystonercv3; openstack network create --share --external --provider-network-type flat --provider-physical-network physnet1 floating_net"
salt 'ctl01*' cmd.run ". /root/keystonercv3; openstack subnet create --gateway 10.16.0.1 --no-dhcp --allocation-pool start=10.16.0.130,end=10.16.0.254 --network floating_net --subnet-range 10.16.0.0/24 floating_subnet"
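+# 'salt -I' (--pillar) targets minions by pillar data, a shorter form of the compound matcher -C 'I@...'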
-salt -C 'I@rabbitmq:server' state.sls rabbitmq
-salt -C 'I@rabbitmq:server' cmd.run "rabbitmqctl status"
+salt -I 'rabbitmq:server' state.sls rabbitmq
+salt -I 'rabbitmq:server' cmd.run "rabbitmqctl status"
-salt -C 'I@mysql:server' state.sls mysql
-salt -C 'I@memcached:server' state.sls memcached
+salt -I 'mysql:server' state.sls mysql
+salt -I 'memcached:server' state.sls memcached
-salt -C 'I@keystone:server' state.sls keystone.server
-salt -C 'I@keystone:server' service.restart apache2
-salt -C 'I@keystone:server' state.sls keystone.client
-salt -C 'I@keystone:server' cmd.run ". /root/keystonercv3; openstack service list"
+salt -I 'keystone:server' state.sls keystone.server
+salt -I 'keystone:server' service.restart apache2
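+# keystone runs under apache2 (WSGI), so the web server is restarted before keystone.client registers services and endpoints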
+salt -I 'keystone:server' state.sls keystone.client
+salt -I 'keystone:server' cmd.run ". /root/keystonercv3; openstack service list"
-salt -C 'I@glance:server' state.sls glance
-salt -C 'I@nova:controller' state.sls nova
-salt -C 'I@heat:server' state.sls heat
-salt -C 'I@cinder:controller' state.sls cinder
+salt -I 'glance:server' state.sls glance
+salt -I 'nova:controller' state.sls nova
+salt -I 'heat:server' state.sls heat
-salt -C 'I@neutron:server' state.sls neutron
-salt -C 'I@neutron:gateway' state.sls neutron
+salt -I 'cinder:controller' state.sls cinder
+salt -I 'cinder:volume' state.sls cinder
-salt -C 'I@nova:compute' state.sls nova
+salt -I 'neutron:server' state.sls neutron
+salt -I 'neutron:gateway' state.sls neutron
+
+salt -I 'nova:compute' state.sls nova
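+# states run in dependency order: messaging and database first, then keystone, then the API services, computes last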
- system.linux.system.repo.mcp.openstack
- system.linux.system.repo.mcp.extra
- system.linux.system.repo.saltstack.xenial
+- system.linux.storage.loopback
- system.nova.compute.single
- service.neutron.compute.single
+- service.cinder.volume.single
+- system.cinder.volume.backend.lvm
- cluster.virtual-mcp-ocata-odl
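+# system.linux.storage.loopback provides a file-backed block device for the LVM cinder volume backend on the virtual computes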
parameters:
_param:
external_interface: ens6
interface_mtu: 9000
linux_system_codename: xenial
+ loopback_device_size: 10
nova:
compute:
vncproxy_url: http://${_param:cluster_vip_address}:6080
- system.nova.control.single
- system.neutron.control.opendaylight.single
- system.cinder.control.single
+- system.cinder.control.backend.lvm
- system.heat.server.single
- service.mysql.server.single
- system.galera.server.database.cinder
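+# the control plane picks up the LVM backend definition to match the new cinder volume role on the computes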
galera_server_maintenance_password: opnfv_secret
galera_server_admin_password: opnfv_secret
cluster_vip_address: ${_param:cluster_public_host}
- cluster_local_address: ${_param:single_address}
+ cluster_local_address: ${_param:openstack_control_address}
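+# the local cluster address now follows the dedicated control-network parameter instead of the generic single_address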
cluster_node01_hostname: ctl01
cluster_node01_address: 172.16.10.101
cluster_node02_hostname: ctl02
- system.linux.system.repo.mcp.openstack
- system.linux.system.repo.mcp.extra
- system.linux.system.repo.saltstack.xenial
+- system.linux.storage.loopback
- system.nova.compute.single
- service.neutron.compute.single
+- service.cinder.volume.single
+- system.cinder.volume.backend.lvm
- cluster.virtual-mcp-ocata-ovs
parameters:
_param:
external_interface: ens6
interface_mtu: 9000
linux_system_codename: xenial
+ loopback_device_size: 10
nova:
compute:
vncproxy_url: http://${_param:cluster_vip_address}:6080
- system.nova.control.single
- system.neutron.control.openvswitch.single
- system.cinder.control.single
+- system.cinder.control.backend.lvm
- system.heat.server.single
- service.mysql.server.single
- system.galera.server.database.cinder
galera_server_maintenance_password: opnfv_secret
galera_server_admin_password: opnfv_secret
cluster_vip_address: ${_param:cluster_public_host}
- cluster_local_address: ${_param:single_address}
+ cluster_local_address: ${_param:openstack_control_address}
cluster_node01_hostname: ctl01
cluster_node01_address: 172.16.10.101
cluster_node02_hostname: ctl02
names:
- ctl01
- ctl01.${_param:cluster_domain}
- ctl02:
- address: ${_param:openstack_control_node02_address}
- names:
- - ctl02
- - ctl02.${_param:cluster_domain}
- ctl03:
- address: ${_param:openstack_control_node03_address}
- names:
- - ctl03
- - ctl03.${_param:cluster_domain}
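+# ctl02/ctl03 host entries are dropped; only ctl01 and gtw01 keep static hosts entries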
gtw01:
address: ${_param:openstack_gateway_address}
names:
-Subproject commit 8a7dc2f6ae598cc95300fd404dfb8dd75185ecb4
+Subproject commit 947500aa320e5d6ba26fa5fe18f4bd260015fd9d
# wait until ssh on Salt master is available
while (($attempt <= $total_attempts)); do
- ssh -i ${SSH_OPTS} ubuntu@${SALT_MASTER} uptime
+ ssh ${SSH_OPTS} ubuntu@${SALT_MASTER} uptime
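+ # SSH_OPTS holds general ssh options, not a key path, so it must not follow '-i' (which expects an identity file)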
case $? in
0) echo "${attempt}> Success"; break ;;
*) echo "${attempt}/${total_attempts}> ssh server isn't ready yet, waiting for ${sleep_time} seconds ..." ;;