import contextlib
import logging
import textwrap
import time
from io import StringIO
from configparser import ConfigParser

from teuthology.orchestra import run
from teuthology import misc
from teuthology.contextutil import nested
log = logging.getLogger(__name__)

DEVSTACK_GIT_REPO = 'https://github.com/openstack-dev/devstack.git'
DS_STABLE_BRANCHES = ("havana", "grizzly")

is_devstack_node = lambda role: role.startswith('devstack')
is_osd_node = lambda role: role.startswith('osd')

@contextlib.contextmanager
def task(ctx, config):
    if config is None:
        config = {}
    if not isinstance(config, dict):
        raise TypeError("config must be a dict")
    with nested(lambda: install(ctx=ctx, config=config),
                lambda: smoke(ctx=ctx, config=config),
                ):
        yield
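
# A minimal example of wiring this task into a teuthology job (a sketch; the
# role layout and the 'havana' branch value are hypothetical, but the
# 'devstack'/'osd' role prefixes and the 'branch' key are what this task
# actually inspects):
#
#     roles:
#     - [mon.0, osd.0, osd.1]
#     - [devstack.0]
#     tasks:
#     - install:
#     - ceph:
#     - devstack:
#         branch: havana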


@contextlib.contextmanager
def install(ctx, config):
    """
    Install OpenStack DevStack and configure it to use a Ceph cluster for
    Glance, Cinder, and Nova.

    Requires one node with a 'devstack' role and at least one with an 'osd'
    role.

    Since devstack runs rampant on the system it's used on, typically you will
    want to reprovision that machine after using devstack on it.

    Also, the default 2GB of RAM that is given to vps nodes is insufficient. I
    recommend 4GB. Downburst can be instructed to give 4GB to a vps node by
    adding this to the yaml:

        downburst:
            ram: 4G

    This was created using documentation found here:
    https://github.com/openstack-dev/devstack/blob/master/README.md
    http://docs.ceph.com/docs/master/rbd/rbd-openstack/
    """
    if not isinstance(config, dict):
        raise TypeError("config must be a dict")

    devstack_node = list(ctx.cluster.only(is_devstack_node).remotes.keys())[0]
    an_osd_node = list(ctx.cluster.only(is_osd_node).remotes.keys())[0]

    devstack_branch = config.get("branch", "master")
    install_devstack(devstack_node, devstack_branch)
    configure_devstack_and_ceph(ctx, config, devstack_node, an_osd_node)
    yield


def install_devstack(devstack_node, branch="master"):
    log.info("Cloning DevStack repo...")

    args = ['git', 'clone', DEVSTACK_GIT_REPO]
    devstack_node.run(args=args)

    if branch != "master":
        if branch in DS_STABLE_BRANCHES and not branch.startswith("stable"):
            branch = "stable/" + branch
        log.info("Checking out {branch} branch...".format(branch=branch))
        cmd = "cd devstack && git checkout " + branch
        devstack_node.run(args=cmd)

    log.info("Installing DevStack...")
    args = ['cd', 'devstack', run.Raw('&&'), './stack.sh']
    devstack_node.run(args=args)


def configure_devstack_and_ceph(ctx, config, devstack_node, ceph_node):
    pool_size = config.get('pool_size', '128')
    create_pools(ceph_node, pool_size)
    distribute_ceph_conf(devstack_node, ceph_node)
    # This is where we would install python-ceph and ceph-common but it
    # appears the ceph task does that for us.
    generate_ceph_keys(ceph_node)
    distribute_ceph_keys(devstack_node, ceph_node)
    secret_uuid = set_libvirt_secret(devstack_node, ceph_node)
    update_devstack_config_files(devstack_node, secret_uuid)
    set_apache_servername(devstack_node)
    # Rebooting is the most-often-used method of restarting devstack services
    misc.reboot(devstack_node)
    start_devstack(devstack_node)
    restart_apache(devstack_node)


def create_pools(ceph_node, pool_size):
    log.info("Creating pools on Ceph cluster...")

    for pool_name in ['volumes', 'images', 'backups']:
        args = ['sudo', 'ceph', 'osd', 'pool', 'create', pool_name, pool_size]
        ceph_node.run(args=args)
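
# Note on create_pools: the numeric argument to `ceph osd pool create` is the
# pool's placement-group count (pg_num), so the 'pool_size' config option is
# really a PG count -- e.g. `sudo ceph osd pool create volumes 128` -- not a
# byte size or a replica count.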


def distribute_ceph_conf(devstack_node, ceph_node):
    log.info("Copying ceph.conf to DevStack node...")

    ceph_conf_path = '/etc/ceph/ceph.conf'
    ceph_conf = misc.get_file(ceph_node, ceph_conf_path, sudo=True)
    misc.sudo_write_file(devstack_node, ceph_conf_path, ceph_conf)


def generate_ceph_keys(ceph_node):
    log.info("Generating Ceph keys...")

    ceph_auth_cmds = [
        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder', 'mon',
         'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'],  # noqa
        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.glance', 'mon',
         'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'],  # noqa
        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon',
         'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'],  # noqa
    ]
    for cmd in ceph_auth_cmds:
        ceph_node.run(args=cmd)


def distribute_ceph_keys(devstack_node, ceph_node):
    log.info("Copying Ceph keys to DevStack node...")

    def copy_key(from_remote, key_name, to_remote, dest_path, owner):
        key_stringio = StringIO()
        from_remote.run(
            args=['sudo', 'ceph', 'auth', 'get-or-create', key_name],
            stdout=key_stringio)
        key_stringio.seek(0)
        misc.sudo_write_file(to_remote, dest_path,
                             key_stringio, owner=owner)
    keys = [
        dict(name='client.glance',
             path='/etc/ceph/ceph.client.glance.keyring',
             # devstack appears to just want root:root
             #owner='glance:glance',
             ),
        dict(name='client.cinder',
             path='/etc/ceph/ceph.client.cinder.keyring',
             # devstack appears to just want root:root
             #owner='cinder:cinder',
             ),
        dict(name='client.cinder-backup',
             path='/etc/ceph/ceph.client.cinder-backup.keyring',
             # devstack appears to just want root:root
             #owner='cinder:cinder',
             ),
    ]
    for key_dict in keys:
        copy_key(ceph_node, key_dict['name'], devstack_node,
                 key_dict['path'], key_dict.get('owner'))


def set_libvirt_secret(devstack_node, ceph_node):
    log.info("Setting libvirt secret...")

    cinder_key_stringio = StringIO()
    ceph_node.run(args=['sudo', 'ceph', 'auth', 'get-key', 'client.cinder'],
                  stdout=cinder_key_stringio)
    cinder_key = cinder_key_stringio.getvalue().strip()

    uuid_stringio = StringIO()
    devstack_node.run(args=['uuidgen'], stdout=uuid_stringio)
    uuid = uuid_stringio.getvalue().strip()

    secret_path = '/tmp/secret.xml'
    secret_template = textwrap.dedent("""
    <secret ephemeral='no' private='no'>
        <uuid>{uuid}</uuid>
        <usage type='ceph'>
            <name>client.cinder secret</name>
        </usage>
    </secret>""")
    misc.sudo_write_file(devstack_node, secret_path,
                         secret_template.format(uuid=uuid))
    devstack_node.run(args=['sudo', 'virsh', 'secret-define', '--file',
                            secret_path])
    devstack_node.run(args=['sudo', 'virsh', 'secret-set-value', '--secret',
                            uuid, '--base64', cinder_key])
    return uuid


def update_devstack_config_files(devstack_node, secret_uuid):
    log.info("Updating DevStack config files to use Ceph...")

    def backup_config(node, file_name, backup_ext='.orig.teuth'):
        node.run(args=['cp', '-f', file_name, file_name + backup_ext])

    def update_config(config_name, config_stream, update_dict,
                      section='DEFAULT'):
        parser = ConfigParser()
        parser.read_file(config_stream)
        for (key, value) in update_dict.items():
            parser.set(section, key, value)
        out_stream = StringIO()
        parser.write(out_stream)
        out_stream.seek(0)
        return out_stream

    updates = [
        dict(name='/etc/glance/glance-api.conf', options=dict(
            default_store='rbd',
            rbd_store_user='glance',
            rbd_store_pool='images',
            show_image_direct_url='True',)),
        dict(name='/etc/cinder/cinder.conf', options=dict(
            volume_driver='cinder.volume.drivers.rbd.RBDDriver',
            rbd_pool='volumes',
            rbd_ceph_conf='/etc/ceph/ceph.conf',
            rbd_flatten_volume_from_snapshot='false',
            rbd_max_clone_depth='5',
            glance_api_version='2',
            rbd_user='cinder',
            rbd_secret_uuid=secret_uuid,
            backup_driver='cinder.backup.drivers.ceph',
            backup_ceph_conf='/etc/ceph/ceph.conf',
            backup_ceph_user='cinder-backup',
            backup_ceph_chunk_size='134217728',
            backup_ceph_pool='backups',
            backup_ceph_stripe_unit='0',
            backup_ceph_stripe_count='0',
            restore_discard_excess_bytes='true',
        )),
        dict(name='/etc/nova/nova.conf', options=dict(
            libvirt_images_type='rbd',
            libvirt_images_rbd_pool='volumes',
            libvirt_images_rbd_ceph_conf='/etc/ceph/ceph.conf',
            rbd_user='cinder',
            rbd_secret_uuid=secret_uuid,
            libvirt_inject_password='false',
            libvirt_inject_key='false',
            libvirt_inject_partition='-2',
        )),
    ]
    for update in updates:
        file_name = update['name']
        options = update['options']
        config_str = misc.get_file(devstack_node, file_name, sudo=True)
        config_stream = StringIO(config_str)
        backup_config(devstack_node, file_name)
        new_config_stream = update_config(file_name, config_stream, options)
        misc.sudo_write_file(devstack_node, file_name, new_config_stream)


def set_apache_servername(node):
    # Apache complains: "Could not reliably determine the server's fully
    # qualified domain name, using 127.0.0.1 for ServerName"
    # So, let's make sure it knows its name.
    log.info("Setting Apache ServerName...")

    hostname = node.hostname
    config_file = '/etc/apache2/conf.d/servername'
    misc.sudo_write_file(node, config_file,
                         "ServerName {name}".format(name=hostname))


def start_devstack(devstack_node):
    log.info("Patching devstack start script...")
    # This causes screen to start headless - otherwise rejoin-stack.sh fails
    # because there is no terminal attached.
    cmd = "cd devstack && sed -ie 's/screen -c/screen -dm -c/' rejoin-stack.sh"
    devstack_node.run(args=cmd)

    log.info("Starting devstack...")
    cmd = "cd devstack && ./rejoin-stack.sh"
    devstack_node.run(args=cmd)

    # This was added because I was getting timeouts on Cinder requests - which
    # were trying to access Keystone on port 5000. A more robust way to handle
    # this would be to introduce a wait-loop on devstack_node that checks to
    # see if a service is listening on port 5000.
    log.info("Waiting 30s for devstack to start...")
    time.sleep(30)
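

# A sketch of the more robust wait-loop suggested in the comment above: poll
# until something answers on Keystone's port 5000 instead of sleeping a fixed
# 30s. The helper name and the use of `nc -z` are assumptions, not part of
# the original task.
def _wait_for_keystone(devstack_node, port=5000, tries=30, delay=5):
    for _ in range(tries):
        # check_status=False lets us inspect the exit status ourselves
        # instead of raising on a non-zero exit
        proc = devstack_node.run(args=['nc', '-z', 'localhost', str(port)],
                                 check_status=False)
        if proc.exitstatus == 0:
            return True
        time.sleep(delay)
    return False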


def restart_apache(node):
    node.run(args=['sudo', '/etc/init.d/apache2', 'restart'], wait=True)


@contextlib.contextmanager
def exercise(ctx, config):
    log.info("Running devstack exercises...")

    if not isinstance(config, dict):
        raise TypeError("config must be a dict")

    devstack_node = list(ctx.cluster.only(is_devstack_node).remotes.keys())[0]

    # TODO: save the log *and* preserve failures
    #devstack_archive_dir = create_devstack_archive(ctx, devstack_node)

    #cmd = "cd devstack && ./exercise.sh 2>&1 | tee {dir}/exercise.log".format(  # noqa
    #    dir=devstack_archive_dir)
    cmd = "cd devstack && ./exercise.sh"
    devstack_node.run(args=cmd, wait=True)
    yield


def create_devstack_archive(ctx, devstack_node):
    test_dir = misc.get_testdir(ctx)
    devstack_archive_dir = "{test_dir}/archive/devstack".format(
        test_dir=test_dir)
    devstack_node.run(args="mkdir -p " + devstack_archive_dir)
    return devstack_archive_dir


@contextlib.contextmanager
def smoke(ctx, config):
    log.info("Running a basic smoketest...")

    devstack_node = list(ctx.cluster.only(is_devstack_node).remotes.keys())[0]
    an_osd_node = list(ctx.cluster.only(is_osd_node).remotes.keys())[0]

    create_volume(devstack_node, an_osd_node, 'smoke0', 1)
    yield


def create_volume(devstack_node, ceph_node, vol_name, size):
    """
    :param size: The size of the volume, in GB
    """
    size = str(size)
    log.info("Creating a {size}GB volume named {name}...".format(
        name=vol_name,
        size=size))
    args = ['source', 'devstack/openrc', run.Raw('&&'), 'cinder', 'create',
            '--display-name', vol_name, size]
    out_stream = StringIO()
    devstack_node.run(args=args, stdout=out_stream, wait=True)
    vol_info = parse_os_table(out_stream.getvalue())
    log.debug("Volume info: %s", str(vol_info))

    out_stream = StringIO()
    try:
        ceph_node.run(args="rbd --id cinder ls -l volumes", stdout=out_stream,
                      wait=True)
    except run.CommandFailedError:
        log.debug("Original rbd call failed; retrying without '--id cinder'")
        ceph_node.run(args="rbd ls -l volumes", stdout=out_stream,
                      wait=True)

    assert vol_info['id'] in out_stream.getvalue(), \
        "Volume not found on Ceph cluster"
    assert vol_info['size'] == size, \
        "Volume size on Ceph cluster is different than specified"
    return vol_info['id']


def parse_os_table(table_str):
    out_dict = dict()
    for line in table_str.split('\n'):
        if line.startswith('|'):
            items = line.split()
            out_dict[items[1]] = items[3]
    return out_dict
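
# For reference, `cinder create` prints a table like the following (values
# hypothetical, abridged):
#
#     +----------+--------------------------------------+
#     | Property |                Value                 |
#     +----------+--------------------------------------+
#     |    id    | 8aee5fbb-0000-0000-0000-000000000000 |
#     |   size   | 1                                    |
#     +----------+--------------------------------------+
#
# parse_os_table keeps only the rows starting with '|' (skipping the '+---'
# borders) and maps the second whitespace-separated token to the fourth,
# yielding e.g. {'Property': 'Value', 'id': '8aee5fbb-...', 'size': '1'}.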