Fix some bugs when testing opensds ansible
[stor4nfv.git] / src / ceph / qa / tasks / rgw.py
1 """
2 rgw routines
3 """
4 import argparse
5 import contextlib
6 import json
7 import logging
8 import os
9 import errno
10 import util.rgw as rgw_utils
11
12 from teuthology.orchestra import run
13 from teuthology import misc as teuthology
14 from teuthology import contextutil
15 from teuthology.orchestra.run import CommandFailedError
16 from util.rgw import rgwadmin, wait_for_radosgw
17 from util.rados import (rados, create_ec_pool,
18                                         create_replicated_pool,
19                                         create_cache_pool)
20
21 log = logging.getLogger(__name__)
22
@contextlib.contextmanager
def start_rgw(ctx, config, clients):
    """
    Start rgw on remote sites.
    """
    log.info('Starting rgw...')
    testdir = teuthology.get_testdir(ctx)
    for client in clients:
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
        cluster_name, daemon_type, client_id = teuthology.split_role(client)
        client_with_id = daemon_type + '.' + client_id
        client_with_cluster = cluster_name + '.' + client_with_id

        client_config = config.get(client)
        if client_config is None:
            client_config = {}
        log.info("rgw %s config is %s", client, client_config)
        cmd_prefix = [
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
            ]

        rgw_cmd = ['radosgw']

        log.info("Using %s as radosgw frontend", ctx.rgw.frontend)

        host, port = ctx.rgw.role_endpoints[client]
        rgw_cmd.extend([
            '--rgw-frontends',
            '{frontend} port={port}'.format(frontend=ctx.rgw.frontend, port=port),
            '-n', client_with_id,
            '--cluster', cluster_name,
            '-k', '/etc/ceph/{client_with_cluster}.keyring'.format(client_with_cluster=client_with_cluster),
            '--log-file',
            '/var/log/ceph/rgw.{client_with_cluster}.log'.format(client_with_cluster=client_with_cluster),
            '--rgw_ops_log_socket_path',
            '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir,
                                                     client_with_cluster=client_with_cluster),
            '--foreground',
            run.Raw('|'),
            'sudo',
            'tee',
            '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(client_with_cluster=client_with_cluster),
            run.Raw('2>&1'),
            ])

        if client_config.get('valgrind'):
            cmd_prefix = teuthology.get_valgrind_args(
                testdir,
                client_with_cluster,
                cmd_prefix,
                client_config.get('valgrind')
                )

        run_cmd = list(cmd_prefix)
        run_cmd.extend(rgw_cmd)

        ctx.daemons.add_daemon(
            remote, 'rgw', client_with_id,
            cluster=cluster_name,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
            )

    # XXX: add_daemon() doesn't let us wait until radosgw finishes startup
    for client in config.keys():
        host, port = ctx.rgw.role_endpoints[client]
        endpoint = 'http://{host}:{port}/'.format(host=host, port=port)
        log.info('Polling {client} until it starts accepting connections on {endpoint}'.format(client=client, endpoint=endpoint))
        wait_for_radosgw(endpoint)

    try:
        yield
    finally:
        for client in config.iterkeys():
            cluster_name, daemon_type, client_id = teuthology.split_role(client)
            client_with_id = daemon_type + '.' + client_id
            client_with_cluster = cluster_name + '.' + client_with_id
            ctx.daemons.get_daemon('rgw', client_with_id, cluster_name).stop()
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-f',
                    '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                             client=client_with_cluster),
                    ],
                )

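# For reference, a rough sketch of the command line that start_rgw() assembles
# above, for a hypothetical client.0 on the default 'ceph' cluster with the
# civetweb frontend on port 7280 (paths and values are illustrative only):
#
#   sudo adjust-ulimits ceph-coverage <testdir>/archive/coverage \
#       daemon-helper term \
#       radosgw --rgw-frontends 'civetweb port=7280' -n client.0 --cluster ceph \
#       -k /etc/ceph/ceph.client.0.keyring \
#       --log-file /var/log/ceph/rgw.ceph.client.0.log \
#       --rgw_ops_log_socket_path <testdir>/rgw.opslog.ceph.client.0.sock \
#       --foreground | sudo tee /var/log/ceph/rgw.ceph.client.0.stdout 2>&1
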
def assign_ports(ctx, config):
    """
    Assign port numbers starting with port 7280.
    """
    port = 7280
    role_endpoints = {}
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        for role in roles_for_host:
            if role in config:
                role_endpoints[role] = (remote.name.split('@')[1], port)
                port += 1

    return role_endpoints

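# For reference, assign_ports() above returns a mapping from rgw role to
# (hostname, port); a sketch with illustrative hostnames:
#
#   {'client.0': ('target1.example.com', 7280),
#    'client.1': ('target1.example.com', 7281)}
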
@contextlib.contextmanager
def create_pools(ctx, clients):
    """Create replicated or erasure coded data pools for rgw."""

    log.info('Creating data pools')
    for client in clients:
        log.debug("Obtaining remote for client {}".format(client))
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
        data_pool = '.rgw.buckets'
        cluster_name, daemon_type, client_id = teuthology.split_role(client)

        if ctx.rgw.ec_data_pool:
            create_ec_pool(remote, data_pool, client, 64,
                           ctx.rgw.erasure_code_profile, cluster_name, 'rgw')
        else:
            create_replicated_pool(remote, data_pool, 64, cluster_name, 'rgw')
        if ctx.rgw.cache_pools:
            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
                              64*1024*1024, cluster_name)
    log.debug('Pools created')
    yield

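# For reference, the pool helpers used by create_pools() above wrap the usual
# 'ceph osd pool' commands; a rough sketch of the replicated case (see
# util/rados.py for the exact invocations, which may differ):
#
#   ceph --cluster ceph osd pool create .rgw.buckets 64 64
#   ceph --cluster ceph osd pool application enable .rgw.buckets rgw
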
@contextlib.contextmanager
def configure_compression(ctx, clients, compression):
    """Set a compression type in the default zone placement."""
    log.info('Configuring compression type = %s', compression)
    for client in clients:
        # XXX: the 'default' zone and zonegroup aren't created until we run RGWRados::init_complete().
        # issue a 'radosgw-admin user list' command to trigger this
        rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True)

        rgwadmin(ctx, client,
                cmd=['zone', 'placement', 'modify', '--rgw-zone', 'default',
                     '--placement-id', 'default-placement',
                     '--compression', compression],
                check_status=True)
    yield

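# For reference, the zone placement change made by configure_compression()
# above corresponds to running, e.g. with zlib as the compression type:
#
#   radosgw-admin zone placement modify --rgw-zone default \
#       --placement-id default-placement --compression zlib
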
@contextlib.contextmanager
def task(ctx, config):
    """
    For example, to run rgw on all clients::

        tasks:
        - ceph:
        - rgw:

    To only run on certain clients::

        tasks:
        - ceph:
        - rgw: [client.0, client.3]

    or

        tasks:
        - ceph:
        - rgw:
            client.0:
            client.3:

    To run radosgw through valgrind::

        tasks:
        - ceph:
        - rgw:
            client.0:
              valgrind: [--tool=memcheck]
            client.3:
              valgrind: [--tool=memcheck]
    """
    if config is None:
        config = dict(('client.{id}'.format(id=id_), None)
                      for id_ in teuthology.all_roles_of_type(
                          ctx.cluster, 'client'))
    elif isinstance(config, list):
        config = dict((name, None) for name in config)

    clients = config.keys()  # http://tracker.ceph.com/issues/20417

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('rgw', {}))

    role_endpoints = assign_ports(ctx, config)
    ctx.rgw = argparse.Namespace()
    ctx.rgw.role_endpoints = role_endpoints

    ctx.rgw.ec_data_pool = bool(config.pop('ec-data-pool', False))
    ctx.rgw.erasure_code_profile = config.pop('erasure_code_profile', {})
    ctx.rgw.cache_pools = bool(config.pop('cache-pools', False))
    ctx.rgw.frontend = config.pop('frontend', 'civetweb')
    ctx.rgw.compression_type = config.pop('compression type', None)
    ctx.rgw.config = config
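
    # The options consumed above come from the task (or overrides['rgw'])
    # configuration; a sketch with illustrative values:
    #
    #   - rgw:
    #       client.0:
    #       ec-data-pool: true
    #       erasure_code_profile:
    #         k: 2
    #         m: 1
    #       cache-pools: false
    #       frontend: civetweb
    #       compression type: zlib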

    log.debug("config is {}".format(config))
    log.debug("client list is {}".format(clients))
    subtasks = [
        lambda: create_pools(ctx=ctx, clients=clients),
    ]
    if ctx.rgw.compression_type:
        subtasks.extend([
            lambda: configure_compression(ctx=ctx, clients=clients,
                                          compression=ctx.rgw.compression_type),
        ])
    subtasks.extend([
        lambda: start_rgw(ctx=ctx, config=config, clients=clients),
    ])

    with contextutil.nested(*subtasks):
        yield