2 from unittest import case
6 from teuthology import misc
7 from tasks.ceph_test_case import CephTestCase
9 # TODO move definition of CephCluster away from the CephFS stuff
10 from tasks.cephfs.filesystem import CephCluster
# Module-level logger shared by the helpers and test cases below.
log = logging.getLogger(__name__)
class MgrCluster(CephCluster):
    """Handle on the ceph-mgr daemons in a teuthology cluster.

    Provides per-daemon stop/fail/restart operations plus helpers for
    reading the cluster's mgr map (active/standby names, service URIs).
    """
    def __init__(self, ctx):
        super(MgrCluster, self).__init__(ctx)
        # All roles of type 'mgr' declared in the teuthology cluster config.
        self.mgr_ids = list(misc.all_roles_of_type(ctx.cluster, 'mgr'))

        if not self.mgr_ids:
            raise RuntimeError(
                "This task requires at least one manager daemon")

        # Map mgr id -> daemon handle, for direct process control.
        self.mgr_daemons = {
            mgr_id: self._ctx.daemons.get_daemon('mgr', mgr_id)
            for mgr_id in self.mgr_ids}

    def mgr_stop(self, mgr_id):
        """Stop the mgr daemon process (does not mark it failed in the mon)."""
        self.mgr_daemons[mgr_id].stop()

    def mgr_fail(self, mgr_id):
        """Mark the mgr as failed on the monitors, forcing a failover."""
        self.mon_manager.raw_cluster_cmd("mgr", "fail", mgr_id)

    def mgr_restart(self, mgr_id):
        """Restart the mgr daemon process."""
        self.mgr_daemons[mgr_id].restart()

    def get_mgr_map(self):
        """Return the 'mgrmap' section of `ceph status` as a dict."""
        status = json.loads(
            self.mon_manager.raw_cluster_cmd("status", "--format=json-pretty"))

        return status["mgrmap"]

    def get_active_id(self):
        """Return the name of the currently active mgr (empty string if none)."""
        return self.get_mgr_map()["active_name"]

    def get_standby_ids(self):
        """Return the names of the standby mgr daemons."""
        return [s['name'] for s in self.get_mgr_map()["standbys"]]

    def set_module_localized_conf(self, module, mgr_id, key, val):
        """Set a per-daemon (localized) config-key value for a mgr module."""
        self.mon_manager.raw_cluster_cmd("config-key", "set",
                                         "mgr/{0}/{1}/{2}".format(
                                             module, mgr_id, key), val)
class MgrTestCase(CephTestCase):
    """Base class for ceph-mgr tests.

    setUp() restarts every mgr daemon so each test begins from a known
    state: exactly one active mgr and all remaining daemons as standbys.
    """

    # Minimum number of mgr daemons the test needs; subclasses override.
    # Referenced by the skip check in setUp().
    MGRS_REQUIRED = 1

    def setUp(self):
        super(MgrTestCase, self).setUp()

        # The test runner should have populated this
        assert self.mgr_cluster is not None

        if len(self.mgr_cluster.mgr_ids) < self.MGRS_REQUIRED:
            # Use the public TestCase.skipTest API rather than raising
            # SkipTest via the private `unittest.case` module; the effect
            # (a SkipTest exception) is the same.
            self.skipTest("Only have {0} manager daemons, "
                          "{1} are required".format(
                              len(self.mgr_cluster.mgr_ids),
                              self.MGRS_REQUIRED))

        # Restart all the daemons: stop them all first, mark them failed
        # on the mons, then bring them back up.
        for daemon in self.mgr_cluster.mgr_daemons.values():
            daemon.stop()

        for mgr_id in self.mgr_cluster.mgr_ids:
            self.mgr_cluster.mgr_fail(mgr_id)

        for daemon in self.mgr_cluster.mgr_daemons.values():
            daemon.restart()

        # Wait for an active to come up
        self.wait_until_true(lambda: self.mgr_cluster.get_active_id() != "",
                             timeout=20)

        # ...then wait for every other mgr to register as a standby.
        expect_standbys = set(self.mgr_cluster.mgr_ids) \
            - {self.mgr_cluster.get_active_id()}
        self.wait_until_true(
            lambda: set(self.mgr_cluster.get_standby_ids()) == expect_standbys,
            timeout=20)
91 def _load_module(self, module_name):
92 loaded = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd(
93 "mgr", "module", "ls"))['enabled_modules']
94 if module_name in loaded:
95 # The enable command is idempotent, but our wait for a restart
96 # isn't, so let's return now if it's already loaded
99 initial_gid = self.mgr_cluster.get_mgr_map()['active_gid']
100 self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable",
103 # Wait for the module to load
105 mgr_map = self.mgr_cluster.get_mgr_map()
106 done = mgr_map['active_gid'] != initial_gid and mgr_map['available']
108 log.info("Restarted after module load (new active {0}/{1})".format(
109 mgr_map['active_name'] , mgr_map['active_gid']))
111 self.wait_until_true(has_restarted, timeout=30)
114 def _get_uri(self, service_name):
115 # Little dict hack so that I can assign into this from
116 # the get_or_none function
117 mgr_map = {'x': None}
120 mgr_map['x'] = self.mgr_cluster.get_mgr_map()
121 result = mgr_map['x']['services'].get(service_name, None)
124 self.wait_until_true(lambda: _get_or_none() is not None, 30)
126 uri = mgr_map['x']['services'][service_name]
128 log.info("Found {0} at {1} (daemon {2}/{3})".format(
129 service_name, uri, mgr_map['x']['active_name'],
130 mgr_map['x']['active_gid']))
135 def _assign_ports(self, module_name, config_name, min_port=7789):
137 To avoid the need to run lots of hosts in teuthology tests to
138 get different URLs per mgr, we will hand out different ports
141 This is already taken care of for us when running in a vstart
144 # Start handing out ports well above Ceph's range.
145 assign_port = min_port
147 for mgr_id in self.mgr_cluster.mgr_ids:
148 self.mgr_cluster.mgr_stop(mgr_id)
149 self.mgr_cluster.mgr_fail(mgr_id)
151 for mgr_id in self.mgr_cluster.mgr_ids:
152 log.info("Using port {0} for {1} on mgr.{2}".format(
153 assign_port, module_name, mgr_id
155 self.mgr_cluster.set_module_localized_conf(module_name, mgr_id,
160 for mgr_id in self.mgr_cluster.mgr_ids:
161 self.mgr_cluster.mgr_restart(mgr_id)
164 mgr_map = self.mgr_cluster.get_mgr_map()
165 done = mgr_map['available']
167 log.info("Available after assign ports (new active {0}/{1})".format(
168 mgr_map['active_name'] , mgr_map['active_gid']))
170 self.wait_until_true(is_available, timeout=30)