from textwrap import dedent
from teuthology.exceptions import CommandFailedError
from tasks.cephfs.cephfs_test_case import CephFSTestCase
import os


class TestPoolPerm(CephFSTestCase):
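    """
    That the client enforces the OSD and MDS capabilities attached to
    its auth key: data pool I/O must fail with EPERM when the caps
    forbid it, and changing layouts requires the MDS 'p' cap.
    """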
    def test_pool_perm(self):
        self.mount_a.run_shell(["touch", "test_file"])

        file_path = os.path.join(self.mount_a.mountpoint, "test_file")

        # Template for a script run on the client: the chosen operation
        # (read or write) must be denied with EPERM once the client's
        # OSD caps forbid it.
        remote_script = dedent("""
            import os
            import errno

            fd = os.open("{path}", os.O_RDWR)
            try:
                if {check_read}:
                    ret = os.read(fd, 1024)
                else:
                    os.write(fd, b'content')
            except OSError as e:
                if e.errno != errno.EPERM:
                    raise
            else:
                raise RuntimeError("client does not check permission of data pool")
            """)

        client_name = "client.{0}".format(self.mount_a.client_id)

        # set data pool read only
        self.fs.mon_manager.raw_cluster_cmd_result(
            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
            'allow r pool={0}'.format(self.fs.get_data_pool_name()))
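
        # Remount so that the client session picks up the reduced caps.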
        self.mount_a.umount_wait()
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()

        # write should fail
        self.mount_a.run_python(remote_script.format(
            path=file_path, check_read=str(False)))

        # set data pool write only
        self.fs.mon_manager.raw_cluster_cmd_result(
            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
            'allow w pool={0}'.format(self.fs.get_data_pool_name()))

        self.mount_a.umount_wait()
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()

        # read should fail
        self.mount_a.run_python(remote_script.format(
            path=file_path, check_read=str(True)))

    def test_forbidden_modification(self):
        """
        That a client who does not have the capability for setting
        layout pools is prevented from doing so.
        """

        # Set up
        client_name = "client.{0}".format(self.mount_a.client_id)
        new_pool_name = "data_new"
        self.fs.add_data_pool(new_pool_name)
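        # The layout changes below target this new pool.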

        self.mount_a.run_shell(["touch", "layoutfile"])
        self.mount_a.run_shell(["mkdir", "layoutdir"])

        # Set MDS 'rw' perms: missing 'p' means no setting pool layouts
        self.fs.mon_manager.raw_cluster_cmd_result(
            'auth', 'caps', client_name, 'mds', 'allow rw', 'mon', 'allow r',
            'osd',
            'allow rw pool={0},allow rw pool={1}'.format(
                self.fs.get_data_pool_names()[0],
                self.fs.get_data_pool_names()[1],
            ))

        self.mount_a.umount_wait()
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()

        with self.assertRaises(CommandFailedError):
            self.mount_a.setfattr("layoutfile", "ceph.file.layout.pool",
                                  new_pool_name)
        with self.assertRaises(CommandFailedError):
            self.mount_a.setfattr("layoutdir", "ceph.dir.layout.pool",
                                  new_pool_name)
        self.mount_a.umount_wait()

        # Set MDS 'rwp' perms: should now be able to set layouts
        self.fs.mon_manager.raw_cluster_cmd_result(
            'auth', 'caps', client_name, 'mds', 'allow rwp', 'mon', 'allow r',
            'osd',
            'allow rw pool={0},allow rw pool={1}'.format(
                self.fs.get_data_pool_names()[0],
                self.fs.get_data_pool_names()[1],
            ))
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.mount_a.setfattr("layoutfile", "ceph.file.layout.pool",
                              new_pool_name)
        self.mount_a.setfattr("layoutdir", "ceph.dir.layout.pool",
                              new_pool_name)
        self.mount_a.umount_wait()

    def tearDown(self):
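        # Reset the client's caps to the defaults so that subsequent
        # test cases start with a fully usable client.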
        self.fs.mon_manager.raw_cluster_cmd_result(
            'auth', 'caps', "client.{0}".format(self.mount_a.client_id),
            'mds', 'allow', 'mon', 'allow r', 'osd',
            'allow rw pool={0}'.format(self.fs.get_data_pool_names()[0]))
        super(TestPoolPerm, self).tearDown()