src/ceph/qa/tasks/cephfs/test_flush.py
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.filesystem import ObjectNotFound, ROOT_INO


class TestFlush(CephFSTestCase):
    def test_flush(self):
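        """
        That the 'flush journal' asok command writes journaled metadata
        (dirfrag objects and backtraces) out to RADOS and truncates the
        journal, and that purges and deletions proceed as expected after
        the directory is subsequently removed.
        """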
        self.mount_a.run_shell(["mkdir", "mydir"])
        self.mount_a.run_shell(["touch", "mydir/alpha"])
        dir_ino = self.mount_a.path_to_ino("mydir")
        file_ino = self.mount_a.path_to_ino("mydir/alpha")

        # Unmount the client so that it isn't still holding caps
        self.mount_a.umount_wait()

        # Before flush, the dirfrag object does not exist
        with self.assertRaises(ObjectNotFound):
            self.fs.list_dirfrag(dir_ino)

        # Before flush, the file's backtrace has not been written
        with self.assertRaises(ObjectNotFound):
            self.fs.read_backtrace(file_ino)

        # Before flush, there are no dentries in the root
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])

        # Execute flush
        flush_data = self.fs.mds_asok(["flush", "journal"])
        self.assertEqual(flush_data['return_code'], 0)

        # After flush, the dirfrag object has been created
        dir_list = self.fs.list_dirfrag(dir_ino)
        self.assertEqual(dir_list, ["alpha_head"])

        # And the 'mydir' dentry is in the root
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), ['mydir_head'])

        # ...and the data object has its backtrace
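        # (the ancestors list runs from the file's own dentry up to the
        # root, whose inode number is 1)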
        backtrace = self.fs.read_backtrace(file_ino)
        self.assertEqual(['alpha', 'mydir'], [a['dname'] for a in backtrace['ancestors']])
        self.assertEqual([dir_ino, 1], [a['dirino'] for a in backtrace['ancestors']])
        self.assertEqual(file_ino, backtrace['ino'])

        # ...and the journal is truncated to just a single subtreemap from the
        # newly created segment
        summary_output = self.fs.journal_tool(["event", "get", "summary"])
        try:
            self.assertEqual(summary_output,
                             dedent(
                                 """
                                 Events by type:
                                   SUBTREEMAP: 1
                                 Errors: 0
                                 """
                             ).strip())
        except AssertionError:
            # In some states, flushing the journal will leave behind an
            # extra UPDATE event from locks that a client held. This is
            # correct behaviour: the MDS is flushing the journal, it's just
            # that new events are getting added too. In this case, we should
            # nevertheless see a journal containing only the subtree map
            # after a second flush.
            self.assertEqual(summary_output,
                             dedent(
                                 """
                                 Events by type:
                                   SUBTREEMAP: 1
                                   UPDATE: 1
                                 Errors: 0
                                 """
                             ).strip())
            flush_data = self.fs.mds_asok(["flush", "journal"])
            self.assertEqual(flush_data['return_code'], 0)
            self.assertEqual(self.fs.journal_tool(["event", "get", "summary"]),
                             dedent(
                                 """
                                 Events by type:
                                   SUBTREEMAP: 1
                                 Errors: 0
                                 """
                             ).strip())

        # Now for deletion!
        # We will count the RADOS deletions and MDS file purges, to verify that
        # the expected behaviour is happening as a result of the purge
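        # (the objecter's osdop_delete counter tracks RADOS delete ops issued
        # by the MDS; mds_cache's strays_enqueued counts stray inodes handed
        # to the purge queue)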
        initial_dels = self.fs.mds_asok(['perf', 'dump', 'objecter'])['objecter']['osdop_delete']
        initial_purges = self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_enqueued']

        # Use a client to delete a file
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.mount_a.run_shell(["rm", "-rf", "mydir"])

        # Flush the journal so that the directory inode can be purged
        flush_data = self.fs.mds_asok(["flush", "journal"])
        self.assertEqual(flush_data['return_code'], 0)

        # We expect both the file and its directory to be enqueued for purging
        self.wait_until_true(
            lambda: self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_enqueued'] - initial_purges >= 2,
            60)

        # We expect two deletions, one of the dirfrag and one of the backtrace
        self.wait_until_true(
            lambda: self.fs.mds_asok(['perf', 'dump', 'objecter'])['objecter']['osdop_delete'] - initial_dels >= 2,
            60)  # timeout is fairly long to allow for tick+rados latencies

        with self.assertRaises(ObjectNotFound):
            self.fs.list_dirfrag(dir_ino)
        with self.assertRaises(ObjectNotFound):
            self.fs.read_backtrace(file_ino)
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])