1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 * Ceph - scalable distributed file system
6 * Copyright (C) 2011 New Dream Network
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
16 #include "gtest/gtest.h"
17 #ifndef GTEST_IS_THREADSAFE
18 #error "!GTEST_IS_THREADSAFE"
21 #include "include/cephfs/libcephfs.h"
26 #include <sys/types.h>
29 #include <sys/xattr.h>
32 #include <semaphore.h>
// NOTE(review): STARTUP_CEPH()/CLEANUP_CEPH() below expect a local
// 'struct ceph_mount_info *cmount' in the calling scope. Each is a
// do { ... } block; the closing '} while(0)' lines are not visible in
// this chunk -- confirm against the full file.
// fileMode (rwx for user/group/other) is the mode for every test file
// created via ceph_open(); waitMs bounds "must NOT happen" waits,
// waitSlowMs bounds normal handshake waits (see abstime() below).
40 // Startup common: create and mount ceph fs
41 #define STARTUP_CEPH() do { \
42 ASSERT_EQ(0, ceph_create(&cmount, NULL)); \
43 ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL)); \
44 ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL)); \
45 ASSERT_EQ(0, ceph_mount(cmount, NULL)); \
48 // Cleanup common: unmount and release ceph fs
49 #define CLEANUP_CEPH() do { \
50 ASSERT_EQ(0, ceph_unmount(cmount)); \
51 ASSERT_EQ(0, ceph_release(cmount)); \
54 static const mode_t fileMode = S_IRWXU | S_IRWXG | S_IRWXO;
56 // Default wait time for normal and "slow" operations
57 // (5" should be enough in case of network congestion)
58 static const long waitMs = 10;
59 static const long waitSlowMs = 5000;
61 // Get the absolute struct timespec reference from now + 'ms' milliseconds
// Fills 'ts' with CLOCK_REALTIME now + ms, in the absolute-timeout form
// that sem_timedwait() expects, and returns its address.
62 static const struct timespec* abstime(struct timespec &ts, long ms) {
63 if (clock_gettime(CLOCK_REALTIME, &ts) == -1) {
// NOTE(review): the clock_gettime() failure branch and the trailing
// 'return &ts;' are not visible in this chunk -- confirm in full file.
// Normalize so tv_nsec stays within [0, 1e9).
66 ts.tv_nsec += ms * 1000000;
67 ts.tv_sec += ts.tv_nsec / 1000000000;
68 ts.tv_nsec %= 1000000000;
// Single-client sanity checks for ceph_flock(). The last argument is a
// lock-owner token, so distinct values (42, 43, ...) emulate distinct
// lock holders contending on the same open fd.
73 TEST(LibCephFS, BasicLocking) {
74 struct ceph_mount_info *cmount = NULL;
// NOTE(review): STARTUP_CEPH() and the c_file buffer declaration are not
// visible in this chunk; c_file is presumably a char array -- confirm.
78 sprintf(c_file, "/flock_test_%d", getpid());
79 const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
82 // Lock exclusively twice
83 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, 42));
84 ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 43));
85 ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 44));
86 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42));
88 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 43));
89 ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 44));
90 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 43));
92 // Lock shared three times
93 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 42));
94 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 43));
95 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 44));
96 // And then attempt to lock exclusively
97 ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45));
// The exclusive attempt keeps failing until EVERY shared owner unlocks.
98 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42));
99 ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45));
100 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 44));
101 ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45));
102 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 43));
103 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45));
104 ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, 42));
105 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 45));
107 // Lock shared with upgrade to exclusive (POSIX)
108 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 42));
109 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, 42));
110 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42));
112 // Lock exclusive with downgrade to shared (POSIX)
113 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, 42));
114 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 42));
115 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42));
117 ASSERT_EQ(0, ceph_close(cmount, fd));
118 ASSERT_EQ(0, ceph_unlink(cmount, c_file));
122 /* Locking in different threads */
124 // Used by ConcurrentLocking test
// Shared state between the main test and its worker thread(s)/process(es).
// NOTE(review): the declarations of 'file', 'sem[2]' and 'semReply[2]'
// (sem_t pairs) are on lines not visible in this chunk; their use in the
// PING_*/WAIT_* macros implies sem[] carries worker->main signals and
// semReply[] carries main->worker signals -- confirm in full file.
125 struct str_ConcurrentLocking {
127 struct ceph_mount_info *cmount; // !NULL if shared
// Initialize both semaphore pairs to 0 (empty). pshared is nonzero when
// the struct lives in shared memory for the inter-process tests.
130 void sem_init(int pshared) {
131 ASSERT_EQ(0, ::sem_init(&sem[0], pshared, 0));
132 ASSERT_EQ(0, ::sem_init(&sem[1], pshared, 0));
133 ASSERT_EQ(0, ::sem_init(&semReply[0], pshared, 0));
134 ASSERT_EQ(0, ::sem_init(&semReply[1], pshared, 0));
// NOTE(review): the wrapper (presumably a sem_destroy() cleanup method)
// around the four calls below is partially elided in this chunk.
137 ASSERT_EQ(0, ::sem_destroy(&sem[0]));
138 ASSERT_EQ(0, ::sem_destroy(&sem[1]));
139 ASSERT_EQ(0, ::sem_destroy(&semReply[0]));
140 ASSERT_EQ(0, ::sem_destroy(&semReply[1]));
// Handshake helpers. All of them expect locals 's' (str_ConcurrentLocking)
// and 'ts' (struct timespec, scratch for abstime) in the calling scope.
// Using n%2 alternates between the two semaphores of each pair so that
// consecutive steps cannot be confused with one another.
144 // Wakeup main (for (N) steps)
145 #define PING_MAIN(n) ASSERT_EQ(0, sem_post(&s.sem[n%2]))
146 // Wait for main to wake us up (for (RN) steps)
147 #define WAIT_MAIN(n) \
148 ASSERT_EQ(0, sem_timedwait(&s.semReply[n%2], abstime(ts, waitSlowMs)))
150 // Wakeup worker (for (RN) steps)
151 #define PING_WORKER(n) ASSERT_EQ(0, sem_post(&s.semReply[n%2]))
152 // Wait for worker to wake us up (for (N) steps)
153 #define WAIT_WORKER(n) \
154 ASSERT_EQ(0, sem_timedwait(&s.sem[n%2], abstime(ts, waitSlowMs)))
// Asserts the short waitMs timeout EXPIRES, i.e. the worker did NOT
// reach step n yet (sem_timedwait returns -1 on timeout).
155 // Worker shall not wake us up (for (N) steps)
156 #define NOT_WAIT_WORKER(n) \
157 ASSERT_EQ(-1, sem_timedwait(&s.sem[n%2], abstime(ts, waitMs)))
159 // Do twice an operation
// NOTE(review): TWICE(...) runs its argument twice -- once per worker in
// the Threesome tests; its body continues on lines not visible here.
160 #define TWICE(EXPR) do { \
165 /* Locking in different threads */
167 // Used by ConcurrentLocking test
// Worker-thread half of the locking handshake. Steps tagged (N) are
// reported to main via PING_MAIN(N) (those calls are on lines elided in
// this chunk -- confirm) and steps tagged (RN) wait for main via
// WAIT_MAIN(N). pthread_self() is the flock owner token, so each thread
// counts as a distinct lock holder on the shared mount.
168 static void thread_ConcurrentLocking(str_ConcurrentLocking& s) {
169 struct ceph_mount_info *const cmount = s.cmount;
172 const int fd = ceph_open(cmount, s.file, O_RDWR | O_CREAT, fileMode);
// Main holds LOCK_EX at this point: the non-blocking attempt must fail...
175 ASSERT_EQ(-EWOULDBLOCK,
176 ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, pthread_self()));
// ...and the blocking attempt succeeds only once main unlocks.
178 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, pthread_self()));
181 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, pthread_self()));
// Hold a shared lock so main can also lock shared but not exclusive.
184 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, pthread_self()));
187 WAIT_MAIN(1); // (R1)
188 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, pthread_self()));
191 WAIT_MAIN(2); // (R2)
// Blocks until main releases its exclusive lock.
192 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, pthread_self()));
195 WAIT_MAIN(3); // (R3)
196 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, pthread_self()));
200 // Used by ConcurrentLocking test
// pthread entry-point shim: unwraps the shared-state argument and runs
// the worker. NOTE(review): the return statement (presumably NULL, which
// the tests check via pthread_join) is not visible in this chunk.
201 static void* thread_ConcurrentLocking_(void *arg) {
202 str_ConcurrentLocking *const s =
203 reinterpret_cast<str_ConcurrentLocking*>(arg);
204 thread_ConcurrentLocking(*s);
// Two-party locking: main thread vs. one worker thread on the same mount.
// Handshake step numbers (N)/(RN) pair with thread_ConcurrentLocking.
208 TEST(LibCephFS, ConcurrentLocking) {
209 const pid_t mypid = getpid();
210 struct ceph_mount_info *cmount;
// NOTE(review): STARTUP_CEPH() and the declarations of c_file, ts and
// 'thread' are on lines not visible in this chunk.
214 sprintf(c_file, "/flock_test_%d", mypid);
215 const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
// Take the exclusive lock BEFORE the worker starts, so the worker's
// first non-blocking attempt is guaranteed to fail.
219 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, pthread_self()));
221 // Start locker thread
224 str_ConcurrentLocking s = { c_file, cmount };
226 ASSERT_EQ(0, pthread_create(&thread, NULL, thread_ConcurrentLocking_, &s));
227 // Synchronization point with thread (failure: thread is dead)
228 WAIT_WORKER(1); // (1)
230 // Shall not have lock immediately
231 NOT_WAIT_WORKER(2); // (2)
// Release the lock; the worker's blocking LOCK_EX can now succeed.
234 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, pthread_self()));
237 // Synchronization point with thread (failure: thread is dead)
238 WAIT_WORKER(2); // (2)
240 // Synchronization point with thread (failure: thread is dead)
241 WAIT_WORKER(3); // (3)
243 // Wait for thread to share lock
244 WAIT_WORKER(4); // (4)
// Shared lock held by worker: exclusive fails, shared succeeds.
245 ASSERT_EQ(-EWOULDBLOCK,
246 ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, pthread_self()));
247 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, pthread_self()));
249 // Wake up thread to unlock shared lock
250 PING_WORKER(1); // (R1)
251 WAIT_WORKER(5); // (5)
253 // Now we can lock exclusively
254 // Upgrade to exclusive lock (as per POSIX)
255 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, pthread_self()));
257 // Wake up thread to lock shared lock
258 PING_WORKER(2); // (R2)
260 // Shall not have lock immediately
261 NOT_WAIT_WORKER(6); // (6)
263 // Release lock ; thread will get it
264 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, pthread_self()));
265 WAIT_WORKER(6); // (6)
267 // We no longer have the lock
268 ASSERT_EQ(-EWOULDBLOCK,
269 ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, pthread_self()));
270 ASSERT_EQ(-EWOULDBLOCK,
271 ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, pthread_self()));
273 // Wake up thread to unlock exclusive lock
274 PING_WORKER(3); // (R3)
275 WAIT_WORKER(7); // (7)
277 // We can lock it again
278 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, pthread_self()));
279 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, pthread_self()));
// The worker must have exited cleanly; the poison value -1 must have
// been overwritten by the thread's NULL return.
282 void *retval = (void*) (uintptr_t) -1;
283 ASSERT_EQ(0, pthread_join(thread, &retval));
284 ASSERT_EQ(NULL, retval);
286 ASSERT_EQ(0, ceph_close(cmount, fd));
287 ASSERT_EQ(0, ceph_unlink(cmount, c_file));
// Same scenario as ConcurrentLocking but with TWO worker threads sharing
// one str_ConcurrentLocking; TWICE(...) performs each handshake step once
// per worker. Single-shot steps (e.g. NOT_WAIT_WORKER) are deliberately
// not doubled where one observation suffices.
291 TEST(LibCephFS, ThreesomeLocking) {
292 const pid_t mypid = getpid();
293 struct ceph_mount_info *cmount;
// NOTE(review): STARTUP_CEPH() and the declarations of c_file, ts and
// the thread[2] array are on lines not visible in this chunk.
297 sprintf(c_file, "/flock_test_%d", mypid);
298 const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
// Hold the exclusive lock before either worker starts.
302 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, pthread_self()));
304 // Start locker thread
307 str_ConcurrentLocking s = { c_file, cmount };
309 ASSERT_EQ(0, pthread_create(&thread[0], NULL, thread_ConcurrentLocking_, &s));
310 ASSERT_EQ(0, pthread_create(&thread[1], NULL, thread_ConcurrentLocking_, &s));
311 // Synchronization point with thread (failure: thread is dead)
312 TWICE(WAIT_WORKER(1)); // (1)
314 // Shall not have lock immediately
315 NOT_WAIT_WORKER(2); // (2)
// Release; both workers will serially acquire and release LOCK_EX.
318 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, pthread_self()));
321 TWICE(// Synchronization point with thread (failure: thread is dead)
322 WAIT_WORKER(2); // (2)
324 // Synchronization point with thread (failure: thread is dead)
325 WAIT_WORKER(3)); // (3)
327 // Wait for thread to share lock
328 TWICE(WAIT_WORKER(4)); // (4)
329 ASSERT_EQ(-EWOULDBLOCK,
330 ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, pthread_self()));
331 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, pthread_self()));
333 // Wake up thread to unlock shared lock
334 TWICE(PING_WORKER(1); // (R1)
335 WAIT_WORKER(5)); // (5)
337 // Now we can lock exclusively
338 // Upgrade to exclusive lock (as per POSIX)
339 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, pthread_self()));
341 TWICE( // Wake up thread to lock shared lock
342 PING_WORKER(2); // (R2)
344 // Shall not have lock immediately
345 NOT_WAIT_WORKER(6)); // (6)
347 // Release lock ; thread will get it
348 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, pthread_self()));
349 TWICE(WAIT_WORKER(6); // (6)
351 // We no longer have the lock
352 ASSERT_EQ(-EWOULDBLOCK,
353 ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, pthread_self()));
354 ASSERT_EQ(-EWOULDBLOCK,
355 ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, pthread_self()));
357 // Wake up thread to unlock exclusive lock
358 PING_WORKER(3); // (R3)
359 WAIT_WORKER(7); // (7)
362 // We can lock it again
363 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, pthread_self()));
364 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, pthread_self()));
// Both workers must have exited cleanly (NULL return).
367 void *retval = (void*) (uintptr_t) -1;
368 ASSERT_EQ(0, pthread_join(thread[0], &retval));
369 ASSERT_EQ(NULL, retval);
370 ASSERT_EQ(0, pthread_join(thread[1], &retval));
371 ASSERT_EQ(NULL, retval);
373 ASSERT_EQ(0, ceph_close(cmount, fd));
374 ASSERT_EQ(0, ceph_unlink(cmount, c_file));
378 /* Locking in different processes */
// PROCESS_SLOW_MS(): shadows the file-level waitMs with a larger value
// (100 ms) inside process-based tests, where scheduling latency is higher
// than between threads. NOTE(review): the macro body continues on lines
// not visible in this chunk.
380 #define PROCESS_SLOW_MS() \
381 static const long waitMs = 100; \
384 // Used by ConcurrentLocking test
// Forked-child half of the inter-process lock handshake. Mirrors
// thread_ConcurrentLocking but mounts its own libcephfs client and uses
// the child's pid as the flock owner token. NOTE(review): STARTUP_CEPH(),
// the PING_MAIN calls, the ts declaration and the final cleanup/exit are
// on lines not visible in this chunk -- confirm against the full file.
385 static void process_ConcurrentLocking(str_ConcurrentLocking& s) {
386 const pid_t mypid = getpid();
389 struct ceph_mount_info *cmount = NULL;
395 const int fd = ceph_open(cmount, s.file, O_RDWR | O_CREAT, fileMode);
397 WAIT_MAIN(1); // (R1)
// Main holds LOCK_EX: non-blocking attempt must fail, then block until
// main unlocks.
399 ASSERT_EQ(-EWOULDBLOCK,
400 ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
402 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
405 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
// Hold a shared lock so main can also lock shared but not exclusive.
408 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, mypid));
411 WAIT_MAIN(2); // (R2)
412 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
415 WAIT_MAIN(3); // (R3)
// Blocks until main releases its exclusive lock.
416 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
419 WAIT_MAIN(4); // (R4)
420 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
// Inter-process variant: the locker runs in a fork()ed child with its own
// ceph mount, and the handshake semaphores live in a MAP_SHARED anonymous
// mapping so sem_post/sem_timedwait work across process boundaries.
429 // Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
430 TEST(LibCephFS, DISABLED_InterProcessLocking) {
432 // Process synchronization
434 const pid_t mypid = getpid();
435 sprintf(c_file, "/flock_test_%d", mypid);
437 // Note: the semaphores MUST be on a shared memory segment
438 str_ConcurrentLocking *const shs =
439 reinterpret_cast<str_ConcurrentLocking*>
// NOTE(review): the tail of this mmap call (fd/offset arguments, likely
// -1, 0 for MAP_ANONYMOUS) and the sem_init(1) setup are on lines not
// visible in this chunk.
440 (mmap(0, sizeof(*shs), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
442 str_ConcurrentLocking &s = *shs;
446 // Start locker process
447 const pid_t pid = fork();
// NOTE(review): the 'if (pid == 0)' child branch wrapper around this call
// is elided here; the child runs the worker and exits.
450 process_ConcurrentLocking(s);
455 struct ceph_mount_info *cmount;
458 const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
// Hold the exclusive lock before releasing the child into step (1).
462 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
464 // Synchronization point with process (failure: process is dead)
465 PING_WORKER(1); // (R1)
466 WAIT_WORKER(1); // (1)
468 // Shall not have lock immediately
469 NOT_WAIT_WORKER(2); // (2)
// Release; the child's blocking LOCK_EX can now succeed.
472 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
475 // Synchronization point with process (failure: process is dead)
476 WAIT_WORKER(2); // (2)
478 // Synchronization point with process (failure: process is dead)
479 WAIT_WORKER(3); // (3)
481 // Wait for process to share lock
482 WAIT_WORKER(4); // (4)
483 ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
484 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));
486 // Wake up process to unlock shared lock
487 PING_WORKER(2); // (R2)
488 WAIT_WORKER(5); // (5)
490 // Now we can lock exclusively
491 // Upgrade to exclusive lock (as per POSIX)
492 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
494 // Wake up process to lock shared lock
495 PING_WORKER(3); // (R3)
497 // Shall not have lock immediately
498 NOT_WAIT_WORKER(6); // (6)
500 // Release lock ; process will get it
501 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
502 WAIT_WORKER(6); // (6)
504 // We no longer have the lock
505 ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
506 ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));
508 // Wake up process to unlock exclusive lock
509 PING_WORKER(4); // (R4)
510 WAIT_WORKER(7); // (7)
512 // We can lock it again
513 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
514 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
// Reap the child and require a clean exit status.
518 ASSERT_EQ(pid, waitpid(pid, &status, 0));
519 ASSERT_EQ(EXIT_SUCCESS, status);
523 ASSERT_EQ(0, munmap(shs, sizeof(*shs)));
524 ASSERT_EQ(0, ceph_close(cmount, fd));
525 ASSERT_EQ(0, ceph_unlink(cmount, c_file));
// Two-child variant of the inter-process test: both children run
// process_ConcurrentLocking against the same shared-memory handshake
// state; TWICE(...) performs each handshake step once per child.
529 // Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
530 TEST(LibCephFS, DISABLED_ThreesomeInterProcessLocking) {
532 // Process synchronization
534 const pid_t mypid = getpid();
535 sprintf(c_file, "/flock_test_%d", mypid);
537 // Note: the semaphores MUST be on a shared memory segment
538 str_ConcurrentLocking *const shs =
539 reinterpret_cast<str_ConcurrentLocking*>
// NOTE(review): the tail of this mmap call and the sem_init(1) setup are
// on lines not visible in this chunk.
540 (mmap(0, sizeof(*shs), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
542 str_ConcurrentLocking &s = *shs;
546 // Start locker processes
// NOTE(review): the fork() calls assigning pid[0]/pid[1] and the child
// branch wrappers are on lines not visible in this chunk.
549 ASSERT_GE(pid[0], 0);
551 process_ConcurrentLocking(s);
555 ASSERT_GE(pid[1], 0);
557 process_ConcurrentLocking(s);
562 struct ceph_mount_info *cmount;
565 const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
// Hold the exclusive lock before releasing the children into step (1).
569 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
571 // Synchronization point with process (failure: process is dead)
572 TWICE(PING_WORKER(1)); // (R1)
573 TWICE(WAIT_WORKER(1)); // (1)
575 // Shall not have lock immediately
576 NOT_WAIT_WORKER(2); // (2)
// Release; both children will serially acquire and release LOCK_EX.
579 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
582 TWICE(// Synchronization point with process (failure: process is dead)
583 WAIT_WORKER(2); // (2)
585 // Synchronization point with process (failure: process is dead)
586 WAIT_WORKER(3)); // (3)
588 // Wait for process to share lock
589 TWICE(WAIT_WORKER(4)); // (4)
590 ASSERT_EQ(-EWOULDBLOCK,
591 ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
592 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));
594 // Wake up process to unlock shared lock
595 TWICE(PING_WORKER(2); // (R2)
596 WAIT_WORKER(5)); // (5)
598 // Now we can lock exclusively
599 // Upgrade to exclusive lock (as per POSIX)
600 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
602 TWICE( // Wake up process to lock shared lock
603 PING_WORKER(3); // (R3)
605 // Shall not have lock immediately
606 NOT_WAIT_WORKER(6)); // (6)
608 // Release lock ; process will get it
609 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
610 TWICE(WAIT_WORKER(6); // (6)
612 // We no longer have the lock
613 ASSERT_EQ(-EWOULDBLOCK,
614 ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
615 ASSERT_EQ(-EWOULDBLOCK,
616 ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));
618 // Wake up process to unlock exclusive lock
619 PING_WORKER(4); // (R4)
620 WAIT_WORKER(7); // (7)
623 // We can lock it again
624 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
625 ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
// Reap both children and require clean exit statuses.
629 ASSERT_EQ(pid[0], waitpid(pid[0], &status, 0));
630 ASSERT_EQ(EXIT_SUCCESS, status);
631 ASSERT_EQ(pid[1], waitpid(pid[1], &status, 0));
632 ASSERT_EQ(EXIT_SUCCESS, status);
636 ASSERT_EQ(0, munmap(shs, sizeof(*shs)));
637 ASSERT_EQ(0, ceph_close(cmount, fd));
638 ASSERT_EQ(0, ceph_unlink(cmount, c_file));