Upgrade to 4.4.50-rt62
[kvmfornfv.git] / kernel / fs / nfsd / nfs4state.c
1 /*
2 *  Copyright (c) 2001 The Regents of the University of Michigan.
3 *  All rights reserved.
4 *
5 *  Kendrick Smith <kmsmith@umich.edu>
6 *  Andy Adamson <kandros@umich.edu>
7 *
8 *  Redistribution and use in source and binary forms, with or without
9 *  modification, are permitted provided that the following conditions
10 *  are met:
11 *
12 *  1. Redistributions of source code must retain the above copyright
13 *     notice, this list of conditions and the following disclaimer.
14 *  2. Redistributions in binary form must reproduce the above copyright
15 *     notice, this list of conditions and the following disclaimer in the
16 *     documentation and/or other materials provided with the distribution.
17 *  3. Neither the name of the University nor the names of its
18 *     contributors may be used to endorse or promote products derived
19 *     from this software without specific prior written permission.
20 *
21 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include "xdr4.h"
46 #include "xdr4cb.h"
47 #include "vfs.h"
48 #include "current_stateid.h"
49
50 #include "netns.h"
51 #include "pnfs.h"
52
#define NFSDDBG_FACILITY                NFSDDBG_PROC

/* Initializer for an opaque stateid value with every bit set. */
#define all_ones {{~0,~0},~0}
/* The special "all ones" stateid — NOTE(review): presumably the RFC
 * special stateid that bypasses normal stateid checks; confirm usage. */
static const stateid_t one_stateid = {
        .si_generation = ~0,
        .si_opaque = all_ones,
};
/* The special all-zero stateid. */
static const stateid_t zero_stateid = {
        /* all fields zero */
};
/* Sentinel matched by CURRENT_STATEID(); only si_generation == 1 set. */
static const stateid_t currentstateid = {
        .si_generation = 1,
};

/* NOTE(review): presumably the next session id to hand out; the code
 * that advances it is outside this chunk — confirm. */
static u64 current_sessionid = 1;

/* Compare a client-supplied stateid against the special values above. */
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
72
73 /* forward declarations */
74 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
75 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
76
77 /* Locking: */
78
79 /*
80  * Currently used for the del_recall_lru and file hash table.  In an
81  * effort to decrease the scope of the client_mutex, this spinlock may
82  * eventually cover more:
83  */
84 static DEFINE_SPINLOCK(state_lock);
85
86 /*
87  * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
88  * the refcount on the open stateid to drop.
89  */
90 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
91
92 static struct kmem_cache *openowner_slab;
93 static struct kmem_cache *lockowner_slab;
94 static struct kmem_cache *file_slab;
95 static struct kmem_cache *stateid_slab;
96 static struct kmem_cache *deleg_slab;
97 static struct kmem_cache *odstate_slab;
98
99 static void free_session(struct nfsd4_session *);
100
101 static struct nfsd4_callback_ops nfsd4_cb_recall_ops;
102
103 static bool is_session_dead(struct nfsd4_session *ses)
104 {
105         return ses->se_flags & NFS4_SESSION_DEAD;
106 }
107
/*
 * Mark a session dead so no new references can be taken.  Fails with
 * nfserr_jukebox while anyone other than the caller still holds a
 * reference (@ref_held_by_me is how many references the caller owns).
 * NOTE(review): the "_locked" suffix suggests the caller must hold the
 * client_lock — confirm against callers.
 */
static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}
115
116 static bool is_client_expired(struct nfs4_client *clp)
117 {
118         return clp->cl_time == 0;
119 }
120
/*
 * Take a reference on a live client.  Fails with nfserr_expired if the
 * client has already been marked expired.  Caller holds nn->client_lock.
 */
static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}
132
133 /* must be called under the client_lock */
134 static inline void
135 renew_client_locked(struct nfs4_client *clp)
136 {
137         struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
138
139         if (is_client_expired(clp)) {
140                 WARN_ON(1);
141                 printk("%s: client (clientid %08x/%08x) already expired\n",
142                         __func__,
143                         clp->cl_clientid.cl_boot,
144                         clp->cl_clientid.cl_id);
145                 return;
146         }
147
148         dprintk("renewing client (clientid %08x/%08x)\n",
149                         clp->cl_clientid.cl_boot,
150                         clp->cl_clientid.cl_id);
151         list_move_tail(&clp->cl_lru, &nn->client_lru);
152         clp->cl_time = get_seconds();
153 }
154
/*
 * Drop a client reference under nn->client_lock; when the last
 * reference goes away, renew the lease of a still-live client so it is
 * not immediately reaped.
 */
static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}
166
/*
 * Lock-taking variant of put_client_renew_locked(): only acquires
 * nn->client_lock when the refcount actually drops to zero.
 */
static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}
177
/*
 * Take references on a session and its owning client.  Fails if the
 * session is dead or the client has expired; on failure no references
 * are held by the caller.
 */
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}
190
/*
 * Drop the references taken by nfsd4_get_session_locked().  A dead
 * session is freed once its last reference is gone; the client
 * reference is always released (possibly renewing the client's lease).
 */
static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}
202
/* Lock-taking wrapper around nfsd4_put_session_locked(). */
static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}
212
/* Take a reference on a stateowner and return it, for call chaining. */
static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}
219
220 static int
221 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
222 {
223         return (sop->so_owner.len == owner->len) &&
224                 0 == memcmp(sop->so_owner.data, owner->data, owner->len);
225 }
226
/*
 * Look up an open-owner by owner name on the client's owner-string hash
 * chain @hashval.  On a hit, returns the openowner with a reference
 * taken; returns NULL if not found.  Caller holds clp->cl_lock.
 */
static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		/* The chain also holds lockowners; skip those. */
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}
244
/* Lock-taking wrapper around find_openstateowner_str_locked(). */
static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}
256
257 static inline u32
258 opaque_hashval(const void *ptr, int nbytes)
259 {
260         unsigned char *cptr = (unsigned char *) ptr;
261
262         u32 x = 0;
263         while (nbytes--) {
264                 x *= 37;
265                 x += *cptr++;
266         }
267         return x;
268 }
269
/* RCU callback: final free of an nfs4_file once readers are done. */
static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}
276
/*
 * Drop a reference on an nfs4_file.  The last put unhashes the file
 * under state_lock and defers the actual free to an RCU grace period,
 * so lockless hash walkers never see freed memory.
 */
void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		/* All per-client state should be gone by the final put. */
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}
290
/*
 * Return a new reference to the struct file cached for open mode
 * @oflag, or NULL if none is cached.  NOTE(review): callers in this
 * chunk hold f->fi_lock; confirm before calling without it.
 */
static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return get_file(f->fi_fds[oflag]);
	return NULL;
}
298
/*
 * Get a reference to a file usable for writing: prefer the write-only
 * file, fall back to the read-write one.  Caller holds f->fi_lock.
 */
static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}
311
/* Lock-taking wrapper around find_writeable_file_locked(). */
static struct file *
find_writeable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}
323
/*
 * Get a reference to a file usable for reading: prefer the read-only
 * file, fall back to the read-write one.  Caller holds f->fi_lock.
 */
static struct file *find_readable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}
335
/* Lock-taking wrapper around find_readable_file_locked(). */
static struct file *
find_readable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}
347
348 struct file *
349 find_any_file(struct nfs4_file *f)
350 {
351         struct file *ret;
352
353         spin_lock(&f->fi_lock);
354         ret = __nfs4_get_fd(f, O_RDWR);
355         if (!ret) {
356                 ret = __nfs4_get_fd(f, O_WRONLY);
357                 if (!ret)
358                         ret = __nfs4_get_fd(f, O_RDONLY);
359         }
360         spin_unlock(&f->fi_lock);
361         return ret;
362 }
363
364 static atomic_long_t num_delegations;
365 unsigned long max_delegations;
366
367 /*
368  * Open owner state (share locks)
369  */
370
371 /* hash tables for lock and open owners */
372 #define OWNER_HASH_BITS              8
373 #define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
374 #define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
375
376 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
377 {
378         unsigned int ret;
379
380         ret = opaque_hashval(ownername->data, ownername->len);
381         return ret & OWNER_HASH_MASK;
382 }
383
384 /* hash table for nfs4_file */
385 #define FILE_HASH_BITS                   8
386 #define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
387
/* Full 32-bit Jenkins hash over a file handle's data. */
static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}
392
/* Index into file_hashtbl for a file handle. */
static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}
397
398 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
399
/*
 * Bump the per-mode access counts for the share-access bits in @access;
 * NFS4_SHARE_ACCESS_BOTH bumps both counters.  Caller holds fp->fi_lock.
 */
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}
410
/*
 * Validate a requested share-access mode against the file's current
 * deny mode and, if compatible, take the access counts.  Returns
 * nfserr_inval, nfserr_share_denied, or nfs_ok.  Caller holds fi_lock.
 */
static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}
427
428 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
429 {
430         /* Common case is that there is no deny mode. */
431         if (deny) {
432                 /* Does this deny mode make sense? */
433                 if (deny & ~NFS4_SHARE_DENY_BOTH)
434                         return nfserr_inval;
435
436                 if ((deny & NFS4_SHARE_DENY_READ) &&
437                     atomic_read(&fp->fi_access[O_RDONLY]))
438                         return nfserr_share_denied;
439
440                 if ((deny & NFS4_SHARE_DENY_WRITE) &&
441                     atomic_read(&fp->fi_access[O_WRONLY]))
442                         return nfserr_share_denied;
443         }
444         return nfs_ok;
445 }
446
/*
 * Drop one reference on the @oflag access count.  When it hits zero,
 * close the corresponding cached file, and also the O_RDWR file if the
 * opposite mode is no longer in use.  fput() happens after dropping
 * fi_lock, since it may sleep.
 */
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct file *f1 = NULL;
		struct file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			fput(f1);
		if (f2)
			fput(f2);
	}
}
465
/* Release the access counts taken for the share bits in @access. */
static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}
475
476 /*
477  * Allocate a new open/delegation state counter. This is needed for
478  * pNFS for proper return on close semantics.
479  *
480  * Note that we only allocate it for pNFS-enabled exports, otherwise
481  * all pointers to struct nfs4_clnt_odstate are always NULL.
482  */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		/* Caller's reference; freed by the last put_clnt_odstate(). */
		atomic_set(&co->co_odcount, 1);
	}
	return co;
}
495
/* Link an odstate onto its file's per-file list.  Caller holds fi_lock. */
static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}
504
/* Take a reference on an odstate; NULL is tolerated (non-pNFS case). */
static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		atomic_inc(&co->co_odcount);
}
511
/*
 * Drop a reference on an odstate.  The final put unhashes it from the
 * file, returns the client's outstanding layouts on that file, and
 * frees it.  NULL is tolerated (non-pNFS case).
 */
static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}
529
/*
 * Find this client's existing odstate for @fp (taking a reference), or,
 * if there is none, hash @new and return it.  Callers can tell whether
 * @new was consumed by comparing it with the return value.  Returns
 * NULL only when @new is NULL (non-pNFS case).
 */
static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}
555
/*
 * Allocate a generic stateid of the type backed by @slab and install it
 * in the client's stateid idr.  The id is allocated cyclically (see the
 * comment below), the generation starts at 0, and the stid starts with
 * a single reference owned by the caller.  @sc_free is the destructor
 * later invoked by nfs4_put_stid().  Returns NULL on failure.
 */
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	/* Preload so the GFP_NOWAIT allocation under cl_lock can't fail
	 * for lack of memory. */
	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	atomic_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}
596
597 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
598 {
599         struct nfs4_stid *stid;
600
601         stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
602         if (!stid)
603                 return NULL;
604
605         return openlockstateid(stid);
606 }
607
/* sc_free destructor for delegation stateids; drops the global count. */
static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}
613
614 /*
615  * When we recall a delegation, we should be careful not to hand it
616  * out again straight away.
617  * To ensure this we keep a pair of bloom filters ('new' and 'old')
618  * in which the filehandles of recalled delegations are "stored".
619  * If a filehandle appear in either filter, a delegation is blocked.
620  * When a delegation is recalled, the filehandle is stored in the "new"
621  * filter.
622  * Every 30 seconds we swap the filters and clear the "new" one,
623  * unless both are empty of course.
624  *
625  * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
626  * low 3 bytes as hash-table indices.
627  *
628  * 'blocked_delegations_lock', which is always taken in block_delegations(),
629  * is used to manage concurrent access.  Testing does not need the lock
630  * except when swapping the two filters.
631  */
632 static DEFINE_SPINLOCK(blocked_delegations_lock);
633 static struct bloom_pair {
634         int     entries, old_entries;
635         time_t  swap_time;
636         int     new; /* index into 'set' */
637         DECLARE_BITMAP(set[2], 256);
638 } blocked_delegations;
639
/*
 * Return 1 if delegations on @fh are currently blocked by the bloom
 * filters above.  The unlocked reads of bd->entries and bd->swap_time
 * are intentional (see the comment above the filters); only the filter
 * swap itself takes blocked_delegations_lock.
 */
static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (seconds_since_boot() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		/* Recheck under the lock: another CPU may have swapped. */
		if (seconds_since_boot() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = seconds_since_boot();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	/* Blocked if all three hash-derived bits are set in either filter. */
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}
672
/*
 * Record @fh in the "new" bloom filter so further delegation grants on
 * it are blocked until the filters cycle (see delegation_blocked()).
 */
static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	/* First entry restarts the filter-cycle clock. */
	if (bd->entries == 0)
		bd->swap_time = seconds_since_boot();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}
689
/*
 * Allocate and minimally initialize a read delegation for @clp on the
 * file named by @current_fh.  Enforces the global max_delegations limit
 * and the recently-recalled filehandle block list.  The delegation is
 * not yet hashed (see hash_delegation_locked()).  Returns NULL on
 * failure.
 */
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}
727
/*
 * Drop a reference on a generic stateid.  On the final put the stid is
 * removed from the client's idr, destroyed via its sc_free callback,
 * and its file reference (if any) is released.  Non-final puts wake
 * close_wq for waiting NFSv4.0 CLOSE operations (see close_wq above).
 */
void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}
746
/*
 * Bump a stateid's generation (skipping the reserved value 0 on wrap)
 * and copy the result to @dst for return to the client.
 */
void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}
758
/*
 * Drop one delegee's reference on the file's lease; the last reference
 * removes the vfs lease and closes the lease file.  vfs_setlease() and
 * fput() run outside fi_lock since they may sleep.
 */
static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	struct file *filp = NULL;

	spin_lock(&fp->fi_lock);
	if (fp->fi_deleg_file && --fp->fi_delegees == 0)
		swap(filp, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (filp) {
		vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
		fput(filp);
	}
}
773
/* Clear a stateid's type so type-based lookups no longer match it. */
void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}
778
/**
 * nfs4_get_existing_delegation - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if an existing delegation was not found.
 *
 *      On error: -EAGAIN if one was previously granted to this nfs4_client
 *                 for this nfs4_file.
 *
 */

static int
nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return -EAGAIN;
		}
	}
	return 0;
}
809
/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */

static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	int status;
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	/* Refuse a second delegation on the same file to the same client. */
	status = nfs4_get_existing_delegation(clp, fp);
	if (status)
		return status;
	++fp->fi_delegees;
	atomic_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}
842
/*
 * Remove a delegation from the file, client, and recall-lru lists.
 * Returns false if it was already unhashed (dl_perfile empty).
 * Caller holds state_lock.
 */
static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (list_empty(&dp->dl_perfile))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}
863
/*
 * Unhash a delegation and, if this caller did the unhashing, release
 * its odstate, lease, and stateid references.
 */
static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed) {
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
}
877
/*
 * Dispose of an unhashed delegation that is being revoked.  NFSv4.0
 * clients get the stid freed outright; 4.1+ clients have it marked
 * NFS4_REVOKED_DELEG_STID and parked on cl_revoked — NOTE(review):
 * the later processing of cl_revoked is outside this chunk; confirm.
 */
static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_put_deleg_lease(dp->dl_stid.sc_file);

	if (clp->cl_minorversion == 0)
		nfs4_put_stid(&dp->dl_stid);
	else {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
}
896
897 /* 
898  * SETCLIENTID state 
899  */
900
/* Index into the clientid hash table for a clientid's id part. */
static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}
905
/*
 * Hash a client name string.  Note: only the first 8 bytes of @name
 * are hashed, regardless of its actual length.
 */
static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}
910
911 /*
912  * We store the NONE, READ, WRITE, and BOTH bits separately in the
913  * st_{access,deny}_bmap field of the stateid, in order to track not
914  * only what share bits are currently in force, but also what
915  * combinations of share bits previous opens have used.  This allows us
916  * to enforce the recommendation of rfc 3530 14.2.19 that the server
917  * return an error if the client attempt to downgrade to a combination
918  * of share bits not explicable by closing some of its previous opens.
919  *
920  * XXX: This enforcement is actually incomplete, since we don't keep
921  * track of access/deny bit combinations; so, e.g., we allow:
922  *
923  *      OPEN allow read, deny write
924  *      OPEN allow both, deny none
925  *      DOWNGRADE allow read, deny none
926  *
927  * which we should reject.
928  */
/*
 * Collapse a st_{access,deny}_bmap bit history into a plain share mode:
 * bits 1-3 stand for the share values READ (1), WRITE (2), BOTH (3)
 * and OR together directly.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
	unsigned int mode = 0;
	int bit;

	for (bit = 1; bit < 4; bit++) {
		if (test_bit(bit, &bmap))
			mode |= bit;
	}
	return mode;
}
940
/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	/* Each share-access value (1..3) has its own bit in the bmap. */
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}
950
/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	/* Drop only the bit for this exact share-access value. */
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}
960
961 /* test whether a given stateid has access */
962 static inline bool
963 test_access(u32 access, struct nfs4_ol_stateid *stp)
964 {
965         unsigned char mask = 1 << access;
966
967         return (bool)(stp->st_access_bmap & mask);
968 }
969
970 /* set share deny for a given stateid */
971 static inline void
972 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
973 {
974         unsigned char mask = 1 << deny;
975
976         WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
977         stp->st_deny_bmap |= mask;
978 }
979
980 /* clear share deny for a given stateid */
981 static inline void
982 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
983 {
984         unsigned char mask = 1 << deny;
985
986         WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
987         stp->st_deny_bmap &= ~mask;
988 }
989
990 /* test whether a given stateid is denying specific access */
991 static inline bool
992 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
993 {
994         unsigned char mask = 1 << deny;
995
996         return (bool)(stp->st_deny_bmap & mask);
997 }
998
999 static int nfs4_access_to_omode(u32 access)
1000 {
1001         switch (access & NFS4_SHARE_ACCESS_BOTH) {
1002         case NFS4_SHARE_ACCESS_READ:
1003                 return O_RDONLY;
1004         case NFS4_SHARE_ACCESS_WRITE:
1005                 return O_WRONLY;
1006         case NFS4_SHARE_ACCESS_BOTH:
1007                 return O_RDWR;
1008         }
1009         WARN_ON_ONCE(1);
1010         return O_RDONLY;
1011 }
1012
/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
        struct nfs4_ol_stateid *stp;

        /* fi_share_deny caches the union of every attached stateid's
         * deny bits; rebuild it from scratch under fi_lock */
        spin_lock(&fp->fi_lock);
        fp->fi_share_deny = 0;
        list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
                fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
        spin_unlock(&fp->fi_lock);
}
1028
1029 static void
1030 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1031 {
1032         int i;
1033         bool change = false;
1034
1035         for (i = 1; i < 4; i++) {
1036                 if ((i & deny) != i) {
1037                         change = true;
1038                         clear_deny(i, stp);
1039                 }
1040         }
1041
1042         /* Recalculate per-file deny mode if there was a change */
1043         if (change)
1044                 recalculate_deny_mode(stp->st_stid.sc_file);
1045 }
1046
/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
        int i;
        struct nfs4_file *fp = stp->st_stid.sc_file;

        /* if this stateid contributed deny bits, the file's cached
         * deny union must be rebuilt without it */
        if (fp && stp->st_deny_bmap != 0)
                recalculate_deny_mode(fp);

        /* drop one file access reference for each share-access
         * combination (1..3) this stateid had recorded */
        for (i = 1; i < 4; i++) {
                if (test_access(i, stp))
                        nfs4_file_put_access(stp->st_stid.sc_file, i);
                clear_access(i, stp);
        }
}
1063
/* Free an owner's opaque name and hand the rest of the teardown to its
 * type-specific (open- or lockowner) destructor. */
static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
        kfree(sop->so_owner.data);
        sop->so_ops->so_free(sop);
}
1069
/* Drop a reference to a stateowner; on the final put, unhash it under
 * the client's cl_lock and free it. */
static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
        struct nfs4_client *clp = sop->so_client;

        might_lock(&clp->cl_lock);

        /* cl_lock is only taken when the count actually reaches zero */
        if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
                return;
        sop->so_ops->so_unhash(sop);
        spin_unlock(&clp->cl_lock);
        nfs4_free_stateowner(sop);
}
1082
/*
 * Detach an open/lock stateid from its file and owner lists.  Caller
 * must hold the owning client's cl_lock.  Returns false if the stateid
 * was already unhashed.
 */
static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
        struct nfs4_file *fp = stp->st_stid.sc_file;

        lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

        /* an empty st_perfile means someone beat us to the unhash */
        if (list_empty(&stp->st_perfile))
                return false;

        spin_lock(&fp->fi_lock);
        list_del_init(&stp->st_perfile);
        spin_unlock(&fp->fi_lock);
        list_del(&stp->st_perstateowner);
        return true;
}
1098
/* sc_free callback for open stateids: release the odstate, all access
 * modes and the owner reference before freeing the stateid itself. */
static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
        struct nfs4_ol_stateid *stp = openlockstateid(stid);

        put_clnt_odstate(stp->st_clnt_odstate);
        release_all_access(stp);
        if (stp->st_stateowner)
                nfs4_put_stateowner(stp->st_stateowner);
        kmem_cache_free(stateid_slab, stid);
}
1109
/* sc_free callback for lock stateids: release the lockowner's locks on
 * the file via filp_close, then free like an open stateid. */
static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
        struct nfs4_ol_stateid *stp = openlockstateid(stid);
        struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
        struct file *file;

        file = find_any_file(stp->st_stid.sc_file);
        if (file)
                /* drops any posix locks held by this owner, and the ref */
                filp_close(file, (fl_owner_t)lo);
        nfs4_free_ol_stateid(stid);
}
1121
/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
                                       struct list_head *reaplist)
{
        struct nfs4_stid *s = &stp->st_stid;
        struct nfs4_client *clp = s->sc_client;

        lockdep_assert_held(&clp->cl_lock);

        WARN_ON_ONCE(!list_empty(&stp->st_locks));

        if (!atomic_dec_and_test(&s->sc_count)) {
                /* not the last reference; wake anyone waiting for the
                 * count to drop */
                wake_up_all(&close_wq);
                return;
        }

        idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
        /* st_locks is known empty (see WARN above), so it can safely be
         * reused as the reaplist linkage */
        list_add(&stp->st_locks, reaplist);
}
1145
/* Unhash a lock stateid; cl_lock must be held (asserted via the parent
 * open stateid's owner).  Returns false if it was already unhashed. */
static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
        struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);

        lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);

        list_del_init(&stp->st_locks);
        nfs4_unhash_stid(&stp->st_stid);
        return unhash_ol_stateid(stp);
}
1156
/* Unhash a lock stateid under cl_lock and, if we were the ones to
 * unhash it, drop the hash table's reference. */
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
        struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
        bool unhashed;

        spin_lock(&oo->oo_owner.so_client->cl_lock);
        unhashed = unhash_lock_stateid(stp);
        spin_unlock(&oo->oo_owner.so_client->cl_lock);
        if (unhashed)
                nfs4_put_stid(&stp->st_stid);
}
1168
/* Remove a lockowner from the per-client owner-string hash; the
 * client's cl_lock must be held. */
static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
        struct nfs4_client *clp = lo->lo_owner.so_client;

        lockdep_assert_held(&clp->cl_lock);

        list_del_init(&lo->lo_owner.so_strhash);
}
1177
/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
        struct nfs4_ol_stateid *stp;
        struct nfs4_file *fp;

        /* sc_free and put_nfs4_file may block; no locks may be held */
        might_sleep();

        while (!list_empty(reaplist)) {
                stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
                                       st_locks);
                list_del(&stp->st_locks);
                /* grab the file pointer before sc_free frees the stateid */
                fp = stp->st_stid.sc_file;
                stp->st_stid.sc_free(&stp->st_stid);
                if (fp)
                        put_nfs4_file(fp);
        }
}
1200
/* Unhash every lock stateid hanging off an open stateid and queue them
 * on @reaplist for later freeing; the client's cl_lock must be held. */
static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
                                       struct list_head *reaplist)
{
        struct nfs4_ol_stateid *stp;

        lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

        while (!list_empty(&open_stp->st_locks)) {
                stp = list_entry(open_stp->st_locks.next,
                                struct nfs4_ol_stateid, st_locks);
                /* can't already be unhashed: we hold cl_lock throughout */
                WARN_ON(!unhash_lock_stateid(stp));
                put_ol_stateid_locked(stp, reaplist);
        }
}
1215
/* Unhash an open stateid and all its lock stateids; cl_lock held.
 * Returns true if this call actually performed the unhash. */
static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
                                struct list_head *reaplist)
{
        bool unhashed;

        lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

        unhashed = unhash_ol_stateid(stp);
        release_open_stateid_locks(stp, reaplist);
        return unhashed;
}
1227
/* Fully release one open stateid: unhash it (and its lock stateids)
 * under cl_lock, then free everything once the lock is dropped. */
static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
        LIST_HEAD(reaplist);

        spin_lock(&stp->st_stid.sc_client->cl_lock);
        if (unhash_open_stateid(stp, &reaplist))
                put_ol_stateid_locked(stp, &reaplist);
        spin_unlock(&stp->st_stid.sc_client->cl_lock);
        free_ol_stateid_reaplist(&reaplist);
}
1238
/* Remove an openowner from the owner-string hash and the per-client
 * list; the client's cl_lock must be held. */
static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
        struct nfs4_client *clp = oo->oo_owner.so_client;

        lockdep_assert_held(&clp->cl_lock);

        list_del_init(&oo->oo_owner.so_strhash);
        list_del_init(&oo->oo_perclient);
}
1248
/* Drop the openowner's cached "last closed" stateid, if any.  It is
 * detached under the per-net client_lock and put after dropping it. */
static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
        struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
                                          nfsd_net_id);
        struct nfs4_ol_stateid *s;

        spin_lock(&nn->client_lock);
        s = oo->oo_last_closed_stid;
        if (s) {
                list_del_init(&oo->oo_close_lru);
                oo->oo_last_closed_stid = NULL;
        }
        spin_unlock(&nn->client_lock);
        /* nfs4_put_stid may free; do it outside the spinlock */
        if (s)
                nfs4_put_stid(&s->st_stid);
}
1265
/* Release an openowner along with every open (and lock) stateid it
 * still owns, then drop the caller's reference to it. */
static void release_openowner(struct nfs4_openowner *oo)
{
        struct nfs4_ol_stateid *stp;
        struct nfs4_client *clp = oo->oo_owner.so_client;
        struct list_head reaplist;

        INIT_LIST_HEAD(&reaplist);

        spin_lock(&clp->cl_lock);
        unhash_openowner_locked(oo);
        while (!list_empty(&oo->oo_owner.so_stateids)) {
                stp = list_first_entry(&oo->oo_owner.so_stateids,
                                struct nfs4_ol_stateid, st_perstateowner);
                if (unhash_open_stateid(stp, &reaplist))
                        put_ol_stateid_locked(stp, &reaplist);
        }
        spin_unlock(&clp->cl_lock);
        /* the actual freeing must happen without cl_lock held */
        free_ol_stateid_reaplist(&reaplist);
        release_last_closed_stateid(oo);
        nfs4_put_stateowner(&oo->oo_owner);
}
1287
/* Map a sessionid to a sessionid_hashtbl bucket via its sequence field. */
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
        struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

        return sid->sequence % SESSION_HASH_SIZE;
}
1295
#ifdef CONFIG_SUNRPC_DEBUG
/* Debug helper: print a sessionid as four 32-bit words. */
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
        u32 *ptr = (u32 *)(&sessionid->data[0]);
        dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
/* Compiled to nothing when SUNRPC debugging is disabled. */
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif
1309
/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
        struct nfs4_stateowner *so = cstate->replay_owner;

        /* a replayed request must leave the seqid and replay cache alone */
        if (nfserr == nfserr_replay_me)
                return;

        /* non-seqid-mutating errors also invalidate any cached replay */
        if (!seqid_mutating_err(ntohl(nfserr))) {
                nfsd4_cstate_clear_replay(cstate);
                return;
        }
        if (!so)
                return;
        if (so->so_is_open_owner)
                release_last_closed_stateid(openowner(so));
        so->so_seqid++;
        return;
}
1332
/* Build a new sessionid from the clientid plus a global sequence number. */
static void
gen_sessionid(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd4_sessionid *sid;

        sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
        sid->clientid = clp->cl_clientid;
        sid->sequence = current_sessionid++;
        sid->reserved = 0;
}
1344
1345 /*
1346  * The protocol defines ca_maxresponssize_cached to include the size of
1347  * the rpc header, but all we need to cache is the data starting after
1348  * the end of the initial SEQUENCE operation--the rest we regenerate
1349  * each time.  Therefore we can advertise a ca_maxresponssize_cached
1350  * value that is the number of bytes in our cache plus a few additional
1351  * bytes.  In order to stay on the safe side, and not promise more than
1352  * we can cache, those additional bytes must be the minimum possible: 24
1353  * bytes of rpc header (xid through accept state, with AUTH_NULL
1354  * verifier), 12 for the compound header (with zero-length tag), and 44
1355  * for the SEQUENCE op response:
1356  */
1357 #define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
1358
1359 static void
1360 free_session_slots(struct nfsd4_session *ses)
1361 {
1362         int i;
1363
1364         for (i = 0; i < ses->se_fchannel.maxreqs; i++)
1365                 kfree(ses->se_slots[i]);
1366 }
1367
1368 /*
1369  * We don't actually need to cache the rpc and session headers, so we
1370  * can allocate a little less for each slot:
1371  */
1372 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1373 {
1374         u32 size;
1375
1376         if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1377                 size = 0;
1378         else
1379                 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1380         return size + sizeof(struct nfsd4_slot);
1381 }
1382
/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
        u32 slotsize = slot_bytes(ca);
        u32 num = ca->maxreqs;
        int avail;

        /* clamp the request to both the per-session cap and whatever
         * DRC memory is still unreserved, then account for what we
         * actually granted */
        spin_lock(&nfsd_drc_lock);
        avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
                    nfsd_drc_max_mem - nfsd_drc_mem_used);
        num = min_t(int, num, avail / slotsize);
        nfsd_drc_mem_used += num * slotsize;
        spin_unlock(&nfsd_drc_lock);

        return num;
}
1403
/* Return the DRC memory reservation taken by nfsd4_get_drc_mem. */
static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
        int slotsize = slot_bytes(ca);

        spin_lock(&nfsd_drc_lock);
        nfsd_drc_mem_used -= slotsize * ca->maxreqs;
        spin_unlock(&nfsd_drc_lock);
}
1412
/*
 * Allocate a session plus one reply-cache slot per fore-channel request,
 * copying in both channels' negotiated attributes.  Returns NULL on
 * allocation failure, with all partial allocations undone.
 */
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
                                           struct nfsd4_channel_attrs *battrs)
{
        int numslots = fattrs->maxreqs;
        int slotsize = slot_bytes(fattrs);
        struct nfsd4_session *new;
        int mem, i;

        /* the slot-pointer array lives inline after the session struct,
         * so the whole thing must fit in one page */
        BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
                        + sizeof(struct nfsd4_session) > PAGE_SIZE);
        mem = numslots * sizeof(struct nfsd4_slot *);

        new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
        if (!new)
                return NULL;
        /* allocate each struct nfsd4_slot and data cache in one piece */
        for (i = 0; i < numslots; i++) {
                new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
                if (!new->se_slots[i])
                        goto out_free;
        }

        memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
        memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

        return new;
out_free:
        while (i--)
                kfree(new->se_slots[i]);
        kfree(new);
        return NULL;
}
1445
/* Release a connection's transport reference and free it. */
static void free_conn(struct nfsd4_conn *c)
{
        svc_xprt_put(c->cn_xprt);
        kfree(c);
}
1451
/* xpt_user callback: the underlying transport went away.  Detach the
 * connection from its session (if still attached) and re-probe the
 * callback channel, which may have been using this connection. */
static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
        struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
        struct nfs4_client *clp = c->cn_session->se_client;

        spin_lock(&clp->cl_lock);
        /* an empty cn_persession means nfsd4_del_conns got here first */
        if (!list_empty(&c->cn_persession)) {
                list_del(&c->cn_persession);
                free_conn(c);
        }
        nfsd4_probe_callback(clp);
        spin_unlock(&clp->cl_lock);
}
1465
1466 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1467 {
1468         struct nfsd4_conn *conn;
1469
1470         conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1471         if (!conn)
1472                 return NULL;
1473         svc_xprt_get(rqstp->rq_xprt);
1474         conn->cn_xprt = rqstp->rq_xprt;
1475         conn->cn_flags = flags;
1476         INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1477         return conn;
1478 }
1479
/* Attach a connection to a session; caller holds the client's cl_lock. */
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        conn->cn_session = ses;
        list_add(&conn->cn_persession, &ses->se_conns);
}
1485
/* Attach a connection to a session, taking the client's cl_lock. */
static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;

        spin_lock(&clp->cl_lock);
        __nfsd4_hash_conn(conn, ses);
        spin_unlock(&clp->cl_lock);
}
1494
/* Ask to be notified when the transport closes; a nonzero return means
 * the transport is already on its way down. */
static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
        conn->cn_xpt_user.callback = nfsd4_conn_lost;
        return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
1500
/* Hash a new connection into a session and register for transport-down
 * notification, then re-probe the callback channel. */
static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        int ret;

        nfsd4_hash_conn(conn, ses);
        ret = nfsd4_register_conn(conn);
        if (ret)
                /* oops; xprt is already down: */
                nfsd4_conn_lost(&conn->cn_xpt_user);
        /* We may have gained or lost a callback channel: */
        nfsd4_probe_callback_sync(ses->se_client);
}
1513
1514 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1515 {
1516         u32 dir = NFS4_CDFC4_FORE;
1517
1518         if (cses->flags & SESSION4_BACK_CHAN)
1519                 dir |= NFS4_CDFC4_BACK;
1520         return alloc_conn(rqstp, dir);
1521 }
1522
/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
        struct nfs4_client *clp = s->se_client;
        struct nfsd4_conn *c;

        spin_lock(&clp->cl_lock);
        while (!list_empty(&s->se_conns)) {
                c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
                list_del_init(&c->cn_persession);
                spin_unlock(&clp->cl_lock);

                /* unregister/free may sleep, so drop cl_lock first; the
                 * list_del_init above keeps nfsd4_conn_lost from also
                 * freeing this connection */
                unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
                free_conn(c);

                spin_lock(&clp->cl_lock);
        }
        spin_unlock(&clp->cl_lock);
}
1542
/* Free a session's reply-cache slots and the session itself; no
 * connection teardown or DRC accounting is done here. */
static void __free_session(struct nfsd4_session *ses)
{
        free_session_slots(ses);
        kfree(ses);
}
1548
/* Full session teardown: drop its connections, return its DRC memory
 * reservation, then free it. */
static void free_session(struct nfsd4_session *ses)
{
        nfsd4_del_conns(ses);
        nfsd4_put_drc_mem(&ses->se_fchannel);
        __free_session(ses);
}
1555
/* Initialize a freshly-allocated session from the CREATE_SESSION args
 * and hash it for lookup (per-net table and per-client list). */
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
        int idx;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        new->se_client = clp;
        gen_sessionid(new);

        INIT_LIST_HEAD(&new->se_conns);

        /* backchannel sequence numbering starts at 1 */
        new->se_cb_seq_nr = 1;
        new->se_flags = cses->flags;
        new->se_cb_prog = cses->callback_prog;
        new->se_cb_sec = cses->cb_sec;
        atomic_set(&new->se_ref, 0);
        idx = hash_sessionid(&new->se_sessionid);
        list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
        spin_lock(&clp->cl_lock);
        list_add(&new->se_perclnt, &clp->cl_sessions);
        spin_unlock(&clp->cl_lock);

        {
                struct sockaddr *sa = svc_addr(rqstp);
                /*
                 * This is a little silly; with sessions there's no real
                 * use for the callback address.  Use the peer address
                 * as a reasonable default for now, but consider fixing
                 * the rpc client not to require an address in the
                 * future:
                 */
                rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
                clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
        }
}
1590
/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
        struct nfsd4_session *elem;
        int idx;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        dump_sessionid(__func__, sessionid);
        idx = hash_sessionid(sessionid);
        /* Search in the appropriate list */
        list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
                if (!memcmp(elem->se_sessionid.data, sessionid->data,
                            NFS4_MAX_SESSIONID_LEN)) {
                        return elem;
                }
        }

        dprintk("%s: session not found\n", __func__);
        /* NULL: no session with this id is currently hashed */
        return NULL;
}
1614
1615 static struct nfsd4_session *
1616 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1617                 __be32 *ret)
1618 {
1619         struct nfsd4_session *session;
1620         __be32 status = nfserr_badsession;
1621
1622         session = __find_in_sessionid_hashtbl(sessionid, net);
1623         if (!session)
1624                 goto out;
1625         status = nfsd4_get_session_locked(session);
1626         if (status)
1627                 session = NULL;
1628 out:
1629         *ret = status;
1630         return session;
1631 }
1632
/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        /* se_hash is protected by client_lock, se_perclnt by cl_lock */
        list_del(&ses->se_hash);
        spin_lock(&ses->se_client->cl_lock);
        list_del(&ses->se_perclnt);
        spin_unlock(&ses->se_client->cl_lock);
}
1647
1648 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1649 static int
1650 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1651 {
1652         /*
1653          * We're assuming the clid was not given out from a boot
1654          * precisely 2^32 (about 136 years) before this one.  That seems
1655          * a safe assumption:
1656          */
1657         if (clid->cl_boot == (u32)nn->boot_time)
1658                 return 0;
1659         dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1660                 clid->cl_boot, clid->cl_id, nn->boot_time);
1661         return 1;
1662 }
1663
/* 
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
        struct nfs4_client *clp;
        int i;

        clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
        if (clp == NULL)
                return NULL;
        /* the client keeps its own copy of the opaque name */
        clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
        if (clp->cl_name.data == NULL)
                goto err_no_name;
        clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
                        OWNER_HASH_SIZE, GFP_KERNEL);
        if (!clp->cl_ownerstr_hashtbl)
                goto err_no_hashtbl;
        for (i = 0; i < OWNER_HASH_SIZE; i++)
                INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
        clp->cl_name.len = name.len;
        INIT_LIST_HEAD(&clp->cl_sessions);
        idr_init(&clp->cl_stateids);
        atomic_set(&clp->cl_refcount, 0);
        clp->cl_cb_state = NFSD4_CB_UNKNOWN;
        INIT_LIST_HEAD(&clp->cl_idhash);
        INIT_LIST_HEAD(&clp->cl_openowners);
        INIT_LIST_HEAD(&clp->cl_delegations);
        INIT_LIST_HEAD(&clp->cl_lru);
        INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
        INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
        spin_lock_init(&clp->cl_lock);
        rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
        return clp;
err_no_hashtbl:
        kfree(clp->cl_name.data);
err_no_name:
        kfree(clp);
        return NULL;
}
1708
/* Final teardown of a client: free any sessions still on its list, its
 * credential, the owner hash table and the name copy. */
static void
free_client(struct nfs4_client *clp)
{
        while (!list_empty(&clp->cl_sessions)) {
                struct nfsd4_session *ses;
                ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
                                se_perclnt);
                list_del(&ses->se_perclnt);
                /* nobody should still hold a session reference here */
                WARN_ON_ONCE(atomic_read(&ses->se_ref));
                free_session(ses);
        }
        rpc_destroy_wait_queue(&clp->cl_cb_waitq);
        free_svc_cred(&clp->cl_cred);
        kfree(clp->cl_ownerstr_hashtbl);
        kfree(clp->cl_name.data);
        idr_destroy(&clp->cl_stateids);
        kfree(clp);
}
1727
/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
        struct nfsd4_session *ses;

        lockdep_assert_held(&nn->client_lock);

        /* Mark the client as expired! */
        clp->cl_time = 0;
        /* Make it invisible */
        if (!list_empty(&clp->cl_idhash)) {
                list_del_init(&clp->cl_idhash);
                /* the name tree used depends on confirmation state */
                if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
                        rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
                else
                        rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
        }
        list_del_init(&clp->cl_lru);
        /* drop all of the client's sessions from the sessionid hash */
        spin_lock(&clp->cl_lock);
        list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
                list_del_init(&ses->se_hash);
        spin_unlock(&clp->cl_lock);
}
1753
/* Unhash a client, taking the per-net client_lock ourselves. */
static void
unhash_client(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        spin_lock(&nn->client_lock);
        unhash_client_locked(clp);
        spin_unlock(&nn->client_lock);
}
1763
/* Expire a client unless it is still referenced; client_lock must be
 * held.  Returns nfserr_jukebox when the client is busy. */
static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
        if (atomic_read(&clp->cl_refcount))
                return nfserr_jukebox;
        unhash_client_locked(clp);
        return nfs_ok;
}
1771
/* Tear down all state belonging to an (already unhashed) client:
 * delegations, revoked delegations, openowners, layouts and callbacks,
 * then free the client structure itself. */
static void
__destroy_client(struct nfs4_client *clp)
{
        struct nfs4_openowner *oo;
        struct nfs4_delegation *dp;
        struct list_head reaplist;

        INIT_LIST_HEAD(&reaplist);
        /* detach all delegations under state_lock, free them after */
        spin_lock(&state_lock);
        while (!list_empty(&clp->cl_delegations)) {
                dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
                WARN_ON(!unhash_delegation_locked(dp));
                list_add(&dp->dl_recall_lru, &reaplist);
        }
        spin_unlock(&state_lock);
        while (!list_empty(&reaplist)) {
                dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
                list_del_init(&dp->dl_recall_lru);
                put_clnt_odstate(dp->dl_clnt_odstate);
                nfs4_put_deleg_lease(dp->dl_stid.sc_file);
                nfs4_put_stid(&dp->dl_stid);
        }
        /* revoked delegations are already unhashed; just put them */
        while (!list_empty(&clp->cl_revoked)) {
                dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
                list_del_init(&dp->dl_recall_lru);
                nfs4_put_stid(&dp->dl_stid);
        }
        while (!list_empty(&clp->cl_openowners)) {
                oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
                /* take a reference so the owner outlives its release */
                nfs4_get_stateowner(&oo->oo_owner);
                release_openowner(oo);
        }
        nfsd4_return_all_client_layouts(clp);
        nfsd4_shutdown_callback(clp);
        if (clp->cl_cb_conn.cb_xprt)
                svc_xprt_put(clp->cl_cb_conn.cb_xprt);
        free_client(clp);
}
1810
/* Unhash and fully tear down a client. */
static void
destroy_client(struct nfs4_client *clp)
{
        unhash_client(clp);
        __destroy_client(clp);
}
1817
/* Like destroy_client, but also removes the client's stable-storage
 * record so it cannot reclaim state after the next server reboot. */
static void expire_client(struct nfs4_client *clp)
{
        unhash_client(clp);
        nfsd4_client_record_remove(clp);
        __destroy_client(clp);
}
1824
/* Copy a client verifier into the client structure. */
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
        memcpy(target->cl_verifier.data, source->data,
                        sizeof(target->cl_verifier.data));
}
1830
/* Copy the clientid (boot time + id) from one client to another. */
static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
        target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
        target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}
1836
/*
 * Deep-copy an rpc credential: duplicates the principal string and
 * takes references on the group info and gss mechanism.  Returns
 * -ENOMEM if the principal string cannot be duplicated.
 */
static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
        if (source->cr_principal) {
                target->cr_principal =
                                kstrdup(source->cr_principal, GFP_KERNEL);
                if (target->cr_principal == NULL)
                        return -ENOMEM;
        } else
                target->cr_principal = NULL;
        target->cr_flavor = source->cr_flavor;
        target->cr_uid = source->cr_uid;
        target->cr_gid = source->cr_gid;
        target->cr_group_info = source->cr_group_info;
        get_group_info(target->cr_group_info);
        target->cr_gss_mech = source->cr_gss_mech;
        if (source->cr_gss_mech)
                gss_mech_get(source->cr_gss_mech);
        return 0;
}
1856
1857 static int
1858 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1859 {
1860         if (o1->len < o2->len)
1861                 return -1;
1862         if (o1->len > o2->len)
1863                 return 1;
1864         return memcmp(o1->data, o2->data, o1->len);
1865 }
1866
1867 static int same_name(const char *n1, const char *n2)
1868 {
1869         return 0 == memcmp(n1, n2, HEXDIR_LEN);
1870 }
1871
1872 static int
1873 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1874 {
1875         return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1876 }
1877
1878 static int
1879 same_clid(clientid_t *cl1, clientid_t *cl2)
1880 {
1881         return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1882 }
1883
1884 static bool groups_equal(struct group_info *g1, struct group_info *g2)
1885 {
1886         int i;
1887
1888         if (g1->ngroups != g2->ngroups)
1889                 return false;
1890         for (i=0; i<g1->ngroups; i++)
1891                 if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
1892                         return false;
1893         return true;
1894 }
1895
1896 /*
1897  * RFC 3530 language requires clid_inuse be returned when the
1898  * "principal" associated with a requests differs from that previously
1899  * used.  We use uid, gid's, and gss principal string as our best
1900  * approximation.  We also don't want to allow non-gss use of a client
1901  * established using gss: in theory cr_principal should catch that
1902  * change, but in practice cr_principal can be null even in the gss case
1903  * since gssd doesn't always pass down a principal string.
1904  */
1905 static bool is_gss_cred(struct svc_cred *cr)
1906 {
1907         /* Is cr_flavor one of the gss "pseudoflavors"?: */
1908         return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
1909 }
1910
1911
1912 static bool
1913 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1914 {
1915         if ((is_gss_cred(cr1) != is_gss_cred(cr2))
1916                 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
1917                 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
1918                 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
1919                 return false;
1920         if (cr1->cr_principal == cr2->cr_principal)
1921                 return true;
1922         if (!cr1->cr_principal || !cr2->cr_principal)
1923                 return false;
1924         return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1925 }
1926
1927 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
1928 {
1929         struct svc_cred *cr = &rqstp->rq_cred;
1930         u32 service;
1931
1932         if (!cr->cr_gss_mech)
1933                 return false;
1934         service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
1935         return service == RPC_GSS_SVC_INTEGRITY ||
1936                service == RPC_GSS_SVC_PRIVACY;
1937 }
1938
1939 static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
1940 {
1941         struct svc_cred *cr = &rqstp->rq_cred;
1942
1943         if (!cl->cl_mach_cred)
1944                 return true;
1945         if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
1946                 return false;
1947         if (!svc_rqst_integrity_protected(rqstp))
1948                 return false;
1949         if (!cr->cr_principal)
1950                 return false;
1951         return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
1952 }
1953
1954 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
1955 {
1956         __be32 verf[2];
1957
1958         /*
1959          * This is opaque to client, so no need to byte-swap. Use
1960          * __force to keep sparse happy
1961          */
1962         verf[0] = (__force __be32)get_seconds();
1963         verf[1] = (__force __be32)nn->clverifier_counter++;
1964         memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1965 }
1966
/*
 * Assign a new clientid: cl_boot ties it to this server instance's
 * boot time, the per-net counter makes it unique within the instance.
 * NOTE(review): clientid_counter is incremented without atomics, so
 * callers presumably serialize on nn->client_lock — confirm at call
 * sites.
 */
static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	clp->cl_clientid.cl_boot = nn->boot_time;
	clp->cl_clientid.cl_id = nn->clientid_counter++;
	gen_confirm(clp, nn);
}
1973
1974 static struct nfs4_stid *
1975 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
1976 {
1977         struct nfs4_stid *ret;
1978
1979         ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
1980         if (!ret || !ret->sc_type)
1981                 return NULL;
1982         return ret;
1983 }
1984
/*
 * Look up a stateid whose type matches @typemask, taking a reference
 * on success.  Returns NULL when the id is unknown, still being set
 * up, or of the wrong type.  The caller must put the returned ref.
 */
static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, t);
	if (s != NULL) {
		if (typemask & s->sc_type)
			atomic_inc(&s->sc_count);	/* ref transferred to caller */
		else
			s = NULL;
	}
	spin_unlock(&cl->cl_lock);
	return s;
}
2001
/*
 * Allocate and initialize a new nfs4_client from the request: copies
 * the caller's name, credential, verifier and source address, and sets
 * up the null callback.  The client is not hashed anywhere yet.
 * Returns NULL on allocation failure.
 */
static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		/* copy_cred failed before taking any refs; just free */
		free_client(clp);
		return NULL;
	}
	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	clp->cl_cb_session = NULL;
	clp->net = net;
	return clp;
}
2028
/*
 * Insert a client into a name-keyed rbtree (conf_name_tree or
 * unconf_name_tree), ordered by compare_blob() on cl_name.
 * NOTE(review): equal names fall through to the right branch, i.e.
 * duplicates would be inserted rather than rejected — callers
 * presumably guarantee the name is not already present.
 */
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	/* Standard rbtree descent to find the insertion point: */
	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}
2048
2049 static struct nfs4_client *
2050 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2051 {
2052         int cmp;
2053         struct rb_node *node = root->rb_node;
2054         struct nfs4_client *clp;
2055
2056         while (node) {
2057                 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2058                 cmp = compare_blob(&clp->cl_name, name);
2059                 if (cmp > 0)
2060                         node = node->rb_left;
2061                 else if (cmp < 0)
2062                         node = node->rb_right;
2063                 else
2064                         return clp;
2065         }
2066         return NULL;
2067 }
2068
/*
 * Hash a freshly-created client into the unconfirmed name tree and id
 * hash table, clearing its CONFIRMED flag and renewing its lease.
 * Caller must hold nn->client_lock (asserted below).
 */
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client_locked(clp);
}
2083
/*
 * Promote an unconfirmed client to confirmed: move it from the
 * unconfirmed id hash and name tree to the confirmed ones, set its
 * CONFIRMED flag and renew its lease.  Caller must hold
 * nn->client_lock (asserted below).
 */
static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client_locked(clp);
}
2099
/*
 * Scan one id-hash chain for @clid.  Lookups are segregated by whether
 * the client uses sessions (cl_minorversion != 0): a mismatch between
 * the client's minorversion and the @sessions flag is treated as "not
 * found".  Renews the client's lease on success.
 */
static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client_locked(clp);
			return clp;
		}
	}
	return NULL;
}
2116
2117 static struct nfs4_client *
2118 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2119 {
2120         struct list_head *tbl = nn->conf_id_hashtbl;
2121
2122         lockdep_assert_held(&nn->client_lock);
2123         return find_client_in_id_table(tbl, clid, sessions);
2124 }
2125
2126 static struct nfs4_client *
2127 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2128 {
2129         struct list_head *tbl = nn->unconf_id_hashtbl;
2130
2131         lockdep_assert_held(&nn->client_lock);
2132         return find_client_in_id_table(tbl, clid, sessions);
2133 }
2134
2135 static bool clp_used_exchangeid(struct nfs4_client *clp)
2136 {
2137         return clp->cl_exchange_flags != 0;
2138
2139
/* Look up a confirmed client by name; caller holds nn->client_lock. */
static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}
2146
/* Look up an unconfirmed client by name; caller holds nn->client_lock. */
static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
2153
/*
 * Parse the SETCLIENTID callback address/netid into clp->cl_cb_conn.
 * On any parse or family mismatch, the callback address is cleared
 * (AF_UNSPEC, length 0), which means this client will not be offered
 * delegations.
 */
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr	*sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					    se->se_callback_addr_len,
					    (struct sockaddr *)&conn->cb_addr,
					    sizeof(conn->cb_addr));

	/* The universal address must parse and match the claimed netid: */
	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	/* Link-local v6 addresses need the request's scope id: */
	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	/* Source callbacks from the address the client contacted us on: */
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}
2196
2197 /*
2198  * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2199  */
2200 static void
2201 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2202 {
2203         struct xdr_buf *buf = resp->xdr.buf;
2204         struct nfsd4_slot *slot = resp->cstate.slot;
2205         unsigned int base;
2206
2207         dprintk("--> %s slot %p\n", __func__, slot);
2208
2209         slot->sl_opcnt = resp->opcnt;
2210         slot->sl_status = resp->cstate.status;
2211
2212         slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2213         if (nfsd4_not_cached(resp)) {
2214                 slot->sl_datalen = 0;
2215                 return;
2216         }
2217         base = resp->cstate.data_offset;
2218         slot->sl_datalen = buf->len - base;
2219         if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2220                 WARN("%s: sessions DRC could not cache compound\n", __func__);
2221         return;
2222 }
2223
2224 /*
2225  * Encode the replay sequence operation from the slot values.
2226  * If cachethis is FALSE encode the uncached rep error on the next
2227  * operation which sets resp->p and increments resp->opcnt for
2228  * nfs4svc_encode_compoundres.
2229  *
2230  */
/*
 * Re-encode the SEQUENCE op for a replayed compound.  If the slot was
 * not marked cachethis and the compound has more ops, encode a single
 * nfserr_retry_uncached_rep op in its place.  Returns the status of
 * the last op encoded.
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}
2250
2251 /*
2252  * The sequence operation is not cached because we can use the slot and
2253  * session values.
2254  */
/*
 * Replay a cached reply out of the session slot: re-encode SEQUENCE,
 * then splice the cached post-SEQUENCE bytes straight into the reply
 * stream.  The sequence operation itself is not cached because we can
 * regenerate it from the slot and session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		/* Should never happen: the data fit when it was cached. */
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	/* Report the compound's original op count and status: */
	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}
2281
2282 /*
2283  * Set the exchange_id flags returned by the server.
2284  */
/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
	/* Advertise pNFS MDS capability only when built with pNFS support: */
#ifdef CONFIG_NFSD_PNFS
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
#else
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
#endif

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}
2300
/*
 * Does any of the client's openowners still hold open stateids?
 * An openowner with an empty so_stateids list doesn't count as state.
 */
static bool client_has_openowners(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
		if (!list_empty(&oo->oo_owner.so_stateids))
			return true;
	}
	return false;
}
2311
/*
 * Does the client hold any state (opens, layouts, delegations or
 * sessions) that would make destroying it lossy?
 */
static bool client_has_state(struct nfs4_client *clp)
{
	return client_has_openowners(clp)
#ifdef CONFIG_NFSD_PNFS
		|| !list_empty(&clp->cl_lo_states)
#endif
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
}
2321
/*
 * NFSv4.1 EXCHANGE_ID: establish or update a client record.  The
 * numbered "case" comments below follow the decision table in RFC 5661
 * section 18.35.4.
 */
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	char			addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier		verf = exid->verifier;
	struct sockaddr		*sa = svc_addr(rqstp);
	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		if (!svc_rqst_integrity_protected(rqstp))
			return nfserr_inval;
		/* fall through: MACH_CRED is otherwise handled like NONE */
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
		/* fall through: reject like SP4_SSV */
	case SP4_SSV:
		return nfserr_encr_alg_unsupp;
	}

	/* Allocate before taking the lock, in case we need a new client: */
	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL)
		return nfserr_jukebox;

	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		/* case 5, client reboot */
		conf = NULL;
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf  = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1 (normal case) */
out_new:
	if (conf) {
		/* Replacing a confirmed client (case 5): expire the old one */
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);

	gen_clid(new, nn);
	add_to_unconfirmed(new);
	/*
	 * After the swap, "conf" is the client whose ids we return below
	 * and "new" is whatever is left over to be expired at out:.
	 */
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;

	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
	/* expire_client() must run outside the client_lock: */
	if (new)
		expire_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
2446
2447 static __be32
2448 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
2449 {
2450         dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2451                 slot_seqid);
2452
2453         /* The slot is in use, and no response has been sent. */
2454         if (slot_inuse) {
2455                 if (seqid == slot_seqid)
2456                         return nfserr_jukebox;
2457                 else
2458                         return nfserr_seq_misordered;
2459         }
2460         /* Note unsigned 32-bit arithmetic handles wraparound: */
2461         if (likely(seqid == slot_seqid + 1))
2462                 return nfs_ok;
2463         if (seqid == slot_seqid)
2464                 return nfserr_replay_cache;
2465         return nfserr_seq_misordered;
2466 }
2467
2468 /*
2469  * Cache the create session result into the create session single DRC
2470  * slot cache by saving the xdr structure. sl_seqid has been set.
2471  * Do this for solo or embedded create session operations.
2472  */
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	/* Whole decoded args/results struct is small enough to copy: */
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}
2480
/*
 * Replay a cached CREATE_SESSION reply from the clientid slot:
 * restore the cached args/results and return the cached status.
 */
static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}
2488
/*
 * Smallest legal fore-channel request/response sizes: just enough XDR
 * space for a COMPOUND carrying a single SEQUENCE operation.
 */
#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */\
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))
2505
/*
 * Validate and clamp the client's requested fore-channel attributes.
 * On success this also reserves DRC memory for ca->maxreqs slots via
 * nfsd4_get_drc_mem() — callers must release it on later failure.
 */
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	/* Clamp everything to what this server is willing to handle: */
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca);
	if (!ca->maxreqs)
		return nfserr_jukebox;

	return nfs_ok;
}
2535
/* Worst-case backchannel message sizes: a CB_RECALL plus full RPC headers. */
#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))
2540
/*
 * Validate the client's requested back-channel attributes; the server
 * never caches backchannel replies, so maxresp_cached is forced to 0.
 */
static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	/*
	 * These RPC_MAX_HEADER macros are overkill, especially since we
	 * don't even do gss on the backchannel yet.  But this is still
	 * less than 1k.  Tighten up this estimate in the unlikely event
	 * it turns out to be a problem for some client:
	 */
	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	/* A callback compound needs at least SEQUENCE plus one more op: */
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}
2561
/*
 * Check whether we can honor the requested callback security flavor;
 * only AUTH_NULL and AUTH_UNIX are currently supported.
 */
static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}
2579
/*
 * NFSv4.1 CREATE_SESSION: validate the requested channel attributes,
 * locate the (un)confirmed client the session is for, and — under
 * nn->client_lock — confirm the client, bump the create_session slot
 * seqid and cache the reply for replay detection.
 */
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	/* On success this also reserves DRC memory (released below on error): */
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	/* Allocate session and connection before taking the lock: */
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	/* A clientid can be in at most one of the two tables: */
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			if (status == nfserr_replay_cache)
				status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		}
	} else if (unconf) {
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		/* Confirming may displace an older confirmed client of the
		 * same name; expire it outside the lock (below). */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out_free_conn;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_get_session_locked(new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the client_lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	spin_unlock(&nn->client_lock);
	/* init connection and backchannel */
	nfsd4_init_conn(rqstp, conn, new);
	nfsd4_put_session(new);
	if (old)
		expire_client(old);
	return status;
out_free_conn:
	spin_unlock(&nn->client_lock);
	free_conn(conn);
	if (old)
		expire_client(old);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}
2694
2695 static __be32 nfsd4_map_bcts_dir(u32 *dir)
2696 {
2697         switch (*dir) {
2698         case NFS4_CDFC4_FORE:
2699         case NFS4_CDFC4_BACK:
2700                 return nfs_ok;
2701         case NFS4_CDFC4_FORE_OR_BOTH:
2702         case NFS4_CDFC4_BACK_OR_BOTH:
2703                 *dir = NFS4_CDFC4_BOTH;
2704                 return nfs_ok;
2705         };
2706         return nfserr_inval;
2707 }
2708
2709 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
2710 {
2711         struct nfsd4_session *session = cstate->session;
2712         struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2713         __be32 status;
2714
2715         status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2716         if (status)
2717                 return status;
2718         spin_lock(&nn->client_lock);
2719         session->se_cb_prog = bc->bc_cb_program;
2720         session->se_cb_sec = bc->bc_cb_sec;
2721         spin_unlock(&nn->client_lock);
2722
2723         nfsd4_probe_callback(session->se_client);
2724
2725         return nfs_ok;
2726 }
2727
2728 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2729                      struct nfsd4_compound_state *cstate,
2730                      struct nfsd4_bind_conn_to_session *bcts)
2731 {
2732         __be32 status;
2733         struct nfsd4_conn *conn;
2734         struct nfsd4_session *session;
2735         struct net *net = SVC_NET(rqstp);
2736         struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2737
2738         if (!nfsd4_last_compound_op(rqstp))
2739                 return nfserr_not_only_op;
2740         spin_lock(&nn->client_lock);
2741         session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2742         spin_unlock(&nn->client_lock);
2743         if (!session)
2744                 goto out_no_session;
2745         status = nfserr_wrong_cred;
2746         if (!mach_creds_match(session->se_client, rqstp))
2747                 goto out;
2748         status = nfsd4_map_bcts_dir(&bcts->dir);
2749         if (status)
2750                 goto out;
2751         conn = alloc_conn(rqstp, bcts->dir);
2752         status = nfserr_jukebox;
2753         if (!conn)
2754                 goto out;
2755         nfsd4_init_conn(rqstp, conn, session);
2756         status = nfs_ok;
2757 out:
2758         nfsd4_put_session(session);
2759 out_no_session:
2760         return status;
2761 }
2762
2763 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2764 {
2765         if (!session)
2766                 return 0;
2767         return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2768 }
2769
/*
 * DESTROY_SESSION.  Tear down the session named by @sessionid.  If the
 * compound is executing inside that very session it must be the only
 * op in the compound, and we must account for the extra session
 * reference the compound itself already holds.
 */
__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		/* count the reference nfsd4_sequence() took on our behalf */
		ref_held_by_me++;
	}
	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	/* the callback probe is done with the client_lock dropped: */
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}
2812
2813 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2814 {
2815         struct nfsd4_conn *c;
2816
2817         list_for_each_entry(c, &s->se_conns, cn_persession) {
2818                 if (c->cn_xprt == xpt) {
2819                         return c;
2820                 }
2821         }
2822         return NULL;
2823 }
2824
/*
 * Ensure the connection a SEQUENCE arrived on is bound to the session.
 * Consumes @new in every path: it is either hashed into the session's
 * connection list or freed.  For mach_cred clients an unknown
 * connection is an error rather than being implicitly bound.
 */
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		/* already bound: drop the duplicate, return nfs_ok */
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}
2851
2852 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
2853 {
2854         struct nfsd4_compoundargs *args = rqstp->rq_argp;
2855
2856         return args->opcnt > session->se_fchannel.maxops;
2857 }
2858
2859 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
2860                                   struct nfsd4_session *session)
2861 {
2862         struct xdr_buf *xb = &rqstp->rq_arg;
2863
2864         return xb->len > session->se_fchannel.maxreq_sz;
2865 }
2866
/*
 * SEQUENCE.  Validate session/slot/seqid for the compound, serve
 * replays from the slot's reply cache, bind the incoming connection to
 * the session if needed, and record slot/session/client in @cstate for
 * the rest of the compound.  On success a session reference is held;
 * it is dropped in nfsd4_sequence_done().
 */
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
	       struct nfsd4_compound_state *cstate,
	       struct nfsd4_sequence *seq)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/* SEQUENCE must be the first op of its compound. */
	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	/* Enforce the limits agreed at CREATE_SESSION: */
	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		/* a replay is only valid if we actually cached a reply */
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	/* consumes conn in all cases (hashed into session or freed): */
	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	/* Restrict the reply to what we can send (and cache, if asked): */
	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	/* Report backchannel trouble and revoked state to the client: */
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}
2987
2988 void
2989 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
2990 {
2991         struct nfsd4_compound_state *cs = &resp->cstate;
2992
2993         if (nfsd4_has_session(cs)) {
2994                 if (cs->status != nfserr_replay_cache) {
2995                         nfsd4_store_cache_entry(resp);
2996                         cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
2997                 }
2998                 /* Drop session reference that was taken in nfsd4_sequence() */
2999                 nfsd4_put_session(cs->session);
3000         } else if (cs->clp)
3001                 put_client_renew(cs->clp);
3002 }
3003
/*
 * DESTROY_CLIENTID (NFSv4.1+).  A confirmed client may only be
 * destroyed when it holds no state; an unconfirmed client can always
 * be discarded.  The actual expiry runs after the client_lock is
 * dropped.
 */
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	/* a clientid can't be both confirmed and unconfirmed at once */
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!mach_creds_match(clp, rqstp)) {
		/* don't expire the client on a credential mismatch */
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	if (clp)
		expire_client(clp);
	return status;
}
3044
3045 __be32
3046 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
3047 {
3048         __be32 status = 0;
3049
3050         if (rc->rca_one_fs) {
3051                 if (!cstate->current_fh.fh_dentry)
3052                         return nfserr_nofilehandle;
3053                 /*
3054                  * We don't take advantage of the rca_one_fs case.
3055                  * That's OK, it's optional, we can safely ignore it.
3056                  */
3057                  return nfs_ok;
3058         }
3059
3060         status = nfserr_complete_already;
3061         if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
3062                              &cstate->session->se_client->cl_flags))
3063                 goto out;
3064
3065         status = nfserr_stale_clientid;
3066         if (is_client_expired(cstate->session->se_client))
3067                 /*
3068                  * The following error isn't really legal.
3069                  * But we only get here if the client just explicitly
3070                  * destroyed the client.  Surely it no longer cares what
3071                  * error it gets back on an operation for the dead
3072                  * client.
3073                  */
3074                 goto out;
3075
3076         status = nfs_ok;
3077         nfsd4_client_record_create(cstate->session->se_client);
3078 out:
3079         return status;
3080 }
3081
/*
 * SETCLIENTID (NFSv4.0).  Stage a new, unconfirmed client record for
 * @clname; it becomes usable only after SETCLIENTID_CONFIRM.  Case
 * numbers in the comments refer to rfc 3530 section 14.2.33.
 */
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj	clname = setclid->se_name;
	nfs4_verifier		clverifier = setclid->se_verf;
	struct nfs4_client	*conf, *new;
	struct nfs4_client	*unconf = NULL;
	__be32			status;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	/* allocate the candidate client before taking the client_lock */
	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	/* Cases below refer to rfc 3530 section 14.2.33: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf && client_has_state(conf)) {
		/* case 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	/* any older unconfirmed record for this name is superseded: */
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
		/* case 1: probable callback update */
		copy_clid(new, conf);
		gen_confirm(new, nn);
	} else /* case 4 (new client) or cases 2, 3 (client reboot): */
		gen_clid(new, nn);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	/* ownership of "new" transferred to the unconfirmed table: */
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
3138
3139
/*
 * SETCLIENTID_CONFIRM (NFSv4.0).  Confirm the client staged by a prior
 * SETCLIENTID.  Case numbers in the comments refer to rfc 3530 section
 * 14.2.34.  Any client displaced by the confirmation is expired after
 * the client_lock is dropped.
 */
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			 struct nfsd4_compound_state *cstate,
			 struct nfsd4_setclientid_confirm *setclientid_confirm)
{
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t * clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * the client may be buggy; this should never happen.
	 *
	 * Nevertheless, RFC 7530 recommends INUSE for this case:
	 */
	status = nfserr_clid_inuse;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && !unconf) /* case 2: probable retransmit */
			status = nfs_ok;
		else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
		old = unconf;
		unhash_client_locked(old);
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
	} else { /* case 3: normal case; new or rebooted client */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = nfserr_clid_inuse;
			if (client_has_state(old)
					&& !same_creds(&unconf->cl_cred,
							&old->cl_cred))
				goto out;
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	}
	/* hold the client across the unlocked callback probe: */
	get_client_locked(conf);
	spin_unlock(&nn->client_lock);
	nfsd4_probe_callback(conf);
	spin_lock(&nn->client_lock);
	put_client_renew_locked(conf);
out:
	spin_unlock(&nn->client_lock);
	if (old)
		expire_client(old);
	return status;
}
3211
/* Allocate a new, uninitialized nfs4_file; caller must init and hash it. */
static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}
3216
3217 /* OPEN Share state helper functions */
/*
 * Initialize a freshly allocated nfs4_file for filehandle @fh and
 * publish it in file_hashtbl[hashval].  The RCU hash insertion comes
 * last so lockless readers never see a partially initialized file.
 */
static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
				struct nfs4_file *fp)
{
	lockdep_assert_held(&state_lock);

	atomic_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
	fh_copy_shallow(&fp->fi_fhandle, fh);
	fp->fi_deleg_file = NULL;
	fp->fi_had_conflict = false;
	fp->fi_share_deny = 0;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&fp->fi_lo_states);
	atomic_set(&fp->fi_lo_recalls, 0);
#endif
	/* publish: readers iterate with hlist_for_each_entry_rcu() */
	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}
3240
/* Destroy all NFSv4 state kmem caches (inverse of nfsd4_init_slabs). */
void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(odstate_slab);
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
}
3251
/*
 * Create the kmem caches used for NFSv4 state objects.  On any
 * failure, the caches created so far are torn down in reverse order
 * and -ENOMEM is returned.
 */
int
nfsd4_init_slabs(void)
{
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	odstate_slab = kmem_cache_create("nfsd4_odstate",
			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
	if (odstate_slab == NULL)
		goto out_free_deleg_slab;
	return 0;

out_free_deleg_slab:
	kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out:
	dprintk("nfsd4: out of memory while initializing nfsv4\n");
	return -ENOMEM;
}
3295
3296 static void init_nfs4_replay(struct nfs4_replay *rp)
3297 {
3298         rp->rp_status = nfserr_serverfault;
3299         rp->rp_buflen = 0;
3300         rp->rp_buf = rp->rp_ibuf;
3301         mutex_init(&rp->rp_mutex);
3302 }
3303
3304 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3305                 struct nfs4_stateowner *so)
3306 {
3307         if (!nfsd4_has_session(cstate)) {
3308                 mutex_lock(&so->so_replay.rp_mutex);
3309                 cstate->replay_owner = nfs4_get_stateowner(so);
3310         }
3311 }
3312
3313 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3314 {
3315         struct nfs4_stateowner *so = cstate->replay_owner;
3316
3317         if (so != NULL) {
3318                 cstate->replay_owner = NULL;
3319                 mutex_unlock(&so->so_replay.rp_mutex);
3320                 nfs4_put_stateowner(so);
3321         }
3322 }
3323
3324 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
3325 {
3326         struct nfs4_stateowner *sop;
3327
3328         sop = kmem_cache_alloc(slab, GFP_KERNEL);
3329         if (!sop)
3330                 return NULL;
3331
3332         sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
3333         if (!sop->so_owner.data) {
3334                 kmem_cache_free(slab, sop);
3335                 return NULL;
3336         }
3337         sop->so_owner.len = owner->len;
3338
3339         INIT_LIST_HEAD(&sop->so_stateids);
3340         sop->so_client = clp;
3341         init_nfs4_replay(&sop->so_replay);
3342         atomic_set(&sop->so_count, 1);
3343         return sop;
3344 }
3345
3346 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
3347 {
3348         lockdep_assert_held(&clp->cl_lock);
3349
3350         list_add(&oo->oo_owner.so_strhash,
3351                  &clp->cl_ownerstr_hashtbl[strhashval]);
3352         list_add(&oo->oo_perclient, &clp->cl_openowners);
3353 }
3354
/*
 * ->so_unhash callback for openowners.  NOTE(review): the _locked
 * suffix on the helper suggests the caller holds clp->cl_lock —
 * confirm at the call sites.
 */
static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	unhash_openowner_locked(openowner(so));
}
3359
3360 static void nfs4_free_openowner(struct nfs4_stateowner *so)
3361 {
3362         struct nfs4_openowner *oo = openowner(so);
3363
3364         kmem_cache_free(openowner_slab, oo);
3365 }
3366
/* Stateowner operations used for openowners (cf. lockowner ops elsewhere). */
static const struct nfs4_stateowner_operations openowner_ops = {
	.so_unhash =	nfs4_unhash_openowner,
	.so_free =	nfs4_free_openowner,
};
3371
3372 static struct nfs4_ol_stateid *
3373 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3374 {
3375         struct nfs4_ol_stateid *local, *ret = NULL;
3376         struct nfs4_openowner *oo = open->op_openowner;
3377
3378         lockdep_assert_held(&fp->fi_lock);
3379
3380         list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3381                 /* ignore lock owners */
3382                 if (local->st_stateowner->so_is_open_owner == 0)
3383                         continue;
3384                 if (local->st_stateowner == &oo->oo_owner) {
3385                         ret = local;
3386                         atomic_inc(&ret->st_stid.sc_count);
3387                         break;
3388                 }
3389         }
3390         return ret;
3391 }
3392
/*
 * Allocate and hash a new openowner for @open, or — if another thread
 * hashed the same owner name first — free the new allocation and
 * return the existing one.  Returns NULL on allocation failure.
 */
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo, *ret;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_ops = &openowner_ops;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = 0;
	/* sessions (v4.1+) need no OPEN_CONFIRM handshake */
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	spin_lock(&clp->cl_lock);
	/* re-check under the lock: someone may have beaten us to it */
	ret = find_openstateowner_str_locked(strhashval, open, clp);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_stateowner(&oo->oo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}
3423
/*
 * Initialize and hash open stateid @stp for @fp/@open — unless an
 * existing open stateid for the same openowner is found under the
 * locks, in which case that one is returned (with its st_mutex held)
 * and @stp is left unused.  Returns NULL when @stp itself was
 * installed; @stp's st_mutex is then held.
 */
static struct nfs4_ol_stateid *
init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
		struct nfsd4_open *open)
{

	struct nfs4_openowner *oo = open->op_openowner;
	struct nfs4_ol_stateid *retstp = NULL;

	/* We are moving these outside of the spinlocks to avoid the warnings */
	mutex_init(&stp->st_mutex);
	mutex_lock(&stp->st_mutex);

	/* lock order: cl_lock, then fi_lock */
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	spin_lock(&fp->fi_lock);

	retstp = nfsd4_find_existing_open(fp, open);
	if (retstp)
		goto out_unlock;
	atomic_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	stp->st_openstp = NULL;
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);

out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
	if (retstp) {
		/* hand back the existing stateid with its mutex held */
		mutex_lock(&retstp->st_mutex);
		/* Not that we need to, just for neatness */
		mutex_unlock(&stp->st_mutex);
	}
	return retstp;
}
3464
3465 /*
3466  * In the 4.0 case we need to keep the owners around a little while to handle
3467  * CLOSE replay. We still do need to release any file access that is held by
3468  * them before returning however.
3469  */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
	struct nfs4_ol_stateid *last;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
						nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);

	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}

	/* Park @s as the owner's last-closed stid and age the owner on
	 * the close LRU; any previously parked stid is released. */
	spin_lock(&nn->client_lock);
	last = oo->oo_last_closed_stid;
	oo->oo_last_closed_stid = s;
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = get_seconds();
	spin_unlock(&nn->client_lock);
	if (last)
		nfs4_put_stid(&last->st_stid);
}
3506
3507 /* search file_hashtbl[] for file */
3508 static struct nfs4_file *
3509 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
3510 {
3511         struct nfs4_file *fp;
3512
3513         hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
3514                 if (fh_match(&fp->fi_fhandle, fh)) {
3515                         if (atomic_inc_not_zero(&fp->fi_ref))
3516                                 return fp;
3517                 }
3518         }
3519         return NULL;
3520 }
3521
/*
 * Look up an nfs4_file by filehandle, taking a reference on success.
 * Returns NULL if no matching file is hashed.
 */
struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
	struct nfs4_file *found;

	rcu_read_lock();
	found = find_file_locked(fh, file_hashval(fh));
	rcu_read_unlock();
	return found;
}
3533
3534 static struct nfs4_file *
3535 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3536 {
3537         struct nfs4_file *fp;
3538         unsigned int hashval = file_hashval(fh);
3539
3540         rcu_read_lock();
3541         fp = find_file_locked(fh, hashval);
3542         rcu_read_unlock();
3543         if (fp)
3544                 return fp;
3545
3546         spin_lock(&state_lock);
3547         fp = find_file_locked(fh, hashval);
3548         if (likely(fp == NULL)) {
3549                 nfsd4_init_file(fh, hashval, new);
3550                 fp = new;
3551         }
3552         spin_unlock(&state_lock);
3553
3554         return fp;
3555 }
3556
3557 /*
3558  * Called to check deny when READ with all zero stateid or
3559  * WRITE with all zero or all one stateid
3560  */
3561 static __be32
3562 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3563 {
3564         struct nfs4_file *fp;
3565         __be32 ret = nfs_ok;
3566
3567         fp = find_file(&current_fh->fh_handle);
3568         if (!fp)
3569                 return ret;
3570         /* Check for conflicting share reservations */
3571         spin_lock(&fp->fi_lock);
3572         if (fp->fi_share_deny & deny_type)
3573                 ret = nfserr_locked;
3574         spin_unlock(&fp->fi_lock);
3575         put_nfs4_file(fp);
3576         return ret;
3577 }
3578
/* Callback ->prepare op: queue the delegation for recall bookkeeping. */
static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);
	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
					  nfsd_net_id);

	/* Stop handing out new delegations on this filehandle for a while. */
	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);

	/*
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock.
	 *
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	spin_lock(&state_lock);
	if (dp->dl_time == 0) {
		dp->dl_time = get_seconds();
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
	}
	spin_unlock(&state_lock);
}
3601
3602 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
3603                 struct rpc_task *task)
3604 {
3605         struct nfs4_delegation *dp = cb_to_delegation(cb);
3606
3607         if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
3608                 return 1;
3609
3610         switch (task->tk_status) {
3611         case 0:
3612                 return 1;
3613         case -EBADHANDLE:
3614         case -NFS4ERR_BAD_STATEID:
3615                 /*
3616                  * Race: client probably got cb_recall before open reply
3617                  * granting delegation.
3618                  */
3619                 if (dp->dl_retries--) {
3620                         rpc_delay(task, 2 * HZ);
3621                         return 0;
3622                 }
3623                 /*FALLTHRU*/
3624         default:
3625                 return -1;
3626         }
3627 }
3628
/*
 * Callback ->release op: drop the stateid reference taken in
 * nfsd_break_one_deleg() when the recall was kicked off.
 */
static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	nfs4_put_stid(&dp->dl_stid);
}
3635
/* Lifecycle ops for the CB_RECALL callback. */
static struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
	.prepare	= nfsd4_cb_recall_prepare,
	.done		= nfsd4_cb_recall_done,
	.release	= nfsd4_cb_recall_release,
};
3641
/* Kick off an asynchronous CB_RECALL for one delegation. */
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference.
	 */
	atomic_inc(&dp->dl_stid.sc_count);
	/* Reference is dropped by nfsd4_cb_recall_release(). */
	nfsd4_run_cb(&dp->dl_recall);
}
3654
/* Called from break_lease() with i_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
	bool ret = false;
	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
	struct nfs4_delegation *dp;

	/* Both of these indicate server-side bugs, not client misbehavior. */
	if (!fp) {
		WARN(1, "(%p)->fl_owner NULL\n", fl);
		return ret;
	}
	if (fp->fi_had_conflict) {
		WARN(1, "duplicate break on %p\n", fp);
		return ret;
	}
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&fp->fi_lock);
	/* Marks the file so no further delegations are handed out. */
	fp->fi_had_conflict = true;
	/*
	 * If there are no delegations on the list, then return true
	 * so that the lease code will go ahead and delete it.
	 */
	if (list_empty(&fp->fi_delegations))
		ret = true;
	else
		list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
			nfsd_break_one_deleg(dp);
	spin_unlock(&fp->fi_lock);
	return ret;
}
3692
3693 static int
3694 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
3695                      struct list_head *dispose)
3696 {
3697         if (arg & F_UNLCK)
3698                 return lease_modify(onlist, arg, dispose);
3699         else
3700                 return -EAGAIN;
3701 }
3702
/* Lock-manager ops attached to delegation leases (see nfs4_alloc_init_lease). */
static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};
3707
3708 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3709 {
3710         if (nfsd4_has_session(cstate))
3711                 return nfs_ok;
3712         if (seqid == so->so_seqid - 1)
3713                 return nfserr_replay_me;
3714         if (seqid == so->so_seqid)
3715                 return nfs_ok;
3716         return nfserr_bad_seqid;
3717 }
3718
/*
 * Resolve @clid to a confirmed nfs4_client and cache it in @cstate->clp,
 * taking a client reference that the compound-state teardown releases.
 */
static __be32 lookup_clientid(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	struct nfs4_client *found;

	/* Already cached (e.g. by a SEQUENCE op): just verify it matches. */
	if (cstate->clp) {
		found = cstate->clp;
		if (!same_clid(&found->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	/*
	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
	 * cached already then we know this is for v4.0 and "sessions"
	 * will be false.
	 */
	WARN_ON_ONCE(cstate->session);
	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, false, nn);
	if (!found) {
		spin_unlock(&nn->client_lock);
		return nfserr_expired;
	}
	/* Pin the client before dropping the lock so it can't go away. */
	atomic_inc(&found->cl_refcount);
	spin_unlock(&nn->client_lock);

	/* Cache the nfs4_client in cstate! */
	cstate->clp = found;
	return nfs_ok;
}
3754
/*
 * First phase of OPEN processing: validate the clientid and pre-allocate
 * everything the OPEN might need (file, openowner, stateid, odstate) so
 * that later phases can't fail on allocation.  Objects stashed in @open
 * are released by nfsd4_cleanup_open_state() if unused.
 */
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = lookup_clientid(clientid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	/* Find an existing openowner for this client, or create one. */
	strhashval = ownerstr_hashval(&open->op_owner);
	oo = find_openstateowner_str(strhashval, open, clp);
	open->op_openowner = oo;
	if (!oo) {
		goto new_owner;
	}
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	/* Confirmed v4.0 owner: enforce the per-owner seqid discipline. */
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	open->op_stp = nfs4_alloc_open_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;

	/* pNFS exports may also need a client-open-delegation state. */
	if (nfsd4_has_session(cstate) &&
	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
		open->op_odstate = alloc_clnt_odstate(clp);
		if (!open->op_odstate)
			return nfserr_jukebox;
	}

	return nfs_ok;
}
3815
3816 static inline __be32
3817 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
3818 {
3819         if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
3820                 return nfserr_openmode;
3821         else
3822                 return nfs_ok;
3823 }
3824
3825 static int share_access_to_flags(u32 share_access)
3826 {
3827         return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
3828 }
3829
3830 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
3831 {
3832         struct nfs4_stid *ret;
3833
3834         ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
3835         if (!ret)
3836                 return NULL;
3837         return delegstateid(ret);
3838 }
3839
3840 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
3841 {
3842         return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
3843                open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
3844 }
3845
/*
 * Look up the delegation named in the OPEN and verify its mode.  On
 * success *dp holds a referenced delegation (caller must put it).  Note
 * that a lookup/mode failure is only fatal for DELEGATE_CUR-style claims;
 * for other claim types the delegation stateid is advisory and nfs_ok is
 * returned regardless.
 */
static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;
	struct nfs4_delegation *deleg;

	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (deleg == NULL)
		goto out;
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(deleg, flags);
	if (status) {
		/* Wrong mode: drop the reference and report via status. */
		nfs4_put_stid(&deleg->dl_stid);
		goto out;
	}
	*dp = deleg;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	/* A valid DELEGATE_CUR claim implicitly confirms the openowner. */
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}
3872
3873 static inline int nfs4_access_to_access(u32 nfs4_access)
3874 {
3875         int flags = 0;
3876
3877         if (nfs4_access & NFS4_SHARE_ACCESS_READ)
3878                 flags |= NFSD_MAY_READ;
3879         if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
3880                 flags |= NFSD_MAY_WRITE;
3881         return flags;
3882 }
3883
3884 static inline __be32
3885 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
3886                 struct nfsd4_open *open)
3887 {
3888         struct iattr iattr = {
3889                 .ia_valid = ATTR_SIZE,
3890                 .ia_size = 0,
3891         };
3892         if (!open->op_truncate)
3893                 return 0;
3894         if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
3895                 return nfserr_inval;
3896         return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
3897 }
3898
/*
 * Record the access/deny modes of this OPEN in @fp and @stp, opening a
 * struct file of the right mode if the nfs4_file doesn't have one yet.
 * On any failure after the access was granted, the stateid's bitmaps and
 * the file's access counts are rolled back to their prior values.
 */
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
{
	struct file *filp = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
	unsigned char old_access_bmap, old_deny_bmap;

	spin_lock(&fp->fi_lock);

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

	if (!fp->fi_fds[oflag]) {
		/* Must drop fi_lock around nfsd_open (it may sleep), then
		 * recheck in case someone else installed a file meanwhile. */
		spin_unlock(&fp->fi_lock);
		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
		if (status)
			goto out_put_access;
		spin_lock(&fp->fi_lock);
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = filp;
			filp = NULL;
		}
	}
	spin_unlock(&fp->fi_lock);
	/* We lost the race to install filp; drop our extra open. */
	if (filp)
		fput(filp);

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
out_put_access:
	/* Roll back the access/deny state granted above. */
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
}
3963
/*
 * OPEN upgrade: the stateid already exists for this owner/file.  If the
 * requested access mode is new, go through the full nfs4_get_vfs_file()
 * path; otherwise only the deny mode may need extending.  The deny bitmap
 * is restored if the subsequent truncate fails.
 */
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	__be32 status;
	unsigned char old_deny_bmap = stp->st_deny_bmap;

	if (!test_access(open->op_share_access, stp))
		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);

	/* test and set deny mode */
	spin_lock(&fp->fi_lock);
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status == nfs_ok) {
		set_deny(open->op_share_deny, stp);
		fp->fi_share_deny |=
				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
	}
	spin_unlock(&fp->fi_lock);

	if (status != nfs_ok)
		return status;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status != nfs_ok)
		reset_union_bmap_deny(old_deny_bmap, stp);
	return status;
}
3991
3992 /* Should we give out recallable state?: */
3993 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
3994 {
3995         if (clp->cl_cb_state == NFSD4_CB_UP)
3996                 return true;
3997         /*
3998          * In the sessions case, since we don't have to establish a
3999          * separate connection for callbacks, we assume it's OK
4000          * until we hear otherwise:
4001          */
4002         return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4003 }
4004
4005 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
4006 {
4007         struct file_lock *fl;
4008
4009         fl = locks_alloc_lock();
4010         if (!fl)
4011                 return NULL;
4012         fl->fl_lmops = &nfsd_lease_mng_ops;
4013         fl->fl_flags = FL_DELEG;
4014         fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
4015         fl->fl_end = OFFSET_MAX;
4016         fl->fl_owner = (fl_owner_t)fp;
4017         fl->fl_pid = current->tgid;
4018         return fl;
4019 }
4020
4021 /**
4022  * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
4023  * @dp:   a pointer to the nfs4_delegation we're adding.
4024  *
4025  * Return:
4026  *      On success: Return code will be 0 on success.
4027  *
4028  *      On error: -EAGAIN if there was an existing delegation.
4029  *                 nonzero if there is an error in other cases.
4030  *
4031  */
4032
static int nfs4_setlease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct file_lock *fl;
	struct file *filp;
	int status = 0;

	fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		return -ENOMEM;
	filp = find_readable_file(fp);
	if (!filp) {
		/* We should always have a readable file here */
		WARN_ON_ONCE(1);
		locks_free_lock(fl);
		return -EBADF;
	}
	fl->fl_file = filp;
	/*
	 * vfs_setlease() may consume fl (setting our pointer to NULL);
	 * free it only if it was left behind.
	 */
	status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
	if (fl)
		locks_free_lock(fl);
	if (status)
		goto out_fput;
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	/* Did the lease get broken before we took the lock? */
	status = -EAGAIN;
	if (fp->fi_had_conflict)
		goto out_unlock;
	/* Race breaker */
	if (fp->fi_deleg_file) {
		/* Someone else installed a deleg file; hash onto theirs and
		 * release our extra open via out_fput on success too?  No:
		 * status from hash_delegation_locked decides below. */
		status = hash_delegation_locked(dp, fp);
		goto out_unlock;
	}
	/* We are the first: install our file as the delegation file. */
	fp->fi_deleg_file = filp;
	fp->fi_delegees = 0;
	status = hash_delegation_locked(dp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
	if (status) {
		/* Should never happen, this is a new fi_deleg_file  */
		WARN_ON_ONCE(1);
		goto out_fput;
	}
	return 0;
out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
out_fput:
	/* Drop the reference from find_readable_file(). */
	fput(filp);
	return status;
}
4085
/*
 * Allocate a delegation for @clp on @fp and attach it, setting up the
 * vfs lease if this file has no delegation file yet.  Returns the new
 * (referenced) delegation or an ERR_PTR; -EAGAIN indicates a conflict
 * (existing delegation or a lease break racing with us).
 */
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
{
	int status;
	struct nfs4_delegation *dp;

	if (fp->fi_had_conflict)
		return ERR_PTR(-EAGAIN);

	/* Cheap early check before we bother allocating anything. */
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	status = nfs4_get_existing_delegation(clp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);

	if (status)
		return ERR_PTR(status);

	dp = alloc_init_deleg(clp, fh, odstate);
	if (!dp)
		return ERR_PTR(-ENOMEM);

	get_nfs4_file(fp);
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	dp->dl_stid.sc_file = fp;
	if (!fp->fi_deleg_file) {
		/* No lease yet: drop the locks and set one up (may sleep). */
		spin_unlock(&fp->fi_lock);
		spin_unlock(&state_lock);
		status = nfs4_setlease(dp);
		goto out;
	}
	/* Lease exists; just recheck for a break and hash the delegation. */
	if (fp->fi_had_conflict) {
		status = -EAGAIN;
		goto out_unlock;
	}
	status = hash_delegation_locked(dp, fp);
out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
out:
	if (status) {
		/* Undo the allocation; nfs4_put_stid drops the file ref too. */
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_stid(&dp->dl_stid);
		return ERR_PTR(status);
	}
	return dp;
}
4135
4136 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
4137 {
4138         open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4139         if (status == -EAGAIN)
4140                 open->op_why_no_deleg = WND4_CONTENTION;
4141         else {
4142                 open->op_why_no_deleg = WND4_RESOURCE;
4143                 switch (open->op_deleg_want) {
4144                 case NFS4_SHARE_WANT_READ_DELEG:
4145                 case NFS4_SHARE_WANT_WRITE_DELEG:
4146                 case NFS4_SHARE_WANT_ANY_DELEG:
4147                         break;
4148                 case NFS4_SHARE_WANT_CANCEL:
4149                         open->op_why_no_deleg = WND4_CANCELLED;
4150                         break;
4151                 case NFS4_SHARE_WANT_NO_DELEG:
4152                         WARN_ON_ONCE(1);
4153                 }
4154         }
4155 }
4156
4157 /*
4158  * Attempt to hand out a delegation.
4159  *
4160  * Note we don't support write delegations, and won't until the vfs has
4161  * proper support for them.
4162  */
static void
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
			struct nfs4_ol_stateid *stp)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
	struct nfs4_client *clp = stp->st_stid.sc_client;
	int cb_up;
	int status = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	open->op_recall = 0;
	switch (open->op_claim_type) {
		case NFS4_OPEN_CLAIM_PREVIOUS:
			if (!cb_up)
				open->op_recall = 1;
			/* Only read-delegation reclaims are honored. */
			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
				goto out_no_deleg;
			break;
		case NFS4_OPEN_CLAIM_NULL:
		case NFS4_OPEN_CLAIM_FH:
			/*
			 * Let's not give out any delegations till everyone's
			 * had the chance to reclaim theirs, *and* until
			 * NLM locks have all been reclaimed:
			 */
			if (locks_in_grace(clp->net))
				goto out_no_deleg;
			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
				goto out_no_deleg;
			/*
			 * Also, if the file was opened for write or
			 * create, there's a good chance the client's
			 * about to write to it, resulting in an
			 * immediate recall (since we don't support
			 * write delegations):
			 */
			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
				goto out_no_deleg;
			if (open->op_create == NFS4_OPEN_CREATE)
				goto out_no_deleg;
			break;
		default:
			goto out_no_deleg;
	}
	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
	if (IS_ERR(dp))
		goto out_no_deleg;

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
		STATEID_VAL(&dp->dl_stid.sc_stateid));
	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
	nfs4_put_stid(&dp->dl_stid);
	return;
out_no_deleg:
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
	/*
	 * NOTE(review): this condition is always false — op_delegate_type
	 * was just set to NFS4_OPEN_DELEGATE_NONE on the line above, so the
	 * "refusing delegation reclaim" warning and op_recall can never
	 * trigger here.  The check was presumably meant to run against the
	 * client-supplied value before the assignment; confirm intended
	 * behavior against upstream before changing, since op_recall is
	 * visible on the wire.
	 */
	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
		open->op_recall = 1;
	}

	/* 4.1 client asking for a delegation? */
	if (open->op_deleg_want)
		nfsd4_open_deleg_none_ext(open, status);
	return;
}
4232
4233 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
4234                                         struct nfs4_delegation *dp)
4235 {
4236         if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
4237             dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4238                 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4239                 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
4240         } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
4241                    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4242                 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4243                 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
4244         }
4245         /* Otherwise the client must be confused wanting a delegation
4246          * it already has, therefore we don't return
4247          * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
4248          */
4249 }
4250
/*
 * Second phase of OPEN processing: find or create the nfs4_file, open or
 * upgrade the stateid, and optionally hand out a delegation.  Uses the
 * objects pre-allocated by nfsd4_process_open1() (open->op_file,
 * op_stp, op_odstate); whichever are consumed here are cleared so
 * nfsd4_cleanup_open_state() doesn't free them.
 */
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_ol_stateid *swapstp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
	if (fp != open->op_file) {
		/* Existing file: validate any claimed delegation and look
		 * for an existing open stateid for this owner. */
		status = nfs4_check_deleg(cl, open, &dp);
		if (status)
			goto out;
		spin_lock(&fp->fi_lock);
		stp = nfsd4_find_existing_open(fp, open);
		spin_unlock(&fp->fi_lock);
	} else {
		/* Our pre-allocated file was hashed: it now belongs to the
		 * hash table, so don't let cleanup free it. */
		open->op_file = NULL;
		status = nfserr_bad_stateid;
		/* A brand-new file cannot satisfy a DELEGATE_CUR claim. */
		if (nfsd4_is_deleg_cur(open))
			goto out;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 */
	if (stp) {
		/* Stateid was found, this is an OPEN upgrade */
		mutex_lock(&stp->st_mutex);
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status) {
			mutex_unlock(&stp->st_mutex);
			goto out;
		}
	} else {
		stp = open->op_stp;
		open->op_stp = NULL;
		/*
		 * init_open_stateid() either returns a locked stateid
		 * it found, or initializes and locks the new one we passed in
		 */
		swapstp = init_open_stateid(stp, fp, open);
		if (swapstp) {
			/* Lost the race: another OPEN hashed a stateid first;
			 * drop ours and upgrade theirs instead. */
			nfs4_put_stid(&stp->st_stid);
			stp = swapstp;
			status = nfs4_upgrade_open(rqstp, fp, current_fh,
						stp, open);
			if (status) {
				mutex_unlock(&stp->st_mutex);
				goto out;
			}
			goto upgrade_out;
		}
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
		if (status) {
			mutex_unlock(&stp->st_mutex);
			release_open_stateid(stp);
			goto out;
		}

		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
							open->op_odstate);
		/* If our pre-allocated odstate was hashed, it's consumed. */
		if (stp->st_clnt_odstate == open->op_odstate)
			open->op_odstate = NULL;
	}
upgrade_out:
	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);

	if (nfsd4_has_session(&resp->cstate)) {
		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(current_fh, open, stp);
nodeleg:
	status = nfs_ok;

	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
		STATEID_VAL(&stp->st_stid.sc_stateid));
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	/* A successful reclaim implicitly confirms the openowner. */
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
	    !nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
	if (dp)
		nfs4_put_stid(&dp->dl_stid);
	if (stp)
		nfs4_put_stid(&stp->st_stid);

	return status;
}
4371
/*
 * Release any state still attached to the OPEN arguments after open
 * processing: drop the openowner reference (stashing it on the compound
 * state first for seqid/replay handling), and free the preallocated
 * file, stateid, and open/deny state that the OPEN did not consume.
 */
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
			      struct nfsd4_open *open)
{
	if (open->op_openowner) {
		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;

		/* keep the owner reachable for replay before dropping our ref */
		nfsd4_cstate_assign_replay(cstate, so);
		nfs4_put_stateowner(so);
	}
	if (open->op_file)
		kmem_cache_free(file_slab, open->op_file);
	if (open->op_stp)
		nfs4_put_stid(&open->op_stp->st_stid);
	if (open->op_odstate)
		kmem_cache_free(odstate_slab, open->op_odstate);
}
4388
/*
 * RENEW: refresh a client's lease.  The lease-time update itself is
 * presumably a side effect of lookup_clientid() — TODO confirm.  Fails
 * with CB_PATH_DOWN when the client holds delegations but its callback
 * channel is not up, so it cannot be asked to return them.
 */
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    clientid_t *clid)
{
	struct nfs4_client *clp;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("process_renew(%08x/%08x): starting\n", 
			clid->cl_boot, clid->cl_id);
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		goto out;
	clp = cstate->clp;
	/* outstanding delegations with a down backchannel are an error */
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	return status;
}
4411
/*
 * End this namespace's NFSv4 grace period: record completion on stable
 * storage and tell the lock manager that ordinary (non-reclaim) opens
 * and locks may proceed.  Idempotent — subsequent calls are no-ops.
 */
void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	dprintk("NFSD: end of grace period\n");
	nn->grace_ended = true;
	/*
	 * If the server goes down again right now, an NFSv4
	 * client will still be allowed to reclaim after it comes back up,
	 * even if it hasn't yet had a chance to reclaim state this time.
	 *
	 */
	nfsd4_record_grace_done(nn);
	/*
	 * At this point, NFSv4 clients can still reclaim.  But if the
	 * server crashes, any that have not yet reclaimed will be out
	 * of luck on the next boot.
	 *
	 * (NFSv4.1+ clients are considered to have reclaimed once they
	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
	 * have reclaimed after their first OPEN.)
	 */
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * At this point, and once lockd and/or any other containers
	 * exit their grace period, further reclaims will fail and
	 * regular locking can resume.
	 */
}
4444
/*
 * Expire stale NFSv4 state for one nfsd_net: clients whose leases ran
 * out, delegations whose recall timed out, and openowners parked on the
 * close LRU.  Each LRU is ordered oldest-first, so scanning stops at the
 * first unexpired entry, whose remaining time bounds the return value:
 * the number of seconds until the laundromat should run again.
 */
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct list_head *pos, *next, reaplist;
	time_t cutoff = get_seconds() - nn->nfsd4_lease;
	time_t t, new_timeo = nn->nfsd4_lease;

	dprintk("NFSD: laundromat service - starting\n");
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);
	/*
	 * Phase 1: collect expired clients onto reaplist under
	 * client_lock, then expire them after dropping the lock
	 * (expire_client() must not run with client_lock held).
	 */
	spin_lock(&nn->client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
			t = clp->cl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		if (mark_client_expired_locked(clp)) {
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
		list_add(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&nn->client_lock);
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		list_del_init(&clp->cl_lru);
		expire_client(clp);
	}
	/*
	 * Phase 2: unhash timed-out recalled delegations under
	 * state_lock, then revoke them unlocked.
	 */
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
			t = dp->dl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_first_entry(&reaplist, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		revoke_delegation(dp);
	}

	/*
	 * Phase 3: drop last-closed stateids of openowners whose stay on
	 * the close LRU has expired.  client_lock is released around
	 * nfs4_put_stid() since the final put may sleep/take other locks.
	 */
	spin_lock(&nn->client_lock);
	while (!list_empty(&nn->close_lru)) {
		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
					oo_close_lru);
		if (time_after((unsigned long)oo->oo_time,
			       (unsigned long)cutoff)) {
			t = oo->oo_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_del_init(&oo->oo_close_lru);
		stp = oo->oo_last_closed_stid;
		oo->oo_last_closed_stid = NULL;
		spin_unlock(&nn->client_lock);
		nfs4_put_stid(&stp->st_stid);
		spin_lock(&nn->client_lock);
	}
	spin_unlock(&nn->client_lock);

	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	return new_timeo;
}
4523
4524 static struct workqueue_struct *laundry_wq;
4525 static void laundromat_main(struct work_struct *);
4526
4527 static void
4528 laundromat_main(struct work_struct *laundry)
4529 {
4530         time_t t;
4531         struct delayed_work *dwork = container_of(laundry, struct delayed_work,
4532                                                   work);
4533         struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4534                                            laundromat_work);
4535
4536         t = nfs4_laundromat(nn);
4537         dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4538         queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4539 }
4540
4541 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4542 {
4543         if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4544                 return nfserr_bad_stateid;
4545         return nfs_ok;
4546 }
4547
/*
 * May this stateid be used for READ?  Note that a write-only open also
 * permits reads here — NOTE(review): presumably to accommodate clients
 * that issue READs via a write-only stateid; confirm against RFC 7530.
 */
static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}
4555
/* May this stateid be used for WRITE?  Requires write or read/write access. */
static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}
4562
4563 static
4564 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4565 {
4566         __be32 status = nfserr_openmode;
4567
4568         /* For lock stateid's, we test the parent open, not the lock: */
4569         if (stp->st_openstp)
4570                 stp = stp->st_openstp;
4571         if ((flags & WR_STATE) && !access_permit_write(stp))
4572                 goto out;
4573         if ((flags & RD_STATE) && !access_permit_read(stp))
4574                 goto out;
4575         status = nfs_ok;
4576 out:
4577         return status;
4578 }
4579
/*
 * Handle I/O with the special all-zeros/all-ones stateids.  The ones
 * stateid always permits READ; otherwise, during grace we must refuse
 * (we can't yet know about conflicting reclaims), and after grace we
 * check for share-deny conflicts against existing opens.
 */
static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (opens_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}
4596
4597 /*
4598  * Allow READ/WRITE during grace period on recovered state only for files
4599  * that are not able to provide mandatory locking.
4600  */
4601 static inline int
4602 grace_disallows_io(struct net *net, struct inode *inode)
4603 {
4604         return opens_in_grace(net) && mandatory_lock(inode);
4605 }
4606
/* Returns true iff a is later than b: */
static bool stateid_generation_after(stateid_t *a, stateid_t *b)
{
	/* signed difference keeps the comparison correct across u32 wraparound */
	return (s32)(a->si_generation - b->si_generation) > 0;
}
4612
/*
 * Compare a client-supplied stateid generation (@in) with our current
 * one (@ref): returns nfs_ok on a match, nfserr_bad_stateid for a
 * future generation, nfserr_old_stateid for a stale one.
 */
static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

	/* If the client sends us a stateid from the future, it's buggy: */
	if (stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client.  For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight.  The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
	 */
	return nfserr_old_stateid;
}
4640
4641 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
4642 {
4643         if (ols->st_stateowner->so_is_open_owner &&
4644             !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4645                 return nfserr_bad_stateid;
4646         return nfs_ok;
4647 }
4648
/*
 * TEST_STATEID helper: classify a single stateid belonging to @cl and
 * return the nfserr_* status the client should see for it.  Pure query;
 * no state is modified.
 */
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	__be32 status = nfserr_bad_stateid;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return status;
	/* Client debugging aid. */
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
		char addr_str[INET6_ADDRSTRLEN];
		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
				 sizeof(addr_str));
		pr_warn_ratelimited("NFSD: client %s testing state ID "
					"with incorrect client ID\n", addr_str);
		return status;
	}
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	/* has_session = 1: TEST_STATEID is sessions-only, so a zero
	 * generation is accepted */
	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (status)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs_ok;
		break;
	case NFS4_REVOKED_DELEG_STID:
		status = nfserr_deleg_revoked;
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
		break;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		/* Fallthrough */
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		status = nfserr_bad_stateid;
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
	return status;
}
4694
/*
 * Look up a stateid of one of the types in @typemask for the client
 * identified inside @stateid.  On success *s refers to the stateid;
 * the caller is responsible for dropping that reference with
 * nfs4_put_stid().
 */
__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
		     stateid_t *stateid, unsigned char typemask,
		     struct nfs4_stid **s, struct nfsd_net *nn)
{
	__be32 status;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return nfserr_bad_stateid;
	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
	if (status == nfserr_stale_clientid) {
		/* stale_stateid makes no sense under sessions; map to bad */
		if (cstate->session)
			return nfserr_bad_stateid;
		return nfserr_stale_stateid;
	}
	if (status)
		return status;
	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
	if (!*s)
		return nfserr_bad_stateid;
	return nfs_ok;
}
4717
/*
 * Return a referenced struct file cached on the stateid that is usable
 * for the access in @flags (RD_STATE/WR_STATE), or NULL if none is
 * available.  The caller must fput() a non-NULL result.
 */
static struct file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
	if (!s)
		return NULL;

	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		/* a delegation is expected to always carry a file */
		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
			return NULL;
		return get_file(s->sc_file->fi_deleg_file);
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		if (flags & RD_STATE)
			return find_readable_file(s->sc_file);
		else
			return find_writeable_file(s->sc_file);
		break;
	}

	return NULL;
}
4740
4741 static __be32
4742 nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
4743 {
4744         __be32 status;
4745
4746         status = nfsd4_check_openowner_confirmed(ols);
4747         if (status)
4748                 return status;
4749         return nfs4_check_openmode(ols, flags);
4750 }
4751
/*
 * Obtain the struct file for an I/O request and store it in *filpp.
 * Prefer a file already cached on the stateid; otherwise open one via
 * nfsd_open() and report it as temporary through *tmp_file.  The cached
 * path performs its own permission check (with OWNER_OVERRIDE —
 * NOTE(review): presumably because access was already vetted at OPEN
 * time; confirm), while nfsd_open() checks permissions internally.
 */
static __be32
nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
		struct file **filpp, bool *tmp_file, int flags)
{
	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
	struct file *file;
	__be32 status;

	file = nfs4_find_file(s, flags);
	if (file) {
		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
				acc | NFSD_MAY_OWNER_OVERRIDE);
		if (status) {
			fput(file);
			return status;
		}

		*filpp = file;
	} else {
		status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
		if (status)
			return status;

		if (tmp_file)
			*tmp_file = true;
	}

	return 0;
}
4781
4782 /*
4783  * Checks for stateid operations
4784  */
4785 __be32
4786 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
4787                 struct nfsd4_compound_state *cstate, stateid_t *stateid,
4788                 int flags, struct file **filpp, bool *tmp_file)
4789 {
4790         struct svc_fh *fhp = &cstate->current_fh;
4791         struct inode *ino = d_inode(fhp->fh_dentry);
4792         struct net *net = SVC_NET(rqstp);
4793         struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4794         struct nfs4_stid *s = NULL;
4795         __be32 status;
4796
4797         if (filpp)
4798                 *filpp = NULL;
4799         if (tmp_file)
4800                 *tmp_file = false;
4801
4802         if (grace_disallows_io(net, ino))
4803                 return nfserr_grace;
4804
4805         if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
4806                 status = check_special_stateids(net, fhp, stateid, flags);
4807                 goto done;
4808         }
4809
4810         status = nfsd4_lookup_stateid(cstate, stateid,
4811                                 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
4812                                 &s, nn);
4813         if (status)
4814                 return status;
4815         status = check_stateid_generation(stateid, &s->sc_stateid,
4816                         nfsd4_has_session(cstate));
4817         if (status)
4818                 goto out;
4819
4820         switch (s->sc_type) {
4821         case NFS4_DELEG_STID:
4822                 status = nfs4_check_delegmode(delegstateid(s), flags);
4823                 break;
4824         case NFS4_OPEN_STID:
4825         case NFS4_LOCK_STID:
4826                 status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
4827                 break;
4828         default:
4829                 status = nfserr_bad_stateid;
4830                 break;
4831         }
4832         if (status)
4833                 goto out;
4834         status = nfs4_check_fh(fhp, s);
4835
4836 done:
4837         if (!status && filpp)
4838                 status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
4839 out:
4840         if (s)
4841                 nfs4_put_stid(s);
4842         return status;
4843 }
4844
4845 /*
4846  * Test if the stateid is valid
4847  */
4848 __be32
4849 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4850                    struct nfsd4_test_stateid *test_stateid)
4851 {
4852         struct nfsd4_test_stateid_id *stateid;
4853         struct nfs4_client *cl = cstate->session->se_client;
4854
4855         list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
4856                 stateid->ts_id_status =
4857                         nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
4858
4859         return nfs_ok;
4860 }
4861
/*
 * Free a lock stateid on behalf of FREE_STATEID, unless the lockowner
 * still holds locks on the file (then nfserr_locks_held).  Consumes the
 * reference the caller took on @s.
 */
static __be32
nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
{
	struct nfs4_ol_stateid *stp = openlockstateid(s);
	__be32 ret;

	/* serialize against other users of this stateid */
	mutex_lock(&stp->st_mutex);

	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (ret)
		goto out;

	ret = nfserr_locks_held;
	if (check_for_locks(stp->st_stid.sc_file,
			    lockowner(stp->st_stateowner)))
		goto out;

	release_lock_stateid(stp);
	ret = nfs_ok;

out:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(s);
	return ret;
}
4887
/*
 * FREE_STATEID: release a stateid the client no longer needs.  Only
 * lock stateids (with no locks outstanding) and revoked delegations can
 * actually be freed; open stateids and live delegations return
 * nfserr_locks_held.
 */
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_free_stateid *free_stateid)
{
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_delegation *dp;
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		ret = nfserr_locks_held;
		break;
	case NFS4_OPEN_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		ret = nfserr_locks_held;
		break;
	case NFS4_LOCK_STID:
		/* take a reference and drop cl_lock before the
		 * mutex-taking helper */
		atomic_inc(&s->sc_count);
		spin_unlock(&cl->cl_lock);
		ret = nfsd4_free_lock_stateid(stateid, s);
		goto out;
	case NFS4_REVOKED_DELEG_STID:
		dp = delegstateid(s);
		list_del_init(&dp->dl_recall_lru);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	/* Default falls through and returns nfserr_bad_stateid */
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
out:
	return ret;
}
4931
4932 static inline int
4933 setlkflg (int type)
4934 {
4935         return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
4936                 RD_STATE : WR_STATE;
4937 }
4938
/*
 * Common checks for a seqid-mutating operation on @stp: verify the
 * owner's seqid, reject closed/revoked stateids, then check the stateid
 * generation and the filehandle.  On success returns nfs_ok with
 * stp->st_mutex held; on any failure the mutex is not held.
 */
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	if (stp->st_stid.sc_type == NFS4_CLOSED_STID
		|| stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
		/*
		 * "Closed" stateid's exist *only* to return
		 * nfserr_replay_me from the previous step, and
		 * revoked delegations are kept only for free_stateid.
		 */
		return nfserr_bad_stateid;
	mutex_lock(&stp->st_mutex);
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status == nfs_ok)
		status = nfs4_check_fh(current_fh, &stp->st_stid);
	if (status != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return status;
}
4964
4965 /* 
4966  * Checks for sequence id mutating operations. 
4967  */
4968 static __be32
4969 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
4970                          stateid_t *stateid, char typemask,
4971                          struct nfs4_ol_stateid **stpp,
4972                          struct nfsd_net *nn)
4973 {
4974         __be32 status;
4975         struct nfs4_stid *s;
4976         struct nfs4_ol_stateid *stp = NULL;
4977
4978         dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
4979                 seqid, STATEID_VAL(stateid));
4980
4981         *stpp = NULL;
4982         status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
4983         if (status)
4984                 return status;
4985         stp = openlockstateid(s);
4986         nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
4987
4988         status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
4989         if (!status)
4990                 *stpp = stp;
4991         else
4992                 nfs4_put_stid(&stp->st_stid);
4993         return status;
4994 }
4995
/*
 * As nfs4_preprocess_seqid_op(), but restricted to open stateids and
 * additionally requiring that the openowner has been confirmed.  On
 * success *stpp is a referenced open stateid with st_mutex held.
 */
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
						NFS4_OPEN_STID, &stp, nn);
	if (status)
		return status;
	oo = openowner(stp->st_stateowner);
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* undo what the preprocess step set up before failing */
		mutex_unlock(&stp->st_mutex);
		nfs4_put_stid(&stp->st_stid);
		return nfserr_bad_stateid;
	}
	*stpp = stp;
	return nfs_ok;
}
5016
/*
 * OPEN_CONFIRM (NFSv4.0): confirm an openowner's first open, bump the
 * stateid, and record the client on stable storage for reboot recovery.
 * Confirming an already-confirmed owner yields nfserr_bad_stateid.
 */
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_open_confirm *oc)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	/* on success stp is referenced with st_mutex held */
	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
		mutex_unlock(&stp->st_mutex);
		goto put_stateid;
	}
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = nfs_ok;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
5058
5059 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
5060 {
5061         if (!test_access(access, stp))
5062                 return;
5063         nfs4_file_put_access(stp->st_stid.sc_file, access);
5064         clear_access(access, stp);
5065 }
5066
/*
 * Drop any access bits on the stateid that exceed @to_access.  A
 * downgrade to BOTH keeps everything; any other value indicates a
 * caller bug and triggers a warning.
 */
static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
	switch (to_access) {
	case NFS4_SHARE_ACCESS_READ:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_WRITE:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_BOTH:
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
5084
/*
 * OPEN_DOWNGRADE: reduce the share access/deny bits of an open stateid.
 * The requested bits must be a subset of what the stateid currently
 * holds; otherwise NFS4ERR_INVAL is returned.
 */
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_open_downgrade *od)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 
			cstate->current_fh.fh_dentry);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	/* on success stp is referenced with st_mutex held */
	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out; 
	status = nfserr_inval;
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto put_stateid;
	}
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto put_stateid;
	}
	nfs4_stateid_downgrade(stp, od->od_share_access);
	reset_union_bmap_deny(od->od_share_deny, stp);
	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
	status = nfs_ok;
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
5128
/*
 * Unhash an open stateid at CLOSE.  For NFSv4.1+ clients the stateid is
 * freed immediately; for v4.0 it is parked on the close LRU —
 * NOTE(review): presumably so a late seqid replay of the CLOSE can
 * still be answered; confirm against move_to_close_lru().
 */
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	struct nfs4_client *clp = s->st_stid.sc_client;
	bool unhashed;
	LIST_HEAD(reaplist);

	s->st_stid.sc_type = NFS4_CLOSED_STID;
	spin_lock(&clp->cl_lock);
	unhashed = unhash_open_stateid(s, &reaplist);

	if (clp->cl_minorversion) {
		if (unhashed)
			put_ol_stateid_locked(s, &reaplist);
		spin_unlock(&clp->cl_lock);
		/* actual freeing happens outside cl_lock */
		free_ol_stateid_reaplist(&reaplist);
	} else {
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
		if (unhashed)
			move_to_close_lru(s, clp->net);
	}
}
5151
5152 /*
5153  * nfs4_unlock_state() called after encode
5154  */
5155 __be32
5156 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5157             struct nfsd4_close *close)
5158 {
5159         __be32 status;
5160         struct nfs4_ol_stateid *stp;
5161         struct net *net = SVC_NET(rqstp);
5162         struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5163
5164         dprintk("NFSD: nfsd4_close on file %pd\n", 
5165                         cstate->current_fh.fh_dentry);
5166
5167         status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
5168                                         &close->cl_stateid,
5169                                         NFS4_OPEN_STID|NFS4_CLOSED_STID,
5170                                         &stp, nn);
5171         nfsd4_bump_seqid(cstate, status);
5172         if (status)
5173                 goto out; 
5174         nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
5175         mutex_unlock(&stp->st_mutex);
5176
5177         nfsd4_close_open_stateid(stp);
5178
5179         /* put reference from nfs4_preprocess_seqid_op */
5180         nfs4_put_stid(&stp->st_stid);
5181 out:
5182         return status;
5183 }
5184
/*
 * DELEGRETURN: client hands back a delegation.
 *
 * Looks up the delegation stateid, checks its generation, then tears
 * the delegation down.  Returns an nfs error status; the stid reference
 * taken by the lookup is always dropped before returning.
 */
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                  struct nfsd4_delegreturn *dr)
{
        struct nfs4_delegation *dp;
        stateid_t *stateid = &dr->dr_stateid;
        struct nfs4_stid *s;
        __be32 status;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
                return status;

        status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
        if (status)
                goto out;
        dp = delegstateid(s);
        /* sessions (v4.1+) don't require an exact generation match */
        status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
        if (status)
                goto put_stateid;

        destroy_delegation(dp);
put_stateid:
        nfs4_put_stid(&dp->dl_stid);
out:
        return status;
}
5212
5213 static inline u64
5214 end_offset(u64 start, u64 len)
5215 {
5216         u64 end;
5217
5218         end = start + len;
5219         return end >= start ? end: NFS4_MAX_UINT64;
5220 }
5221
5222 /* last octet in a range */
5223 static inline u64
5224 last_byte_offset(u64 start, u64 len)
5225 {
5226         u64 end;
5227
5228         WARN_ON_ONCE(!len);
5229         end = start + len;
5230         return end > start ? end - 1: NFS4_MAX_UINT64;
5231 }
5232
5233 /*
5234  * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
5235  * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
5236  * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
5237  * locking, this prevents us from being completely protocol-compliant.  The
5238  * real solution to this problem is to start using unsigned file offsets in
5239  * the VFS, but this is a very deep change!
5240  */
5241 static inline void
5242 nfs4_transform_lock_offset(struct file_lock *lock)
5243 {
5244         if (lock->fl_start < 0)
5245                 lock->fl_start = OFFSET_MAX;
5246         if (lock->fl_end < 0)
5247                 lock->fl_end = OFFSET_MAX;
5248 }
5249
5250 static fl_owner_t
5251 nfsd4_fl_get_owner(fl_owner_t owner)
5252 {
5253         struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5254
5255         nfs4_get_stateowner(&lo->lo_owner);
5256         return owner;
5257 }
5258
5259 static void
5260 nfsd4_fl_put_owner(fl_owner_t owner)
5261 {
5262         struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5263
5264         if (lo)
5265                 nfs4_put_stateowner(&lo->lo_owner);
5266 }
5267
/*
 * Lock-manager ops for POSIX locks set by nfsd: fl_owner reference
 * counting is delegated to the hooks above.
 */
static const struct lock_manager_operations nfsd_posix_mng_ops  = {
        .lm_get_owner = nfsd4_fl_get_owner,
        .lm_put_owner = nfsd4_fl_put_owner,
};
5272
/*
 * Fill in a LOCK4denied result from the conflicting file_lock.
 *
 * If the conflicting lock is one of ours (nfsd_posix_mng_ops) we can
 * report the real owner/clientid; otherwise — or if the kmemdup fails —
 * report an anonymous owner (len 0, clientid 0/0).
 */
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
        struct nfs4_lockowner *lo;

        if (fl->fl_lmops == &nfsd_posix_mng_ops) {
                lo = (struct nfs4_lockowner *) fl->fl_owner;
                deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
                                        lo->lo_owner.so_owner.len, GFP_KERNEL);
                if (!deny->ld_owner.data)
                        /* We just don't care that much */
                        goto nevermind;
                deny->ld_owner.len = lo->lo_owner.so_owner.len;
                deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
        } else {
nevermind:
                deny->ld_owner.len = 0;
                deny->ld_owner.data = NULL;
                deny->ld_clientid.cl_boot = 0;
                deny->ld_clientid.cl_id = 0;
        }
        deny->ld_start = fl->fl_start;
        /* NFS4_MAX_UINT64 length means "to end of file" */
        deny->ld_length = NFS4_MAX_UINT64;
        if (fl->fl_end != NFS4_MAX_UINT64)
                deny->ld_length = fl->fl_end - fl->fl_start + 1;        
        deny->ld_type = NFS4_READ_LT;
        if (fl->fl_type != F_RDLCK)
                deny->ld_type = NFS4_WRITE_LT;
}
5302
/*
 * Find a lockowner matching @owner in the client's stateowner hash.
 * Caller must hold clp->cl_lock.  On success returns the lockowner
 * with a stateowner reference taken; NULL if not found.
 */
static struct nfs4_lockowner *
find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
{
        unsigned int strhashval = ownerstr_hashval(owner);
        struct nfs4_stateowner *so;

        lockdep_assert_held(&clp->cl_lock);

        list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
                            so_strhash) {
                /* the table holds open owners too; skip those */
                if (so->so_is_open_owner)
                        continue;
                if (same_owner_str(so, owner))
                        return lockowner(nfs4_get_stateowner(so));
        }
        return NULL;
}
5320
5321 static struct nfs4_lockowner *
5322 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
5323 {
5324         struct nfs4_lockowner *lo;
5325
5326         spin_lock(&clp->cl_lock);
5327         lo = find_lockowner_str_locked(clp, owner);
5328         spin_unlock(&clp->cl_lock);
5329         return lo;
5330 }
5331
/* so_unhash callback for lockowners. */
static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
        unhash_lockowner_locked(lockowner(sop));
}
5336
5337 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
5338 {
5339         struct nfs4_lockowner *lo = lockowner(sop);
5340
5341         kmem_cache_free(lockowner_slab, lo);
5342 }
5343
/* Stateowner ops shared by all lockowners. */
static const struct nfs4_stateowner_operations lockowner_ops = {
        .so_unhash =    nfs4_unhash_lockowner,
        .so_free =      nfs4_free_lockowner,
};
5348
5349 /*
5350  * Alloc a lock owner structure.
5351  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 
5352  * occurred. 
5353  *
5354  * strhashval = ownerstr_hashval
5355  */
5356 static struct nfs4_lockowner *
5357 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5358                            struct nfs4_ol_stateid *open_stp,
5359                            struct nfsd4_lock *lock)
5360 {
5361         struct nfs4_lockowner *lo, *ret;
5362
5363         lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
5364         if (!lo)
5365                 return NULL;
5366         INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
5367         lo->lo_owner.so_is_open_owner = 0;
5368         lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
5369         lo->lo_owner.so_ops = &lockowner_ops;
5370         spin_lock(&clp->cl_lock);
5371         ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
5372         if (ret == NULL) {
5373                 list_add(&lo->lo_owner.so_strhash,
5374                          &clp->cl_ownerstr_hashtbl[strhashval]);
5375                 ret = lo;
5376         } else
5377                 nfs4_free_stateowner(&lo->lo_owner);
5378
5379         spin_unlock(&clp->cl_lock);
5380         return ret;
5381 }
5382
/*
 * Initialize a freshly-allocated lock stateid and hash it onto the
 * lockowner, the parent open stateid's lock list, and the file.
 * Caller must hold clp->cl_lock.  Deny bits are inherited from the
 * parent open stateid; access bits start empty and are filled in by
 * get_lock_access() as locks are taken.
 */
static void
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
                  struct nfs4_file *fp, struct inode *inode,
                  struct nfs4_ol_stateid *open_stp)
{
        struct nfs4_client *clp = lo->lo_owner.so_client;

        lockdep_assert_held(&clp->cl_lock);

        atomic_inc(&stp->st_stid.sc_count);
        stp->st_stid.sc_type = NFS4_LOCK_STID;
        stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
        get_nfs4_file(fp);
        stp->st_stid.sc_file = fp;
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = open_stp->st_deny_bmap;
        stp->st_openstp = open_stp;
        mutex_init(&stp->st_mutex);
        list_add(&stp->st_locks, &open_stp->st_locks);
        list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
        spin_lock(&fp->fi_lock);
        list_add(&stp->st_perfile, &fp->fi_stateids);
        spin_unlock(&fp->fi_lock);
}
5407
/*
 * Find the lock stateid this lockowner already holds on @fp, if any.
 * Caller must hold clp->cl_lock.  Returns the stateid with an extra
 * sc_count reference, or NULL.
 */
static struct nfs4_ol_stateid *
find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
{
        struct nfs4_ol_stateid *lst;
        struct nfs4_client *clp = lo->lo_owner.so_client;

        lockdep_assert_held(&clp->cl_lock);

        list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
                if (lst->st_stid.sc_file == fp) {
                        atomic_inc(&lst->st_stid.sc_count);
                        return lst;
                }
        }
        return NULL;
}
5424
/*
 * Return the lockowner's lock stateid for @fi, creating one if needed.
 * Sets *new when a stateid was created.  Returns NULL only on
 * allocation failure.
 *
 * The allocation happens with cl_lock dropped, so we must re-check for
 * a concurrently-inserted stateid after retaking the lock; a losing
 * allocation is released via nfs4_put_stid at the end.
 */
static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
                            struct inode *inode, struct nfs4_ol_stateid *ost,
                            bool *new)
{
        struct nfs4_stid *ns = NULL;
        struct nfs4_ol_stateid *lst;
        struct nfs4_openowner *oo = openowner(ost->st_stateowner);
        struct nfs4_client *clp = oo->oo_owner.so_client;

        spin_lock(&clp->cl_lock);
        lst = find_lock_stateid(lo, fi);
        if (lst == NULL) {
                spin_unlock(&clp->cl_lock);
                ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
                if (ns == NULL)
                        return NULL;

                spin_lock(&clp->cl_lock);
                lst = find_lock_stateid(lo, fi);
                if (likely(!lst)) {
                        lst = openlockstateid(ns);
                        init_lock_stateid(lst, lo, fi, inode, ost);
                        ns = NULL;      /* ownership moved to the hash */
                        *new = true;
                }
        }
        spin_unlock(&clp->cl_lock);
        if (ns)
                nfs4_put_stid(ns);
        return lst;
}
5457
5458 static int
5459 check_lock_length(u64 offset, u64 length)
5460 {
5461         return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
5462                 (length > ~offset)));
5463 }
5464
/*
 * Record that the lock stateid now holds @access (READ or WRITE) on its
 * file, taking a file access reference the first time.  Idempotent;
 * caller must hold fp->fi_lock.
 */
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
        struct nfs4_file *fp = lock_stp->st_stid.sc_file;

        lockdep_assert_held(&fp->fi_lock);

        if (test_access(access, lock_stp))
                return;
        __nfs4_file_get_access(fp, access);
        set_access(access, lock_stp);
}
5476
/*
 * Resolve (or create) the lockowner and lock stateid for a "new" LOCK
 * request.  On success, *plst holds a referenced, st_mutex-locked lock
 * stateid and *new tells the caller whether it was freshly created.
 *
 * For v4.0 an existing lockowner must present the expected seqid.
 * The retry loop below closes a race with FREE_STATEID: a stateid found
 * by find_or_create_lock_stateid may be unhashed before we get its
 * mutex, in which case we drop it and start over.
 */
static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
                            struct nfs4_ol_stateid *ost,
                            struct nfsd4_lock *lock,
                            struct nfs4_ol_stateid **plst, bool *new)
{
        __be32 status;
        struct nfs4_file *fi = ost->st_stid.sc_file;
        struct nfs4_openowner *oo = openowner(ost->st_stateowner);
        struct nfs4_client *cl = oo->oo_owner.so_client;
        struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
        struct nfs4_lockowner *lo;
        struct nfs4_ol_stateid *lst;
        unsigned int strhashval;
        bool hashed;

        lo = find_lockowner_str(cl, &lock->lk_new_owner);
        if (!lo) {
                strhashval = ownerstr_hashval(&lock->lk_new_owner);
                lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
                if (lo == NULL)
                        return nfserr_jukebox;
        } else {
                /* with an existing lockowner, seqids must be the same */
                status = nfserr_bad_seqid;
                if (!cstate->minorversion &&
                    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
                        goto out;
        }

retry:
        lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
        if (lst == NULL) {
                status = nfserr_jukebox;
                goto out;
        }

        mutex_lock(&lst->st_mutex);

        /* See if it's still hashed to avoid race with FREE_STATEID */
        spin_lock(&cl->cl_lock);
        hashed = !list_empty(&lst->st_perfile);
        spin_unlock(&cl->cl_lock);

        if (!hashed) {
                mutex_unlock(&lst->st_mutex);
                nfs4_put_stid(&lst->st_stid);
                goto retry;
        }
        status = nfs_ok;
        *plst = lst;
out:
        nfs4_put_stateowner(&lo->lo_owner);
        return status;
}
5532
5533 /*
5534  *  LOCK operation 
5535  */
5536 __be32
5537 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5538            struct nfsd4_lock *lock)
5539 {
5540         struct nfs4_openowner *open_sop = NULL;
5541         struct nfs4_lockowner *lock_sop = NULL;
5542         struct nfs4_ol_stateid *lock_stp = NULL;
5543         struct nfs4_ol_stateid *open_stp = NULL;
5544         struct nfs4_file *fp;
5545         struct file *filp = NULL;
5546         struct file_lock *file_lock = NULL;
5547         struct file_lock *conflock = NULL;
5548         __be32 status = 0;
5549         int lkflg;
5550         int err;
5551         bool new = false;
5552         struct net *net = SVC_NET(rqstp);
5553         struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5554
5555         dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
5556                 (long long) lock->lk_offset,
5557                 (long long) lock->lk_length);
5558
5559         if (check_lock_length(lock->lk_offset, lock->lk_length))
5560                  return nfserr_inval;
5561
5562         if ((status = fh_verify(rqstp, &cstate->current_fh,
5563                                 S_IFREG, NFSD_MAY_LOCK))) {
5564                 dprintk("NFSD: nfsd4_lock: permission denied!\n");
5565                 return status;
5566         }
5567
5568         if (lock->lk_is_new) {
5569                 if (nfsd4_has_session(cstate))
5570                         /* See rfc 5661 18.10.3: given clientid is ignored: */
5571                         memcpy(&lock->lk_new_clientid,
5572                                 &cstate->session->se_client->cl_clientid,
5573                                 sizeof(clientid_t));
5574
5575                 status = nfserr_stale_clientid;
5576                 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
5577                         goto out;
5578
5579                 /* validate and update open stateid and open seqid */
5580                 status = nfs4_preprocess_confirmed_seqid_op(cstate,
5581                                         lock->lk_new_open_seqid,
5582                                         &lock->lk_new_open_stateid,
5583                                         &open_stp, nn);
5584                 if (status)
5585                         goto out;
5586                 mutex_unlock(&open_stp->st_mutex);
5587                 open_sop = openowner(open_stp->st_stateowner);
5588                 status = nfserr_bad_stateid;
5589                 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5590                                                 &lock->lk_new_clientid))
5591                         goto out;
5592                 status = lookup_or_create_lock_state(cstate, open_stp, lock,
5593                                                         &lock_stp, &new);
5594         } else {
5595                 status = nfs4_preprocess_seqid_op(cstate,
5596                                        lock->lk_old_lock_seqid,
5597                                        &lock->lk_old_lock_stateid,
5598                                        NFS4_LOCK_STID, &lock_stp, nn);
5599         }
5600         if (status)
5601                 goto out;
5602         lock_sop = lockowner(lock_stp->st_stateowner);
5603
5604         lkflg = setlkflg(lock->lk_type);
5605         status = nfs4_check_openmode(lock_stp, lkflg);
5606         if (status)
5607                 goto out;
5608
5609         status = nfserr_grace;
5610         if (locks_in_grace(net) && !lock->lk_reclaim)
5611                 goto out;
5612         status = nfserr_no_grace;
5613         if (!locks_in_grace(net) && lock->lk_reclaim)
5614                 goto out;
5615
5616         file_lock = locks_alloc_lock();
5617         if (!file_lock) {
5618                 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5619                 status = nfserr_jukebox;
5620                 goto out;
5621         }
5622
5623         fp = lock_stp->st_stid.sc_file;
5624         switch (lock->lk_type) {
5625                 case NFS4_READ_LT:
5626                 case NFS4_READW_LT:
5627                         spin_lock(&fp->fi_lock);
5628                         filp = find_readable_file_locked(fp);
5629                         if (filp)
5630                                 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
5631                         spin_unlock(&fp->fi_lock);
5632                         file_lock->fl_type = F_RDLCK;
5633                         break;
5634                 case NFS4_WRITE_LT:
5635                 case NFS4_WRITEW_LT:
5636                         spin_lock(&fp->fi_lock);
5637                         filp = find_writeable_file_locked(fp);
5638                         if (filp)
5639                                 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
5640                         spin_unlock(&fp->fi_lock);
5641                         file_lock->fl_type = F_WRLCK;
5642                         break;
5643                 default:
5644                         status = nfserr_inval;
5645                 goto out;
5646         }
5647         if (!filp) {
5648                 status = nfserr_openmode;
5649                 goto out;
5650         }
5651
5652         file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
5653         file_lock->fl_pid = current->tgid;
5654         file_lock->fl_file = filp;
5655         file_lock->fl_flags = FL_POSIX;
5656         file_lock->fl_lmops = &nfsd_posix_mng_ops;
5657         file_lock->fl_start = lock->lk_offset;
5658         file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
5659         nfs4_transform_lock_offset(file_lock);
5660
5661         conflock = locks_alloc_lock();
5662         if (!conflock) {
5663                 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5664                 status = nfserr_jukebox;
5665                 goto out;
5666         }
5667
5668         err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
5669         switch (-err) {
5670         case 0: /* success! */
5671                 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
5672                 status = 0;
5673                 break;
5674         case (EAGAIN):          /* conflock holds conflicting lock */
5675                 status = nfserr_denied;
5676                 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
5677                 nfs4_set_lock_denied(conflock, &lock->lk_denied);
5678                 break;
5679         case (EDEADLK):
5680                 status = nfserr_deadlock;
5681                 break;
5682         default:
5683                 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
5684                 status = nfserrno(err);
5685                 break;
5686         }
5687 out:
5688         if (filp)
5689                 fput(filp);
5690         if (lock_stp) {
5691                 /* Bump seqid manually if the 4.0 replay owner is openowner */
5692                 if (cstate->replay_owner &&
5693                     cstate->replay_owner != &lock_sop->lo_owner &&
5694                     seqid_mutating_err(ntohl(status)))
5695                         lock_sop->lo_owner.so_seqid++;
5696
5697                 mutex_unlock(&lock_stp->st_mutex);
5698
5699                 /*
5700                  * If this is a new, never-before-used stateid, and we are
5701                  * returning an error, then just go ahead and release it.
5702                  */
5703                 if (status && new)
5704                         release_lock_stateid(lock_stp);
5705
5706                 nfs4_put_stid(&lock_stp->st_stid);
5707         }
5708         if (open_stp)
5709                 nfs4_put_stid(&open_stp->st_stid);
5710         nfsd4_bump_seqid(cstate, status);
5711         if (file_lock)
5712                 locks_free_lock(file_lock);
5713         if (conflock)
5714                 locks_free_lock(conflock);
5715         return status;
5716 }
5717
5718 /*
5719  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
5720  * so we do a temporary open here just to get an open file to pass to
5721  * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
5722  * inode operation.)
5723  */
5724 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
5725 {
5726         struct file *file;
5727         __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
5728         if (!err) {
5729                 err = nfserrno(vfs_test_lock(file, lock));
5730                 fput(file);
5731         }
5732         return err;
5733 }
5734
5735 /*
5736  * LOCKT operation
5737  */
5738 __be32
5739 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5740             struct nfsd4_lockt *lockt)
5741 {
5742         struct file_lock *file_lock = NULL;
5743         struct nfs4_lockowner *lo = NULL;
5744         __be32 status;
5745         struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5746
5747         if (locks_in_grace(SVC_NET(rqstp)))
5748                 return nfserr_grace;
5749
5750         if (check_lock_length(lockt->lt_offset, lockt->lt_length))
5751                  return nfserr_inval;
5752
5753         if (!nfsd4_has_session(cstate)) {
5754                 status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
5755                 if (status)
5756                         goto out;
5757         }
5758
5759         if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
5760                 goto out;
5761
5762         file_lock = locks_alloc_lock();
5763         if (!file_lock) {
5764                 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5765                 status = nfserr_jukebox;
5766                 goto out;
5767         }
5768
5769         switch (lockt->lt_type) {
5770                 case NFS4_READ_LT:
5771                 case NFS4_READW_LT:
5772                         file_lock->fl_type = F_RDLCK;
5773                 break;
5774                 case NFS4_WRITE_LT:
5775                 case NFS4_WRITEW_LT:
5776                         file_lock->fl_type = F_WRLCK;
5777                 break;
5778                 default:
5779                         dprintk("NFSD: nfs4_lockt: bad lock type!\n");
5780                         status = nfserr_inval;
5781                 goto out;
5782         }
5783
5784         lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
5785         if (lo)
5786                 file_lock->fl_owner = (fl_owner_t)lo;
5787         file_lock->fl_pid = current->tgid;
5788         file_lock->fl_flags = FL_POSIX;
5789
5790         file_lock->fl_start = lockt->lt_offset;
5791         file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
5792
5793         nfs4_transform_lock_offset(file_lock);
5794
5795         status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
5796         if (status)
5797                 goto out;
5798
5799         if (file_lock->fl_type != F_UNLCK) {
5800                 status = nfserr_denied;
5801                 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
5802         }
5803 out:
5804         if (lo)
5805                 nfs4_put_stateowner(&lo->lo_owner);
5806         if (file_lock)
5807                 locks_free_lock(file_lock);
5808         return status;
5809 }
5810
/*
 * LOCKU operation: release a byte range held under a lock stateid.
 * Issues an F_UNLCK through vfs_lock_file and bumps/copies the stateid
 * on success.
 */
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
            struct nfsd4_locku *locku)
{
        struct nfs4_ol_stateid *stp;
        struct file *filp = NULL;
        struct file_lock *file_lock = NULL;
        __be32 status;
        int err;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
                (long long) locku->lu_offset,
                (long long) locku->lu_length);

        if (check_lock_length(locku->lu_offset, locku->lu_length))
                 return nfserr_inval;

        status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
                                        &locku->lu_stateid, NFS4_LOCK_STID,
                                        &stp, nn);
        if (status)
                goto out;
        filp = find_any_file(stp->st_stid.sc_file);
        if (!filp) {
                status = nfserr_lock_range;
                goto put_stateid;
        }
        file_lock = locks_alloc_lock();
        if (!file_lock) {
                dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
                status = nfserr_jukebox;
                goto fput;
        }

        file_lock->fl_type = F_UNLCK;
        /* reference dropped via lm_put_owner when the lock is freed */
        file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
        file_lock->fl_pid = current->tgid;
        file_lock->fl_file = filp;
        file_lock->fl_flags = FL_POSIX;
        file_lock->fl_lmops = &nfsd_posix_mng_ops;
        file_lock->fl_start = locku->lu_offset;

        file_lock->fl_end = last_byte_offset(locku->lu_offset,
                                                locku->lu_length);
        nfs4_transform_lock_offset(file_lock);

        err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
        if (err) {
                dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
                goto out_nfserr;
        }
        nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
fput:
        fput(filp);
put_stateid:
        mutex_unlock(&stp->st_mutex);
        nfs4_put_stid(&stp->st_stid);
out:
        nfsd4_bump_seqid(cstate, status);
        if (file_lock)
                locks_free_lock(file_lock);
        return status;

out_nfserr:
        status = nfserrno(err);
        goto fput;
}
5879
5880 /*
5881  * returns
5882  *      true:  locks held by lockowner
5883  *      false: no locks held by lockowner
5884  */
5885 static bool
5886 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
5887 {
5888         struct file_lock *fl;
5889         int status = false;
5890         struct file *filp = find_any_file(fp);
5891         struct inode *inode;
5892         struct file_lock_context *flctx;
5893
5894         if (!filp) {
5895                 /* Any valid lock stateid should have some sort of access */
5896                 WARN_ON_ONCE(1);
5897                 return status;
5898         }
5899
5900         inode = file_inode(filp);
5901         flctx = inode->i_flctx;
5902
5903         if (flctx && !list_empty_careful(&flctx->flc_posix)) {
5904                 spin_lock(&flctx->flc_lock);
5905                 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
5906                         if (fl->fl_owner == (fl_owner_t)lowner) {
5907                                 status = true;
5908                                 break;
5909                         }
5910                 }
5911                 spin_unlock(&flctx->flc_lock);
5912         }
5913         fput(filp);
5914         return status;
5915 }
5916
/*
 * RELEASE_LOCKOWNER: destroy a lockowner and all its lock stateids,
 * but only if the lockowner holds no actual locks (nfserr_locks_held
 * otherwise).
 *
 * NOTE(review): check_for_locks() (find_any_file/fput and a flc_lock
 * scan) is called while holding clp->cl_lock — heavyweight work under a
 * spinlock; confirm this is intentional in this kernel version.
 */
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
                        struct nfsd4_compound_state *cstate,
                        struct nfsd4_release_lockowner *rlockowner)
{
        clientid_t *clid = &rlockowner->rl_clientid;
        struct nfs4_stateowner *sop;
        struct nfs4_lockowner *lo = NULL;
        struct nfs4_ol_stateid *stp;
        struct xdr_netobj *owner = &rlockowner->rl_owner;
        unsigned int hashval = ownerstr_hashval(owner);
        __be32 status;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct nfs4_client *clp;
        LIST_HEAD (reaplist);

        dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
                clid->cl_boot, clid->cl_id);

        status = lookup_clientid(clid, cstate, nn);
        if (status)
                return status;

        clp = cstate->clp;
        /* Find the matching lock stateowner */
        spin_lock(&clp->cl_lock);
        list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
                            so_strhash) {

                if (sop->so_is_open_owner || !same_owner_str(sop, owner))
                        continue;

                /* see if there are still any locks associated with it */
                lo = lockowner(sop);
                list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
                        if (check_for_locks(stp->st_stid.sc_file, lo)) {
                                status = nfserr_locks_held;
                                spin_unlock(&clp->cl_lock);
                                return status;
                        }
                }

                nfs4_get_stateowner(sop);
                break;
        }
        if (!lo) {
                /* unknown lockowner: RELEASE_LOCKOWNER succeeds trivially */
                spin_unlock(&clp->cl_lock);
                return status;
        }

        unhash_lockowner_locked(lo);
        while (!list_empty(&lo->lo_owner.so_stateids)) {
                stp = list_first_entry(&lo->lo_owner.so_stateids,
                                       struct nfs4_ol_stateid,
                                       st_perstateowner);
                WARN_ON(!unhash_lock_stateid(stp));
                put_ol_stateid_locked(stp, &reaplist);
        }
        spin_unlock(&clp->cl_lock);
        free_ol_stateid_reaplist(&reaplist);
        nfs4_put_stateowner(&lo->lo_owner);

        return status;
}
5981
5982 static inline struct nfs4_client_reclaim *
5983 alloc_reclaim(void)
5984 {
5985         return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
5986 }
5987
5988 bool
5989 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
5990 {
5991         struct nfs4_client_reclaim *crp;
5992
5993         crp = nfsd4_find_reclaim_client(name, nn);
5994         return (crp && crp->cr_clp);
5995 }
5996
5997 /*
5998  * failure => all reset bets are off, nfserr_no_grace...
5999  */
6000 struct nfs4_client_reclaim *
6001 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
6002 {
6003         unsigned int strhashval;
6004         struct nfs4_client_reclaim *crp;
6005
6006         dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
6007         crp = alloc_reclaim();
6008         if (crp) {
6009                 strhashval = clientstr_hashval(name);
6010                 INIT_LIST_HEAD(&crp->cr_strhash);
6011                 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
6012                 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
6013                 crp->cr_clp = NULL;
6014                 nn->reclaim_str_hashtbl_size++;
6015         }
6016         return crp;
6017 }
6018
6019 void
6020 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
6021 {
6022         list_del(&crp->cr_strhash);
6023         kfree(crp);
6024         nn->reclaim_str_hashtbl_size--;
6025 }
6026
6027 void
6028 nfs4_release_reclaim(struct nfsd_net *nn)
6029 {
6030         struct nfs4_client_reclaim *crp = NULL;
6031         int i;
6032
6033         for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6034                 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
6035                         crp = list_entry(nn->reclaim_str_hashtbl[i].next,
6036                                         struct nfs4_client_reclaim, cr_strhash);
6037                         nfs4_remove_reclaim_record(crp, nn);
6038                 }
6039         }
6040         WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
6041 }
6042
6043 /*
6044  * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
6045 struct nfs4_client_reclaim *
6046 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
6047 {
6048         unsigned int strhashval;
6049         struct nfs4_client_reclaim *crp = NULL;
6050
6051         dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
6052
6053         strhashval = clientstr_hashval(recdir);
6054         list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
6055                 if (same_name(crp->cr_recdir, recdir)) {
6056                         return crp;
6057                 }
6058         }
6059         return NULL;
6060 }
6061
/*
* Called from OPEN. Look for clientid in reclaim list.
*
* Returns nfs_ok if the client is allowed to reclaim, otherwise
* nfserr_reclaim_bad (unknown client or failed record check) or
* nfserr_no_grace (client already finished reclaiming).  The check
* order determines which error wins, so don't reorder.
*/
__be32
nfs4_check_open_reclaim(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	__be32 status;

	/* find clientid in conf_id_hashtbl */
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		return nfserr_reclaim_bad;

	/* A client that already sent RECLAIM_COMPLETE may not reclaim. */
	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
		return nfserr_no_grace;

	/* Nonzero from nfsd4_client_record_check() maps to reclaim_bad. */
	if (nfsd4_client_record_check(cstate->clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}
6085
6086 #ifdef CONFIG_NFSD_FAULT_INJECTION
/*
 * Drop a client reference taken by the fault-injection helpers via
 * atomic_inc(&clp->cl_refcount).  Only decrements; never frees the
 * client itself.
 */
static inline void
put_client(struct nfs4_client *clp)
{
	atomic_dec(&clp->cl_refcount);
}
6092
/*
 * Look up a client on the per-net LRU by exact byte comparison of its
 * socket address.  NOTE(review): no lock is asserted here, but every
 * caller visible in this file takes nn->client_lock first — confirm
 * that holds for any new caller.
 */
static struct nfs4_client *
nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
{
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return NULL;

	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		/* compares only the first addr_size bytes of cl_addr */
		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
			return clp;
	}
	return NULL;
}
6109
6110 u64
6111 nfsd_inject_print_clients(void)
6112 {
6113         struct nfs4_client *clp;
6114         u64 count = 0;
6115         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6116                                           nfsd_net_id);
6117         char buf[INET6_ADDRSTRLEN];
6118
6119         if (!nfsd_netns_ready(nn))
6120                 return 0;
6121
6122         spin_lock(&nn->client_lock);
6123         list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6124                 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6125                 pr_info("NFS Client: %s\n", buf);
6126                 ++count;
6127         }
6128         spin_unlock(&nn->client_lock);
6129
6130         return count;
6131 }
6132
/*
 * Fault injection: expire the client at @addr, as if its lease had run
 * out.  Returns 1 if a client was expired, 0 otherwise.
 */
u64
nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp) {
		if (mark_client_expired_locked(clp) == nfs_ok)
			++count;
		else
			clp = NULL;	/* couldn't be expired; leave it */
	}
	spin_unlock(&nn->client_lock);

	/* Final teardown runs outside client_lock. */
	if (clp)
		expire_client(clp);

	return count;
}
6159
/*
 * Fault injection: expire up to @max clients (@max == 0 means all).
 * Returns the number expired.
 */
u64
nfsd_inject_forget_clients(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		if (mark_client_expired_locked(clp) == nfs_ok) {
			/*
			 * NOTE(review): cl_lru is re-linked onto reaplist
			 * with no list_del here — assumes
			 * mark_client_expired_locked() already unlinked the
			 * client from client_lru; confirm.
			 */
			list_add(&clp->cl_lru, &reaplist);
			if (max != 0 && ++count >= max)
				break;
		}
	}
	spin_unlock(&nn->client_lock);

	/* Teardown happens after dropping client_lock. */
	list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
		expire_client(clp);

	return count;
}
6187
6188 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
6189                              const char *type)
6190 {
6191         char buf[INET6_ADDRSTRLEN];
6192         rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6193         printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
6194 }
6195
/*
 * Queue a lock stateid on @collect for later reaping, pinning its
 * client so the client can't disappear before nfsd_reap_locks() runs.
 * No-op when @collect is NULL (count-only walks).
 */
static void
nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
			     struct list_head *collect)
{
	struct nfs4_client *clp = lst->st_stid.sc_client;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!collect)
		return;

	lockdep_assert_held(&nn->client_lock);
	/* Balanced by put_client() in nfsd_reap_locks(). */
	atomic_inc(&clp->cl_refcount);
	list_add(&lst->st_locks, collect);
}
6211
/*
 * Walk every lock stateid of every openowner of @clp under cl_lock.
 * If @func is non-NULL it is applied to each stateid; when it returns
 * true the stateid is queued on @collect (see
 * nfsd_inject_add_lock_to_list()).  Stops after @max stateids when
 * @max is nonzero.  Returns the number of lock stateids visited.
 */
static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
				    struct list_head *collect,
				    bool (*func)(struct nfs4_ol_stateid *))
{
	struct nfs4_openowner *oop;
	struct nfs4_ol_stateid *stp, *st_next;
	struct nfs4_ol_stateid *lst, *lst_next;
	u64 count = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
		list_for_each_entry_safe(stp, st_next,
				&oop->oo_owner.so_stateids, st_perstateowner) {
			list_for_each_entry_safe(lst, lst_next,
					&stp->st_locks, st_locks) {
				if (func) {
					if (func(lst))
						nfsd_inject_add_lock_to_list(lst,
									collect);
				}
				++count;
				/*
				 * Despite the fact that these functions deal
				 * with 64-bit integers for "count", we must
				 * ensure that it doesn't blow up the
				 * clp->cl_refcount. Throw a warning if we
				 * start to approach INT_MAX here.
				 */
				WARN_ON_ONCE(count == (INT_MAX / 2));
				if (count == max)
					goto out;
			}
		}
	}
out:
	spin_unlock(&clp->cl_lock);

	return count;
}
6251
/*
 * Unhash up to @max of the client's lock stateids (@max == 0 means
 * all), queueing them on @collect for the caller to reap.  Returns
 * the number visited.
 */
static u64
nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
			  u64 max)
{
	return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
}
6258
6259 static u64
6260 nfsd_print_client_locks(struct nfs4_client *clp)
6261 {
6262         u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
6263         nfsd_print_count(clp, count, "locked files");
6264         return count;
6265 }
6266
6267 u64
6268 nfsd_inject_print_locks(void)
6269 {
6270         struct nfs4_client *clp;
6271         u64 count = 0;
6272         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6273                                                 nfsd_net_id);
6274
6275         if (!nfsd_netns_ready(nn))
6276                 return 0;
6277
6278         spin_lock(&nn->client_lock);
6279         list_for_each_entry(clp, &nn->client_lru, cl_lru)
6280                 count += nfsd_print_client_locks(clp);
6281         spin_unlock(&nn->client_lock);
6282
6283         return count;
6284 }
6285
/*
 * Release lock stateids previously collected under client_lock.  The
 * client pointer is saved before nfs4_put_stid() so put_client() can
 * still run after the stateid may have been freed.
 */
static void
nfsd_reap_locks(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_ol_stateid *stp, *next;

	list_for_each_entry_safe(stp, next, reaplist, st_locks) {
		list_del_init(&stp->st_locks);
		clp = stp->st_stid.sc_client;
		nfs4_put_stid(&stp->st_stid);
		/* Drops the ref taken in nfsd_inject_add_lock_to_list(). */
		put_client(clp);
	}
}
6299
6300 u64
6301 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
6302 {
6303         unsigned int count = 0;
6304         struct nfs4_client *clp;
6305         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6306                                                 nfsd_net_id);
6307         LIST_HEAD(reaplist);
6308
6309         if (!nfsd_netns_ready(nn))
6310                 return count;
6311
6312         spin_lock(&nn->client_lock);
6313         clp = nfsd_find_client(addr, addr_size);
6314         if (clp)
6315                 count = nfsd_collect_client_locks(clp, &reaplist, 0);
6316         spin_unlock(&nn->client_lock);
6317         nfsd_reap_locks(&reaplist);
6318         return count;
6319 }
6320
/*
 * Fault injection: tear down up to @max lock stateids across all
 * clients (@max == 0 means all).  Returns the number released.
 */
u64
nfsd_inject_forget_locks(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		/*
		 * When max == 0, "max - count" wraps to a huge u64 the
		 * inner walk can never reach, and the break below is
		 * disabled by the max != 0 guard — i.e. no limit.
		 */
		count += nfsd_collect_client_locks(clp, &reaplist, max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_locks(&reaplist);
	return count;
}
6343
/*
 * Walk @clp's openowners under cl_lock.  If @func is non-NULL it is
 * applied to each owner, and when @collect is also non-NULL the owner
 * is queued there with the client pinned once per owner.  Stops at
 * @max owners when @max is nonzero; returns the number visited.
 * Caller must hold nn->client_lock (asserted).
 */
static u64
nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
			      struct list_head *collect,
			      void (*func)(struct nfs4_openowner *))
{
	struct nfs4_openowner *oop, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
		if (func) {
			func(oop);
			if (collect) {
				/* Dropped by put_client() in nfsd_reap_openowners(). */
				atomic_inc(&clp->cl_refcount);
				list_add(&oop->oo_perclient, collect);
			}
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&clp->cl_lock);

	return count;
}
6380
6381 static u64
6382 nfsd_print_client_openowners(struct nfs4_client *clp)
6383 {
6384         u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
6385
6386         nfsd_print_count(clp, count, "openowners");
6387         return count;
6388 }
6389
/*
 * Unhash up to @max of the client's openowners (@max == 0 means all),
 * queueing them on @collect for the caller to reap.  Returns the
 * number visited.
 */
static u64
nfsd_collect_client_openowners(struct nfs4_client *clp,
			       struct list_head *collect, u64 max)
{
	return nfsd_foreach_client_openowner(clp, max, collect,
						unhash_openowner_locked);
}
6397
6398 u64
6399 nfsd_inject_print_openowners(void)
6400 {
6401         struct nfs4_client *clp;
6402         u64 count = 0;
6403         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6404                                                 nfsd_net_id);
6405
6406         if (!nfsd_netns_ready(nn))
6407                 return 0;
6408
6409         spin_lock(&nn->client_lock);
6410         list_for_each_entry(clp, &nn->client_lru, cl_lru)
6411                 count += nfsd_print_client_openowners(clp);
6412         spin_unlock(&nn->client_lock);
6413
6414         return count;
6415 }
6416
/*
 * Release openowners previously collected under client_lock.  The
 * client pointer is saved before release_openowner() so put_client()
 * can still run afterwards.
 */
static void
nfsd_reap_openowners(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oop, *next;

	list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
		list_del_init(&oop->oo_perclient);
		clp = oop->oo_owner.so_client;
		release_openowner(oop);
		/* Drops the ref taken in nfsd_foreach_client_openowner(). */
		put_client(clp);
	}
}
6430
6431 u64
6432 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6433                                      size_t addr_size)
6434 {
6435         unsigned int count = 0;
6436         struct nfs4_client *clp;
6437         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6438                                                 nfsd_net_id);
6439         LIST_HEAD(reaplist);
6440
6441         if (!nfsd_netns_ready(nn))
6442                 return count;
6443
6444         spin_lock(&nn->client_lock);
6445         clp = nfsd_find_client(addr, addr_size);
6446         if (clp)
6447                 count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6448         spin_unlock(&nn->client_lock);
6449         nfsd_reap_openowners(&reaplist);
6450         return count;
6451 }
6452
/*
 * Fault injection: tear down up to @max openowners across all clients
 * (@max == 0 means all).  Returns the number released.
 */
u64
nfsd_inject_forget_openowners(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		/* max == 0: "max - count" wraps huge, i.e. no limit. */
		count += nfsd_collect_client_openowners(clp, &reaplist,
							max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}
6476
/*
 * Count @clp's delegations under state_lock.  When @victims is
 * non-NULL, also unhash each eligible delegation and queue it there,
 * pinning the client once per delegation.  Stops at @max when @max is
 * nonzero; returns the number visited.  Caller must hold
 * nn->client_lock (asserted).
 */
static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
				     struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&state_lock);
	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
		if (victims) {
			/*
			 * It's not safe to mess with delegations that have a
			 * non-zero dl_time. They might have already been broken
			 * and could be processed by the laundromat outside of
			 * the state_lock. Just leave them be.
			 */
			if (dp->dl_time != 0)
				continue;

			/* Dropped via put_client() by the reaping callers. */
			atomic_inc(&clp->cl_refcount);
			WARN_ON(!unhash_delegation_locked(dp));
			list_add(&dp->dl_recall_lru, victims);
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&state_lock);
	return count;
}
6517
6518 static u64
6519 nfsd_print_client_delegations(struct nfs4_client *clp)
6520 {
6521         u64 count = nfsd_find_all_delegations(clp, 0, NULL);
6522
6523         nfsd_print_count(clp, count, "delegations");
6524         return count;
6525 }
6526
6527 u64
6528 nfsd_inject_print_delegations(void)
6529 {
6530         struct nfs4_client *clp;
6531         u64 count = 0;
6532         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6533                                                 nfsd_net_id);
6534
6535         if (!nfsd_netns_ready(nn))
6536                 return 0;
6537
6538         spin_lock(&nn->client_lock);
6539         list_for_each_entry(clp, &nn->client_lru, cl_lru)
6540                 count += nfsd_print_client_delegations(clp);
6541         spin_unlock(&nn->client_lock);
6542
6543         return count;
6544 }
6545
/*
 * Revoke delegations previously collected by
 * nfsd_find_all_delegations().  The client pointer is saved before
 * revoke_delegation() so put_client() can still run afterwards.
 */
static void
nfsd_forget_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		revoke_delegation(dp);
		/* Drops the ref taken in nfsd_find_all_delegations(). */
		put_client(clp);
	}
}
6559
6560 u64
6561 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
6562                                       size_t addr_size)
6563 {
6564         u64 count = 0;
6565         struct nfs4_client *clp;
6566         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6567                                                 nfsd_net_id);
6568         LIST_HEAD(reaplist);
6569
6570         if (!nfsd_netns_ready(nn))
6571                 return count;
6572
6573         spin_lock(&nn->client_lock);
6574         clp = nfsd_find_client(addr, addr_size);
6575         if (clp)
6576                 count = nfsd_find_all_delegations(clp, 0, &reaplist);
6577         spin_unlock(&nn->client_lock);
6578
6579         nfsd_forget_delegations(&reaplist);
6580         return count;
6581 }
6582
/*
 * Fault injection: revoke up to @max delegations across all clients
 * (@max == 0 means all).  Returns the number revoked.
 */
u64
nfsd_inject_forget_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		/* max == 0: "max - count" wraps huge, i.e. no limit. */
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_forget_delegations(&reaplist);
	return count;
}
6605
/*
 * Issue a recall for each delegation previously collected by
 * nfsd_find_all_delegations(), then drop the per-delegation client
 * reference.
 */
static void
nfsd_recall_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		/*
		 * We skipped all entries that had a zero dl_time before,
		 * so we can now reset the dl_time back to 0. If a delegation
		 * break comes in now, then it won't make any difference since
		 * we're recalling it either way.
		 */
		spin_lock(&state_lock);
		dp->dl_time = 0;
		spin_unlock(&state_lock);
		nfsd_break_one_deleg(dp);
		/* Drops the ref taken in nfsd_find_all_delegations(). */
		put_client(clp);
	}
}
6628
6629 u64
6630 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
6631                                       size_t addr_size)
6632 {
6633         u64 count = 0;
6634         struct nfs4_client *clp;
6635         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6636                                                 nfsd_net_id);
6637         LIST_HEAD(reaplist);
6638
6639         if (!nfsd_netns_ready(nn))
6640                 return count;
6641
6642         spin_lock(&nn->client_lock);
6643         clp = nfsd_find_client(addr, addr_size);
6644         if (clp)
6645                 count = nfsd_find_all_delegations(clp, 0, &reaplist);
6646         spin_unlock(&nn->client_lock);
6647
6648         nfsd_recall_delegations(&reaplist);
6649         return count;
6650 }
6651
6652 u64
6653 nfsd_inject_recall_delegations(u64 max)
6654 {
6655         u64 count = 0;
6656         struct nfs4_client *clp, *next;
6657         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6658                                                 nfsd_net_id);
6659         LIST_HEAD(reaplist);
6660
6661         if (!nfsd_netns_ready(nn))
6662                 return count;
6663
6664         spin_lock(&nn->client_lock);
6665         list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6666                 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6667                 if (max != 0 && ++count >= max)
6668                         break;
6669         }
6670         spin_unlock(&nn->client_lock);
6671         nfsd_recall_delegations(&reaplist);
6672         return count;
6673 }
6674 #endif /* CONFIG_NFSD_FAULT_INJECTION */
6675
/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached.  This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 *
	 * pages >> (20 - 2 - PAGE_SHIFT) == bytes-of-RAM / 256K,
	 * i.e. 4 delegations per 2^20 bytes.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
6696
/*
 * Allocate and initialize the per-net nfsd state: client id hash
 * tables, session table, name trees, LRU lists, and the laundromat
 * work item.  Takes a reference on @net (dropped in
 * nfs4_state_destroy_net()).  Returns 0 or -ENOMEM, unwinding partial
 * allocations via the goto chain on failure.
 */
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
			SESSION_HASH_SIZE, GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}
6740
/*
 * Tear down all per-net client state: destroy every confirmed then
 * every unconfirmed client, free the hash tables, and drop the net
 * reference taken in nfs4_state_create_net().
 */
static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}
6767
/*
 * Per-net startup: create the state tables, start the grace period
 * (with block_opens set), initialize client tracking, and schedule
 * the first laundromat run one grace period from now.
 */
int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	nn->boot_time = get_seconds();
	nn->grace_ended = false;
	nn->nfsd4_manager.block_opens = true;
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
	       nn->nfsd4_grace, net);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;
}
6787
6788 /* initialization to perform when the nfsd service is started: */
6789
/*
 * Global (not per-net) initialization when the nfsd service starts:
 * callback credential, laundromat workqueue, and callback queue.
 */
int
nfs4_state_start(void)
{
	int ret;

	ret = set_callback_cred();
	if (ret)
		/*
		 * NOTE(review): ret is discarded and -ENOMEM returned
		 * unconditionally — confirm set_callback_cred() can only
		 * fail with an allocation error.
		 */
		return -ENOMEM;
	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out_recovery;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();

	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out_recovery:
	return ret;
}
6816
/*
 * Per-net shutdown: stop the laundromat, end the grace period, unhash
 * all pending-recall delegations under state_lock, release them after
 * dropping the lock, then shut down client tracking and destroy the
 * per-net state.
 */
void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	/* Phase 1: unhash onto a private list while holding state_lock. */
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	/* Phase 2: free each delegation outside the lock. */
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
}
6846
void
nfs4_state_shutdown(void)
{
	/*
	 * Tear down in the reverse order of nfs4_state_start(): destroy the
	 * laundromat workqueue first, then the callback queue.
	 */
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}
6853
6854 static void
6855 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
6856 {
6857         if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
6858                 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
6859 }
6860
6861 static void
6862 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
6863 {
6864         if (cstate->minorversion) {
6865                 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
6866                 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
6867         }
6868 }
6869
void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	/*
	 * Forget any saved current stateid so get_stateid() no longer
	 * substitutes it.
	 */
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}
6875
6876 /*
6877  * functions to set current state id
6878  */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	/* Save the OPEN_DOWNGRADE reply stateid as the current stateid. */
	put_stateid(cstate, &odp->od_stateid);
}
6884
void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
	/* Save the OPEN reply stateid as the current stateid. */
	put_stateid(cstate, &open->op_stateid);
}
6890
void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	/* Save the CLOSE reply stateid as the current stateid. */
	put_stateid(cstate, &close->cl_stateid);
}
6896
void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
{
	/* Save the LOCK reply stateid as the current stateid. */
	put_stateid(cstate, &lock->lk_resp_stateid);
}
6902
6903 /*
6904  * functions to consume current state id
6905  */
6906
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	/* Substitute the saved current stateid into OPEN_DOWNGRADE, if the
	 * client used the special current-stateid value. */
	get_stateid(cstate, &odp->od_stateid);
}
6912
void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
{
	/* Substitute the saved current stateid into DELEGRETURN, if the
	 * client used the special current-stateid value. */
	get_stateid(cstate, &drp->dr_stateid);
}
6918
void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
{
	/* Substitute the saved current stateid into FREE_STATEID, if the
	 * client used the special current-stateid value. */
	get_stateid(cstate, &fsp->fr_stateid);
}
6924
void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
{
	/* Substitute the saved current stateid into SETATTR, if the
	 * client used the special current-stateid value. */
	get_stateid(cstate, &setattr->sa_stateid);
}
6930
void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	/* Substitute the saved current stateid into CLOSE, if the
	 * client used the special current-stateid value. */
	get_stateid(cstate, &close->cl_stateid);
}
6936
void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
{
	/* Substitute the saved current stateid into LOCKU, if the
	 * client used the special current-stateid value. */
	get_stateid(cstate, &locku->lu_stateid);
}
6942
void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
{
	/* Substitute the saved current stateid into READ, if the
	 * client used the special current-stateid value. */
	get_stateid(cstate, &read->rd_stateid);
}
6948
void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
{
	/* Substitute the saved current stateid into WRITE, if the
	 * client used the special current-stateid value. */
	get_stateid(cstate, &write->wr_stateid);
}