 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre/lustre_idl.h
 *
 * Lustre wire protocol definitions.
 */
/** \defgroup lustreidl lustreidl
 *
 * Lustre wire protocol definitions.
 *
 * ALL structs passing over the wire should be declared here.  Structs
 * that are used in interfaces with userspace should go in lustre_user.h.
 *
 * All structs being declared here should be built from simple fixed-size
 * types (__u8, __u16, __u32, __u64) or be built from other types or
 * structs also declared in this file.  Similarly, all flags and magic
 * values in those structs should also be declared here.  This ensures
 * that the Lustre wire protocol is not influenced by external dependencies.
 *
 * The only other acceptable items in this file are VERY SIMPLE accessor
 * functions to avoid callers grubbing inside the structures, and the
 * prototypes of the swabber functions for each struct.  Nothing that
 * depends on external functions or definitions should be in here.
 *
 * Structs must be properly aligned to put 64-bit values on an 8-byte
 * boundary.  Any structs being added here must also be added to
 * utils/wirecheck.c and "make newwiretest" run to regenerate the
 * utils/wiretest.c sources.  This allows us to verify that wire structs
 * have the proper alignment/size on all architectures.
 *
 * DO NOT CHANGE any of the structs, flags, or values declared here and used
 * in released Lustre versions.  Some structs may have padding fields that
 * can be used.  Some structs might allow addition at the end (verify this
 * in the code to ensure that new/old clients that see this larger struct
 * do not fail; otherwise you need to implement protocol compatibility).
 *
 * We assume all nodes are either little-endian or big-endian, and we
 * always send messages in the sender's native format.  The receiver
 * detects the message format by checking the 'magic' field of the message
 * (see lustre_msg_swabbed() below).
 *
 * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
 * implemented either here, inline (trivial implementations) or in
 * ptlrpc/pack_generic.c.  These 'swabbers' convert the type from "other"
 * endian, in-place in the message buffer.
 *
 * A swabber takes a single pointer argument.  The caller must already have
 * verified that the length of the message buffer >= sizeof (type).
 *
 * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
 * may be defined that swabs just the variable part, after the caller has
 * verified that the message buffer is large enough.
 *
 * @{
 */
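/*
 * As an illustration of the swabber convention described above, a trivial
 * fixed-size swabber looks like the sketch below.  This is only a sketch
 * (the real lustre_swab_lu_fid() lives in ptlrpc/pack_generic.c); it assumes
 * the in-place __swab64s()/__swab32s() helpers from linux/swab.h and the
 * lu_fid layout of f_seq/f_oid/f_ver used later in this header:
 *
 *	void lustre_swab_lu_fid(struct lu_fid *fid)
 *	{
 *		__swab64s(&fid->f_seq);
 *		__swab32s(&fid->f_oid);
 *		__swab32s(&fid->f_ver);
 *	}
 *
 * The caller must already have checked that the buffer holding *fid is at
 * least sizeof(*fid) bytes long; the swabber itself does no bounds checks.
 */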
#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_

#include "../../../include/linux/libcfs/libcfs.h"

/* Defn's shared with user-space. */
#include "lustre_user.h"
#include "lustre_errno.h"
/* FOO_REQUEST_PORTAL is for incoming requests on the FOO
 * FOO_REPLY_PORTAL   is for incoming replies on the FOO
 * FOO_BULK_PORTAL    is for incoming bulk on the FOO
 */

/* Lustre service names follow the format:
 * service name + MDT + seq name
 */
#define LUSTRE_MDT_MAXNAMELEN	80
#define CONNMGR_REQUEST_PORTAL		1
#define CONNMGR_REPLY_PORTAL		2
//#define OSC_REQUEST_PORTAL		3
#define OSC_REPLY_PORTAL		4
//#define OSC_BULK_PORTAL		5
#define OST_IO_PORTAL			6
#define OST_CREATE_PORTAL		7
#define OST_BULK_PORTAL			8
//#define MDC_REQUEST_PORTAL		9
#define MDC_REPLY_PORTAL		10
//#define MDC_BULK_PORTAL		11
#define MDS_REQUEST_PORTAL		12
//#define MDS_REPLY_PORTAL		13
#define MDS_BULK_PORTAL			14
#define LDLM_CB_REQUEST_PORTAL		15
#define LDLM_CB_REPLY_PORTAL		16
#define LDLM_CANCEL_REQUEST_PORTAL	17
#define LDLM_CANCEL_REPLY_PORTAL	18
//#define PTLBD_REQUEST_PORTAL		19
//#define PTLBD_REPLY_PORTAL		20
//#define PTLBD_BULK_PORTAL		21
#define MDS_SETATTR_PORTAL		22
#define MDS_READPAGE_PORTAL		23
#define OUT_PORTAL			24

#define MGC_REPLY_PORTAL		25
#define MGS_REQUEST_PORTAL		26
#define MGS_REPLY_PORTAL		27
#define OST_REQUEST_PORTAL		28
#define FLD_REQUEST_PORTAL		29
#define SEQ_METADATA_PORTAL		30
#define SEQ_DATA_PORTAL			31
#define SEQ_CONTROLLER_PORTAL		32
#define MGS_BULK_PORTAL			33

/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com,
 * n8851@cray.com */
#define PTL_RPC_MSG_REQUEST	4711
#define PTL_RPC_MSG_ERR		4712
#define PTL_RPC_MSG_REPLY	4713

/* DON'T use swabbed values of MAGIC as magic! */
#define LUSTRE_MSG_MAGIC_V1		0x0BD00BD0
#define LUSTRE_MSG_MAGIC_V2		0x0BD00BD3

#define LUSTRE_MSG_MAGIC_V1_SWABBED	0xD00BD00B
#define LUSTRE_MSG_MAGIC_V2_SWABBED	0xD30BD00B

#define LUSTRE_MSG_MAGIC		LUSTRE_MSG_MAGIC_V2

#define PTLRPC_MSG_VERSION	0x00000003
#define LUSTRE_VERSION_MASK	0xffff0000
#define LUSTRE_OBD_VERSION	0x00010000
#define LUSTRE_MDS_VERSION	0x00020000
#define LUSTRE_OST_VERSION	0x00030000
#define LUSTRE_DLM_VERSION	0x00040000
#define LUSTRE_LOG_VERSION	0x00050000
#define LUSTRE_MGS_VERSION	0x00060000
/**
 * Describes a range of sequence numbers; lsr_start is included in the range
 * but lsr_end is not.
 *
 * The same structure is used in the fld module, where the lsr_index field
 * holds the mdt id of the home mdt.
 */
struct lu_seq_range {
	__u64 lsr_start;
	__u64 lsr_end;
	__u32 lsr_index;
	__u32 lsr_flags;
};

#define LU_SEQ_RANGE_MDT	0x0
#define LU_SEQ_RANGE_OST	0x1
#define LU_SEQ_RANGE_ANY	0x3

#define LU_SEQ_RANGE_MASK	0x3
static inline unsigned fld_range_type(const struct lu_seq_range *range)
{
	return range->lsr_flags & LU_SEQ_RANGE_MASK;
}

static inline int fld_range_is_ost(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_OST;
}

static inline int fld_range_is_mdt(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_MDT;
}

/**
 * The ANY range type is only used when the fld client sends an fld query
 * request but does not know whether the seq is managed by an MDT or an OST,
 * so it sends the request with the ANY type; a lookup result of either seq
 * type can then be expected.
 */
static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_ANY;
}

static inline void fld_range_set_type(struct lu_seq_range *range,
				      unsigned flags)
{
	range->lsr_flags |= flags;
}

static inline void fld_range_set_mdt(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_MDT);
}

static inline void fld_range_set_ost(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_OST);
}

static inline void fld_range_set_any(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_ANY);
}
/**
 * returns width of given range \a r
 */
static inline __u64 range_space(const struct lu_seq_range *range)
{
	return range->lsr_end - range->lsr_start;
}

/**
 * initialize range to zero
 */
static inline void range_init(struct lu_seq_range *range)
{
	memset(range, 0, sizeof(*range));
}

/**
 * check if given seq id \a s is within given range \a r
 */
static inline int range_within(const struct lu_seq_range *range,
			       __u64 s)
{
	return s >= range->lsr_start && s < range->lsr_end;
}

static inline int range_is_sane(const struct lu_seq_range *range)
{
	return (range->lsr_end >= range->lsr_start);
}

static inline int range_is_zero(const struct lu_seq_range *range)
{
	return (range->lsr_start == 0 && range->lsr_end == 0);
}

static inline int range_is_exhausted(const struct lu_seq_range *range)
{
	return range_space(range) == 0;
}

/* return 0 if two ranges have the same location */
static inline int range_compare_loc(const struct lu_seq_range *r1,
				    const struct lu_seq_range *r2)
{
	return r1->lsr_index != r2->lsr_index ||
	       r1->lsr_flags != r2->lsr_flags;
}

#define DRANGE "[%#16.16Lx-%#16.16Lx):%x:%s"

#define PRANGE(range)		\
	(range)->lsr_start,	\
	(range)->lsr_end,	\
	(range)->lsr_index,	\
	fld_range_is_mdt(range) ? "mdt" : "ost"
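/*
 * DRANGE/PRANGE are meant to be used together as a format string and its
 * matching argument list, e.g. in a libcfs debug message.  A hypothetical
 * usage sketch (CDEBUG/D_INFO come from libcfs, included above):
 *
 *	CDEBUG(D_INFO, "range "DRANGE"\n", PRANGE(range));
 *
 * which prints the half-open [start-end) interval, the index and whether
 * the range is served by an mdt or an ost.
 */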
/** \defgroup lu_fid lu_fid
 * @{ */

/**
 * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
 * Deprecated since HSM and SOM attributes are now stored in separate on-disk
 * xattr.
 */
enum lma_compat {
	LMAC_HSM	= 0x00000001,
	LMAC_SOM	= 0x00000002,
	LMAC_NOT_IN_OI	= 0x00000004, /* the object does NOT need OI mapping */
	LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
				       * under /O/<seq>/d<x>. */
};

/**
 * Masks for all features that should be supported by a Lustre version to
 * access a specific file.
 * This information is stored in lustre_mdt_attrs::lma_incompat.
 */
enum lma_incompat {
	LMAI_RELEASED		= 0x00000001, /* file is released */
	LMAI_AGENT		= 0x00000002, /* agent inode */
	LMAI_REMOTE_PARENT	= 0x00000004, /* the parent of the object
						 is on the remote MDT */
};
#define LMA_INCOMPAT_SUPP	(LMAI_AGENT | LMAI_REMOTE_PARENT)

/**
 * fid constants
 */
enum {
	/** LASTID file has zero OID */
	LUSTRE_FID_LASTID_OID = 0UL,
	/** initial fid id value */
	LUSTRE_FID_INIT_OID  = 1UL
};
/** returns fid object sequence */
static inline __u64 fid_seq(const struct lu_fid *fid)
{
	return fid->f_seq;
}

/** returns fid object id */
static inline __u32 fid_oid(const struct lu_fid *fid)
{
	return fid->f_oid;
}

/** returns fid object version */
static inline __u32 fid_ver(const struct lu_fid *fid)
{
	return fid->f_ver;
}

static inline void fid_zero(struct lu_fid *fid)
{
	memset(fid, 0, sizeof(*fid));
}

static inline __u64 fid_ver_oid(const struct lu_fid *fid)
{
	return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
}
/**
 * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
 * inodes in the IGIF namespace, so these reserved SEQ numbers can be
 * used for other purposes and not risk collisions with existing inodes.
 *
 * Different FID Format
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
 */
enum fid_seq {
	FID_SEQ_OST_MDT0	= 0,
	FID_SEQ_LLOG		= 1, /* unnamed llogs */
	FID_SEQ_ECHO		= 2,
	FID_SEQ_OST_MDT1	= 3,
	FID_SEQ_OST_MAX		= 9, /* Max MDT count before OST_on_FID */
	FID_SEQ_LLOG_NAME	= 10, /* named llogs */
	FID_SEQ_RSVD		= 11,
	FID_SEQ_IGIF		= 12,
	FID_SEQ_IGIF_MAX	= 0x0ffffffffULL,
	FID_SEQ_IDIF		= 0x100000000ULL,
	FID_SEQ_IDIF_MAX	= 0x1ffffffffULL,
	/* Normal FID sequence starts from this value, i.e. 1<<33 */
	FID_SEQ_START		= 0x200000000ULL,
	/* sequence for local pre-defined FIDs listed in local_oid */
	FID_SEQ_LOCAL_FILE	= 0x200000001ULL,
	FID_SEQ_DOT_LUSTRE	= 0x200000002ULL,
	/* sequence is used for local named objects FIDs generated
	 * by the local_object_storage library */
	FID_SEQ_LOCAL_NAME	= 0x200000003ULL,
	/* Because the current FLD caches only the fid sequence (not the oid)
	 * on the client side, any FID that is exposed to clients must ensure
	 * that all fids under its sequence are located on one MDT. */
	FID_SEQ_SPECIAL		= 0x200000004ULL,
	FID_SEQ_QUOTA		= 0x200000005ULL,
	FID_SEQ_QUOTA_GLB	= 0x200000006ULL,
	FID_SEQ_ROOT		= 0x200000007ULL,  /* Located on MDT0 */
	FID_SEQ_NORMAL		= 0x200000400ULL,
	FID_SEQ_LOV_DEFAULT	= 0xffffffffffffffffULL
};
#define OBIF_OID_MAX_BITS	32
#define OBIF_MAX_OID		(1ULL << OBIF_OID_MAX_BITS)
#define OBIF_OID_MASK		((1ULL << OBIF_OID_MAX_BITS) - 1)
#define IDIF_OID_MAX_BITS	48
#define IDIF_MAX_OID		(1ULL << IDIF_OID_MAX_BITS)
#define IDIF_OID_MASK		((1ULL << IDIF_OID_MAX_BITS) - 1)

/** OID for FID_SEQ_SPECIAL */
enum special_oid {
	/* Big Filesystem Lock to serialize rename operations */
	FID_OID_SPECIAL_BFL	= 1UL,
};

/** OID for FID_SEQ_DOT_LUSTRE */
enum dot_lustre_oid {
	FID_OID_DOT_LUSTRE	= 1UL,
	FID_OID_DOT_LUSTRE_OBF	= 2UL,
};
static inline int fid_seq_is_mdt0(__u64 seq)
{
	return (seq == FID_SEQ_OST_MDT0);
}

static inline int fid_seq_is_mdt(const __u64 seq)
{
	return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
}

static inline int fid_seq_is_echo(__u64 seq)
{
	return (seq == FID_SEQ_ECHO);
}

static inline int fid_is_echo(const struct lu_fid *fid)
{
	return fid_seq_is_echo(fid_seq(fid));
}

static inline int fid_seq_is_llog(__u64 seq)
{
	return (seq == FID_SEQ_LLOG);
}

static inline int fid_is_llog(const struct lu_fid *fid)
{
	/* file with OID == 0 is not llog but contains last oid */
	return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
}

static inline int fid_seq_is_rsvd(const __u64 seq)
{
	return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
}

static inline int fid_seq_is_special(const __u64 seq)
{
	return seq == FID_SEQ_SPECIAL;
}

static inline int fid_seq_is_local_file(const __u64 seq)
{
	return seq == FID_SEQ_LOCAL_FILE ||
	       seq == FID_SEQ_LOCAL_NAME;
}

static inline int fid_seq_is_root(const __u64 seq)
{
	return seq == FID_SEQ_ROOT;
}

static inline int fid_seq_is_dot(const __u64 seq)
{
	return seq == FID_SEQ_DOT_LUSTRE;
}

static inline int fid_seq_is_default(const __u64 seq)
{
	return seq == FID_SEQ_LOV_DEFAULT;
}

static inline int fid_is_mdt0(const struct lu_fid *fid)
{
	return fid_seq_is_mdt0(fid_seq(fid));
}

static inline void lu_root_fid(struct lu_fid *fid)
{
	fid->f_seq = FID_SEQ_ROOT;
	fid->f_oid = 1;
	fid->f_ver = 0;
}

/**
 * Check if a fid is igif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an igif; otherwise false.
 */
static inline int fid_seq_is_igif(const __u64 seq)
{
	return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
}

static inline int fid_is_igif(const struct lu_fid *fid)
{
	return fid_seq_is_igif(fid_seq(fid));
}

/**
 * Check if a fid is idif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an idif; otherwise false.
 */
static inline int fid_seq_is_idif(const __u64 seq)
{
	return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
}

static inline int fid_is_idif(const struct lu_fid *fid)
{
	return fid_seq_is_idif(fid_seq(fid));
}

static inline int fid_is_local_file(const struct lu_fid *fid)
{
	return fid_seq_is_local_file(fid_seq(fid));
}

static inline int fid_seq_is_norm(const __u64 seq)
{
	return (seq >= FID_SEQ_NORMAL);
}

static inline int fid_is_norm(const struct lu_fid *fid)
{
	return fid_seq_is_norm(fid_seq(fid));
}
/* convert an OST objid into an IDIF FID SEQ number */
static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
{
	return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
}

/* convert a packed IDIF FID into an OST objid */
static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver)
{
	return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
}

/* extract ost index from IDIF FID */
static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
{
	return (fid_seq(fid) >> 16) & 0xffff;
}
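/*
 * Worked example of the IDIF encoding above (values are illustrative only):
 * for OST object id 0x100000005ULL on OST index 2,
 *
 *	fid_idif_seq(0x100000005ULL, 2) == FID_SEQ_IDIF | (2 << 16) | 0x1
 *					== 0x100020001ULL
 *
 * i.e. the low 16 bits of the sequence carry bits 32-47 of the objid and the
 * next 16 bits carry the OST index.  The remaining low 32 bits of the objid
 * go into f_oid, so the reverse mapping recovers the full objid:
 *
 *	fid_idif_id(0x100020001ULL, 0x00000005, 0) == 0x100000005ULL
 *	fid_idif_ost_idx(fid) == 2	(with fid->f_seq == 0x100020001ULL)
 */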
/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
static inline __u64 ostid_seq(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid->oi.oi_seq))
		return FID_SEQ_OST_MDT0;

	if (fid_seq_is_default(ostid->oi.oi_seq))
		return FID_SEQ_LOV_DEFAULT;

	if (fid_is_idif(&ostid->oi_fid))
		return FID_SEQ_OST_MDT0;

	return fid_seq(&ostid->oi_fid);
}

/* extract OST objid from a wire ost_id (id/seq) pair */
static inline __u64 ostid_id(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid_seq(ostid)))
		return ostid->oi.oi_id & IDIF_OID_MASK;

	if (fid_is_idif(&ostid->oi_fid))
		return fid_idif_id(fid_seq(&ostid->oi_fid),
				   fid_oid(&ostid->oi_fid), 0);

	return fid_oid(&ostid->oi_fid);
}
static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
{
	if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
		oi->oi.oi_seq = seq;
	} else {
		oi->oi_fid.f_seq = seq;
		/* Note: if f_oid + f_ver is zero, we need to init it
		 * to be 1, otherwise ostid_seq will treat this
		 * as an old ostid (oi_seq == 0) */
		if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
			oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
	}
}

static inline void ostid_set_seq_mdt0(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_OST_MDT0);
}

static inline void ostid_set_seq_echo(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_ECHO);
}

static inline void ostid_set_seq_llog(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_LLOG);
}
/**
 * Note: we need to check oi_seq to decide where to set oi_id,
 * so oi_seq should always be set ahead of oi_id.
 */
static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
	if (fid_seq_is_mdt0(ostid_seq(oi))) {
		if (oid >= IDIF_MAX_OID) {
			CERROR("Bad %llu to set "DOSTID"\n",
			       oid, POSTID(oi));
			return;
		}
		oi->oi.oi_id = oid;
	} else {
		if (oid > OBIF_MAX_OID) {
			CERROR("Bad %llu to set "DOSTID"\n",
			       oid, POSTID(oi));
			return;
		}
		oi->oi_fid.f_oid = oid;
	}
}

static inline void ostid_inc_id(struct ost_id *oi)
{
	if (fid_seq_is_mdt0(ostid_seq(oi))) {
		if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) {
			CERROR("Bad inc "DOSTID"\n", POSTID(oi));
			return;
		}
		oi->oi.oi_id++;
	} else {
		oi->oi_fid.f_oid++;
	}
}

static inline void ostid_dec_id(struct ost_id *oi)
{
	if (fid_seq_is_mdt0(ostid_seq(oi)))
		oi->oi.oi_id--;
	else
		oi->oi_fid.f_oid--;
}
/**
 * Unpack an OST object id/seq (group) into a FID.  This is needed for
 * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
 * FIDs.  Note that if an id/seq is already in FID/IDIF format it will
 * be passed through unchanged.  Only legacy OST objects in "group 0"
 * will be mapped into the IDIF namespace so that they can fit into the
 * struct lu_fid fields without loss.  For reference see:
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
 */
static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
			       __u32 ost_idx)
{
	if (ost_idx > 0xffff) {
		CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
		       ost_idx);
		return -EBADF;
	}

	if (fid_seq_is_mdt0(ostid_seq(ostid))) {
		/* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
		 * that we map into the IDIF namespace.  It allows up to 2^48
		 * objects per OST, as this is the object namespace that has
		 * been in production for years.  This can handle create rates
		 * of 1M objects/s/OST for 9 years, or combinations thereof. */
		if (ostid_id(ostid) >= IDIF_MAX_OID) {
			CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
			return -EBADF;
		}
		fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
		/* truncate to 32 bits by assignment */
		fid->f_oid = ostid_id(ostid);
		/* in theory, not currently used */
		fid->f_ver = ostid_id(ostid) >> 48;
	} else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
		/* This is either an IDIF object, which identifies objects
		 * across all OSTs, or a regular FID.  The IDIF namespace maps
		 * legacy OST objects into the FID namespace.  In both cases,
		 * we just pass the FID through, no conversion needed. */
		if (ostid->oi_fid.f_ver != 0) {
			CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
			return -EBADF;
		}
		*fid = ostid->oi_fid;
	}

	return 0;
}
/* pack any OST FID into an ostid (id/seq) for the wire/disk */
static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
{
	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
		CERROR("bad IGIF, "DFID"\n", PFID(fid));
		return -EBADF;
	}

	if (fid_is_idif(fid)) {
		ostid_set_seq_mdt0(ostid);
		ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
						fid_ver(fid)));
	} else {
		ostid->oi_fid = *fid;
	}

	return 0;
}
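/*
 * Usage sketch for the two converters above (hypothetical local variables,
 * error handling elided): unpacking an on-wire ost_id into a lu_fid and
 * packing it back should round-trip both for legacy "group 0" objects and
 * for real FIDs:
 *
 *	struct ost_id oi  = ...;	// received from the wire
 *	struct lu_fid fid;
 *	struct ost_id out;
 *
 *	if (ostid_to_fid(&fid, &oi, ost_idx) == 0 &&
 *	    fid_to_ostid(&fid, &out) == 0)
 *		;	// 'out' identifies the same object as 'oi'
 */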
/* Check whether the fid is for LAST_ID */
static inline int fid_is_last_id(const struct lu_fid *fid)
{
	return (fid_oid(fid) == 0);
}

/**
 * Get inode number from an igif.
 * \param fid an igif to get inode number from.
 * \return inode number for the igif.
 */
static inline ino_t lu_igif_ino(const struct lu_fid *fid)
{
	return fid_seq(fid);
}

extern void lustre_swab_ost_id(struct ost_id *oid);

/**
 * Get inode generation from an igif.
 * \param fid an igif to get inode generation from.
 * \return inode generation for the igif.
 */
static inline __u32 lu_igif_gen(const struct lu_fid *fid)
{
	return fid_oid(fid);
}

/**
 * Build igif from the inode number/generation.
 */
static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
{
	fid->f_seq = ino;
	fid->f_oid = gen;
	fid->f_ver = 0;
}
/*
 * Fids are transmitted across network (in the sender byte-ordering),
 * and stored on disk in big-endian order.
 */
static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = cpu_to_le64(fid_seq(src));
	dst->f_oid = cpu_to_le32(fid_oid(src));
	dst->f_ver = cpu_to_le32(fid_ver(src));
}

static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = le64_to_cpu(fid_seq(src));
	dst->f_oid = le32_to_cpu(fid_oid(src));
	dst->f_ver = le32_to_cpu(fid_ver(src));
}

static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = cpu_to_be64(fid_seq(src));
	dst->f_oid = cpu_to_be32(fid_oid(src));
	dst->f_ver = cpu_to_be32(fid_ver(src));
}

static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = be64_to_cpu(fid_seq(src));
	dst->f_oid = be32_to_cpu(fid_oid(src));
	dst->f_ver = be32_to_cpu(fid_ver(src));
}
static inline int fid_is_sane(const struct lu_fid *fid)
{
	return fid != NULL &&
	       ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
		fid_is_igif(fid) || fid_is_idif(fid) ||
		fid_seq_is_rsvd(fid_seq(fid)));
}

static inline int fid_is_zero(const struct lu_fid *fid)
{
	return fid_seq(fid) == 0 && fid_oid(fid) == 0;
}

extern void lustre_swab_lu_fid(struct lu_fid *fid);
extern void lustre_swab_lu_seq_range(struct lu_seq_range *range);

static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
	return memcmp(f0, f1, sizeof(*f0)) == 0;
}
#define __diff_normalize(val0, val1)				\
({								\
	typeof(val0) __val0 = (val0);				\
	typeof(val1) __val1 = (val1);				\
								\
	(__val0 == __val1 ? 0 : __val0 > __val1 ? +1 : -1);	\
})

static inline int lu_fid_cmp(const struct lu_fid *f0,
			     const struct lu_fid *f1)
{
	return
		__diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
		__diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
		__diff_normalize(fid_ver(f0), fid_ver(f1));
}
static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
				   struct ost_id *dst_oi)
{
	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
		dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
	} else {
		fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
	}
}

static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
				   struct ost_id *dst_oi)
{
	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
		dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
	} else {
		fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
	}
}

/** @} lu_fid */
/** \defgroup lu_dir lu_dir
 * @{ */

/**
 * Enumeration of possible directory entry attributes.
 *
 * Attributes follow the directory entry header in the order they appear in
 * this enumeration.
 */
enum lu_dirent_attrs {
	LUDA_FID		= 0x0001,
	LUDA_TYPE		= 0x0002,
	LUDA_64BITHASH		= 0x0004,

	/* The following attrs are used for MDT internal only,
	 * not visible to client */

	/* Verify the dirent consistency */
	LUDA_VERIFY		= 0x8000,
	/* Only check but not repair the dirent inconsistency */
	LUDA_VERIFY_DRYRUN	= 0x4000,
	/* The dirent has been repaired, or is to be repaired (dryrun). */
	LUDA_REPAIR		= 0x2000,
	/* The system is upgraded, has been or is to be repaired (dryrun). */
	LUDA_UPGRADE		= 0x1000,
	/* Ignore this record, go to next directly. */
	LUDA_IGNORE		= 0x0800,
};

#define LU_DIRENT_ATTRS_MASK	0xf800
/**
 * Layout of readdir pages, as transmitted on wire.
 */
struct lu_dirent {
	/** valid if LUDA_FID is set. */
	struct lu_fid lde_fid;
	/** a unique entry identifier: a hash or an offset. */
	__u64	      lde_hash;
	/** total record length, including all attributes. */
	__u16	      lde_reclen;
	/** name length */
	__u16	      lde_namelen;
	/** optional variable size attributes following this entry.
	 *  taken from enum lu_dirent_attrs.
	 */
	__u32	      lde_attrs;
	/** name is followed by the attributes indicated in ->lde_attrs, in
	 *  their natural order.  After the last attribute, padding bytes are
	 *  added to make ->lde_reclen a multiple of 8.
	 */
	char	      lde_name[0];
};

/*
 * Definitions of optional directory entry attributes formats.
 *
 * Individual attributes do not have their length encoded in a generic way.
 * It is assumed that a consumer of an attribute knows its format.  This means
 * that it is impossible to skip over an unknown attribute, except by skipping
 * over all remaining attributes (by using ->lde_reclen), which is not too
 * constraining, because new server versions will append new attributes at
 * the end of an entry.
 */

/**
 * Fid directory attribute: a fid of an object referenced by the entry. This
 * will be almost always requested by the client and supplied by the server.
 *
 * Aligned to 8 bytes.
 */
/* To have compatibility with 1.8, let's have fid in lu_dirent struct. */

/**
 * File type.
 *
 * Aligned to 2 bytes.
 */
struct luda_type {
	__u16 lt_type;
};

#ifndef IFSHIFT
#define IFSHIFT		12
#endif

#define IFTODT(type)	(((type) & S_IFMT) >> IFSHIFT)
#define DTTOIF(dirtype)	((dirtype) << IFSHIFT)

struct lu_dirpage {
	__u64		 ldp_hash_start;
	__u64		 ldp_hash_end;
	__u32		 ldp_flags;
	__u32		 ldp_pad0;
	struct lu_dirent ldp_entries[0];
};
enum lu_dirpage_flags {
	/**
	 * dirpage contains no entry.
	 */
	LDF_EMPTY   = 1 << 0,
	/**
	 * last entry's lde_hash equals ldp_hash_end.
	 */
	LDF_COLLIDE = 1 << 1
};

static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
{
	if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
		return NULL;
	else
		return dp->ldp_entries;
}

static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
{
	struct lu_dirent *next;

	if (le16_to_cpu(ent->lde_reclen) != 0)
		next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
	else
		next = NULL;

	return next;
}
static inline int lu_dirent_calc_size(int namelen, __u16 attr)
{
	int size;

	if (attr & LUDA_TYPE) {
		const unsigned align = sizeof(struct luda_type) - 1;

		size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
		size += sizeof(struct luda_type);
	} else {
		size = sizeof(struct lu_dirent) + namelen;
	}

	return (size + 7) & ~7;
}
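/*
 * Worked example of the size calculation above, assuming a 64-bit build
 * where sizeof(struct lu_dirent) == 32 (16-byte fid + 8 + 2 + 2 + 4) and
 * sizeof(struct luda_type) == 2: for a 5-character name with LUDA_TYPE
 * requested,
 *
 *	size = (32 + 5 + 1) & ~1 = 38, then size += 2  ->  40
 *	return (40 + 7) & ~7 = 40
 *
 * i.e. the name is padded to a 2-byte boundary so the __u16 type attribute
 * that follows it is naturally aligned, and the whole record is rounded up
 * to a multiple of 8 as required by ->lde_reclen.
 */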
static inline int lu_dirent_size(struct lu_dirent *ent)
{
	if (le16_to_cpu(ent->lde_reclen) == 0) {
		return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen),
					   le32_to_cpu(ent->lde_attrs));
	}
	return le16_to_cpu(ent->lde_reclen);
}

#define MDS_DIR_END_OFF 0xfffffffffffffffeULL
/**
 * MDS_READPAGE page size
 *
 * This is the directory page size packed in MDS_READPAGE RPC.
 * It's different than PAGE_CACHE_SIZE because the client needs to
 * access the struct lu_dirpage header packed at the beginning of
 * the "page"; without this there isn't any way to find where the
 * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
 */
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE  (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK  (~(LU_PAGE_SIZE - 1))

#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
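/*
 * Example of the arithmetic above (illustrative only): LU_PAGE_SIZE is fixed
 * at 4096 bytes, so on a node whose PAGE_CACHE_SHIFT is 16 (64 KiB pages),
 * LU_PAGE_COUNT = 1 << (16 - 12) = 16, i.e. one cache page carries sixteen
 * 4 KiB "lu_pages", each starting with its own struct lu_dirpage header.
 */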
struct lustre_handle {
	__u64 cookie;
};
#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL

static inline int lustre_handle_is_used(struct lustre_handle *lh)
{
	return lh->cookie != 0ull;
}

static inline int lustre_handle_equal(const struct lustre_handle *lh1,
				      const struct lustre_handle *lh2)
{
	return lh1->cookie == lh2->cookie;
}

static inline void lustre_handle_copy(struct lustre_handle *tgt,
				      struct lustre_handle *src)
{
	tgt->cookie = src->cookie;
}
/* flags for lm_flags */
#define MSGHDR_AT_SUPPORT		0x1
#define MSGHDR_CKSUM_INCOMPAT18		0x2

#define lustre_msg lustre_msg_v2
/* we depend on this structure to be 8-byte aligned */
/* this type is only endian-adjusted in lustre_unpack_msg() */
struct lustre_msg_v2 {
	__u32 lm_bufcount;
	__u32 lm_secflvr;
	__u32 lm_magic;
	__u32 lm_repsize;
	__u32 lm_cksum;
	__u32 lm_flags;
	__u32 lm_padding_2;
	__u32 lm_padding_3;
	__u32 lm_buflens[0];
};
/* without gss, ptlrpc_body is put at the first buffer. */
#define PTLRPC_NUM_VERSIONS	4
#define JOBSTATS_JOBID_SIZE	32  /* 32 bytes string */
struct ptlrpc_body_v3 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid;
	__u64 pb_last_seen;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout;  /* for req, the deadline, for rep, the service est */
	__u32 pb_service_time; /* for rep, actual service time */
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
	char  pb_jobid[JOBSTATS_JOBID_SIZE];
};
#define ptlrpc_body     ptlrpc_body_v3
struct ptlrpc_body_v2 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid;
	__u64 pb_last_seen;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout;  /* for req, the deadline, for rep, the service est */
	__u32 pb_service_time; /* for rep, actual service time, also used for
				  net_latency of req */
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
};

extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
/* message body offset for lustre_msg_v2 */
/* ptlrpc body offset in all request/reply messages */
#define MSG_PTLRPC_BODY_OFF	0

/* normal request/reply message record offset */
#define REQ_REC_OFF		1
#define REPLY_REC_OFF		1

/* ldlm request message body offset */
#define DLM_LOCKREQ_OFF		1 /* lockreq offset */
#define DLM_REQ_REC_OFF		2 /* normal dlm request record offset */

/* ldlm intent lock message body offset */
#define DLM_INTENT_IT_OFF	2 /* intent lock it offset */
#define DLM_INTENT_REC_OFF	3 /* intent lock record offset */

/* ldlm reply message body offset */
#define DLM_LOCKREPLY_OFF	1 /* lockrep offset */
#define DLM_REPLY_REC_OFF	2 /* reply record offset */

/** only use in req->rq_{req,rep}_swab_mask */
#define MSG_PTLRPC_HEADER_OFF	31
/* Flags that are operation-specific go in the top 16 bits. */
#define MSG_OP_FLAG_MASK	0xffff0000
#define MSG_OP_FLAG_SHIFT	16

/* Flags that apply to all requests are in the bottom 16 bits */
#define MSG_GEN_FLAG_MASK	0x0000ffff
#define MSG_LAST_REPLAY		0x0001
#define MSG_RESENT		0x0002
#define MSG_REPLAY		0x0004
/* #define MSG_AT_SUPPORT	0x0008
 * This was used in early prototypes of adaptive timeouts, and while there
 * shouldn't be any users of that code there also isn't a need for using this
 * bit.  Defer usage until at least 1.10 to avoid potential conflict. */
#define MSG_DELAY_REPLAY	0x0010
#define MSG_VERSION_REPLAY	0x0020
#define MSG_REQ_REPLAY_DONE	0x0040
#define MSG_LOCK_REPLAY_DONE	0x0080
/*
 * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
 */
#define MSG_CONNECT_RECOVERING	0x00000001
#define MSG_CONNECT_RECONNECT	0x00000002
#define MSG_CONNECT_REPLAYABLE	0x00000004
//#define MSG_CONNECT_PEER	0x8
#define MSG_CONNECT_LIBCLIENT	0x00000010
#define MSG_CONNECT_INITIAL	0x00000020
#define MSG_CONNECT_ASYNC	0x00000040
#define MSG_CONNECT_NEXT_VER	0x00000080 /* use next version of lustre_msg */
#define MSG_CONNECT_TRANSNO	0x00000100 /* report transno */
#define OBD_CONNECT_RDONLY		  0x1ULL /*client has read-only access*/
#define OBD_CONNECT_INDEX		  0x2ULL /*connect specific LOV idx */
#define OBD_CONNECT_MDS			  0x4ULL /*connect from MDT to OST */
#define OBD_CONNECT_GRANT		  0x8ULL /*OSC gets grant at connect */
#define OBD_CONNECT_SRVLOCK		 0x10ULL /*server takes locks for cli */
#define OBD_CONNECT_VERSION		 0x20ULL /*Lustre versions in ocd */
#define OBD_CONNECT_REQPORTAL		 0x40ULL /*Separate non-IO req portal */
#define OBD_CONNECT_ACL			 0x80ULL /*access control lists */
#define OBD_CONNECT_XATTR		0x100ULL /*client use extended attr */
#define OBD_CONNECT_CROW		0x200ULL /*MDS+OST create obj on write*/
#define OBD_CONNECT_TRUNCLOCK		0x400ULL /*locks on server for punch */
#define OBD_CONNECT_TRANSNO		0x800ULL /*replay sends init transno */
#define OBD_CONNECT_IBITS	       0x1000ULL /*support for inodebits locks*/
#define OBD_CONNECT_JOIN	       0x2000ULL /*files can be concatenated.
						  *We do not support JOIN FILE
						  *anymore, reserve this flag
						  *just to prevent such bit
						  *from being reused. */
#define OBD_CONNECT_ATTRFID	       0x4000ULL /*Server can GetAttr By Fid*/
#define OBD_CONNECT_NODEVOH	       0x8000ULL /*No open hndl on specl nodes*/
#define OBD_CONNECT_RMT_CLIENT	      0x10000ULL /*Remote client */
#define OBD_CONNECT_RMT_CLIENT_FORCE  0x20000ULL /*Remote client by force */
#define OBD_CONNECT_BRW_SIZE	      0x40000ULL /*Max bytes per rpc */
#define OBD_CONNECT_QUOTA64	      0x80000ULL /*Not used since 2.4 */
#define OBD_CONNECT_MDS_CAPA	     0x100000ULL /*MDS capability */
#define OBD_CONNECT_OSS_CAPA	     0x200000ULL /*OSS capability */
#define OBD_CONNECT_CANCELSET	     0x400000ULL /*Early batched cancels. */
#define OBD_CONNECT_SOM		     0x800000ULL /*Size on MDS */
#define OBD_CONNECT_AT		    0x1000000ULL /*client uses AT */
#define OBD_CONNECT_LRU_RESIZE	    0x2000000ULL /*LRU resize feature. */
#define OBD_CONNECT_MDS_MDS	    0x4000000ULL /*MDS-MDS connection */
#define OBD_CONNECT_REAL	    0x8000000ULL /*real connection */
#define OBD_CONNECT_CHANGE_QS	   0x10000000ULL /*Not used since 2.4 */
#define OBD_CONNECT_CKSUM	   0x20000000ULL /*support several cksum algos*/
#define OBD_CONNECT_FID		   0x40000000ULL /*FID is supported by server */
#define OBD_CONNECT_VBR		   0x80000000ULL /*version based recovery */
#define OBD_CONNECT_LOV_V3	  0x100000000ULL /*client supports LOV v3 EA */
#define OBD_CONNECT_GRANT_SHRINK  0x200000000ULL /* support grant shrink */
#define OBD_CONNECT_SKIP_ORPHAN	  0x400000000ULL /* don't reuse orphan objids */
#define OBD_CONNECT_MAX_EASIZE	  0x800000000ULL /* preserved for large EA */
#define OBD_CONNECT_FULL20	 0x1000000000ULL /* it is 2.0 client */
#define OBD_CONNECT_LAYOUTLOCK	 0x2000000000ULL /* client uses layout lock */
#define OBD_CONNECT_64BITHASH	 0x4000000000ULL /* client supports 64-bit
						  * directory hash */
#define OBD_CONNECT_MAXBYTES	 0x8000000000ULL /* max stripe size */
#define OBD_CONNECT_IMP_RECOV	0x10000000000ULL /* imp recovery support */
#define OBD_CONNECT_JOBSTATS	0x20000000000ULL /* jobid in ptlrpc_body */
#define OBD_CONNECT_UMASK	0x40000000000ULL /* create uses client umask */
#define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
						  * RPC error properly */
#define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
						  * finer space reservation */
#define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
						   * policy and 2.x server */
#define OBD_CONNECT_LVB_TYPE	0x400000000000ULL /* variable type of LVB */
#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
#define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
#define OBD_CONNECT_SHORTIO	0x2000000000000ULL/* short io */
#define OBD_CONNECT_PINGLESS	0x4000000000000ULL/* pings not required */
#define OBD_CONNECT_FLOCK_DEAD	0x8000000000000ULL/* flock deadlock detection */
#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/
/* XXX README XXX:
 * Please DO NOT add flag values here before first ensuring that this same
 * flag value is not in use on some other branch.  Please clear any such
 * changes with senior engineers before starting to use a new flag.  Then,
 * submit a small patch against EVERY branch that ONLY adds the new flag,
 * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
 * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
 * can be approved and landed easily to reserve the flag for future use. */

/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
 * connection.  It is a temporary bug fix for Imperative Recovery interop
 * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
 * 2.2 clients/servers is no longer needed.  LU-1252/LU-1644. */
#define OBD_CONNECT_MNE_SWAB		OBD_CONNECT_MDS_MDS

#define OCD_HAS_FLAG(ocd, flg)	\
	(!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
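/*
 * OCD_HAS_FLAG() pastes the OBD_CONNECT_ prefix onto its second argument, so
 * it is used with the bare flag suffix.  A hypothetical usage sketch, where
 * 'ocd' is a struct obd_connect_data pointer (defined further below):
 *
 *	if (OCD_HAS_FLAG(ocd, GRANT_SHRINK))
 *		...	// peer advertised OBD_CONNECT_GRANT_SHRINK
 */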
#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE

#define MDT_CONNECT_SUPPORTED  (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \
				OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \
				OBD_CONNECT_IBITS | \
				OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \
				OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
				OBD_CONNECT_RMT_CLIENT | \
				OBD_CONNECT_RMT_CLIENT_FORCE | \
				OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \
				OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \
				OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \
				OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \
				OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \
				OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \
				OBD_CONNECT_EINPROGRESS | \
				OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
				OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
				OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\
				OBD_CONNECT_FLOCK_DEAD | \
				OBD_CONNECT_DISP_STRIPE)

#define OST_CONNECT_SUPPORTED  (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
				OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
				OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
				OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \
				OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
				LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \
				OBD_CONNECT_RMT_CLIENT | \
				OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \
				OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \
				OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \
				OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \
				OBD_CONNECT_MAX_EASIZE | \
				OBD_CONNECT_EINPROGRESS | \
				OBD_CONNECT_JOBSTATS | \
				OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\
				OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \
				OBD_CONNECT_PINGLESS)
#define ECHO_CONNECT_SUPPORTED (0)
#define MGS_CONNECT_SUPPORTED  (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
				OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
				OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS)

/* Features required for this version of the client to work with server */
#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
				 OBD_CONNECT_FULL20)

#define OBD_OCD_VERSION(major, minor, patch, fix) (((major)<<24) + \
						   ((minor)<<16) + \
						   ((patch)<<8) + (fix))
#define OBD_OCD_VERSION_MAJOR(version) ((int)((version)>>24)&255)
#define OBD_OCD_VERSION_MINOR(version) ((int)((version)>>16)&255)
#define OBD_OCD_VERSION_PATCH(version) ((int)((version)>>8)&255)
#define OBD_OCD_VERSION_FIX(version)   ((int)(version)&255)
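/*
 * Worked example of the version packing above: a 2.5.3 client with fix
 * level 0 encodes its version as
 *
 *	OBD_OCD_VERSION(2, 5, 3, 0) == (2<<24) + (5<<16) + (3<<8) + 0
 *				    == 0x02050300
 *
 * and OBD_OCD_VERSION_MAJOR(0x02050300) == 2, .._MINOR() == 5,
 * .._PATCH() == 3, .._FIX() == 0.
 */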
/* This structure is used for both request and reply.
 *
 * If we eventually have separate connect data for different types, which we
 * almost certainly will, then perhaps we stick a union in here. */
struct obd_connect_data_v1 {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes, must be 2^n */
	__u64 ocd_ibits_known;   /* inode bits this client understands */
	__u8  ocd_blocksize;	 /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;	 /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;  /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
	__u64 ocd_transno;	 /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;   /* supported checksum algorithms */
	__u32 ocd_max_easize;	 /* How big LOV EA can be on MDS */
	__u32 ocd_instance;	 /* also fix lustre_swab_connect */
	__u64 ocd_maxbytes;	 /* Maximum stripe size in bytes */
};

struct obd_connect_data {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes */
	__u64 ocd_ibits_known;   /* inode bits this client understands */
	__u8  ocd_blocksize;	 /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;	 /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;  /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
	__u64 ocd_transno;	 /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;   /* supported checksum algorithms */
	__u32 ocd_max_easize;	 /* How big LOV EA can be on MDS */
	__u32 ocd_instance;	 /* instance # of this target */
	__u64 ocd_maxbytes;	 /* Maximum stripe size in bytes */
	/* Fields after ocd_maxbytes are only accessible by the receiver
	 * if the corresponding flag in ocd_connect_flags is set.  Accessing
	 * any field after ocd_maxbytes on the receiver without a valid flag
	 * may result in out-of-bound memory access and kernel oops. */
	__u64 padding1;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding2;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding3;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding4;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding5;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding6;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding7;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding8;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding9;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingA;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingB;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingC;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingD;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingE;  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingF;  /* added 2.1.0. also fix lustre_swab_connect */
};
/* XXX README XXX:
 * Please DO NOT use any fields here before first ensuring that this same
 * field is not in use on some other branch.  Please clear any such changes
 * with senior engineers before starting to use a new field.  Then, submit
 * a small patch against EVERY branch that ONLY adds the new field along with
 * the matching OBD_CONNECT flag, so that it can be approved and landed easily
 * to reserve the flag for future use. */
extern void lustre_swab_connect(struct obd_connect_data *ocd);

/*
 * Supported checksum algorithms.  Up to 32 checksum types are supported.
 * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
 * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
 * algorithm and also the OBD_FL_CKSUM* flags.
 */
typedef enum {
	OBD_CKSUM_CRC32  = 0x00000001,
	OBD_CKSUM_ADLER  = 0x00000002,
	OBD_CKSUM_CRC32C = 0x00000004,
} cksum_type_t;
/*
 *   OST requests: OBDO & OBD request records
 */

typedef enum {
	OST_REPLY	= 0,	/* reply ? */
	OST_QUOTACHECK	= 18,
	OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
} ost_cmd_t;
#define OST_FIRST_OPC  OST_REPLY
enum obdo_flags {
	OBD_FL_INLINEDATA   = 0x00000001,
	OBD_FL_OBDMDEXISTS  = 0x00000002,
	OBD_FL_DELORPHAN    = 0x00000004, /* if set in o_flags delete orphans */
	OBD_FL_NORPC	    = 0x00000008, /* set in o_flags do in OSC not OST */
	OBD_FL_IDONLY	    = 0x00000010, /* set in o_flags only adjust obj id*/
	OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
	OBD_FL_DEBUG_CHECK  = 0x00000040, /* echo client/server debug check */
	OBD_FL_NO_USRQUOTA  = 0x00000100, /* the object's owner is over quota */
	OBD_FL_NO_GRPQUOTA  = 0x00000200, /* the object's group is over quota */
	OBD_FL_CREATE_CROW  = 0x00000400, /* object should be created on write */
	OBD_FL_SRVLOCK	    = 0x00000800, /* delegate DLM locking to server */
	OBD_FL_CKSUM_CRC32  = 0x00001000, /* CRC32 checksum type */
	OBD_FL_CKSUM_ADLER  = 0x00002000, /* ADLER checksum type */
	OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
	OBD_FL_CKSUM_RSVD2  = 0x00008000, /* for future cksum types */
	OBD_FL_CKSUM_RSVD3  = 0x00010000, /* for future cksum types */
	OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
	OBD_FL_MMAP	    = 0x00040000, /* object is mmapped on the client.
					   * XXX: obsoleted - reserved for old
					   * clients prior to 2.2 */
	OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
	OBD_FL_NOSPC_BLK    = 0x00100000, /* no more block space on OST */

	/* Note that while these checksum values are currently separate bits,
	 * in 2.x we can actually allow all values from 1-31 if we wanted. */
	OBD_FL_CKSUM_ALL    = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
			      OBD_FL_CKSUM_CRC32C,

	/* mask for local-only flag, which won't be sent over network */
	OBD_FL_LOCAL_MASK   = 0xF0000000,
};
#define LOV_MAGIC_V1		0x0BD10BD0
#define LOV_MAGIC		LOV_MAGIC_V1
#define LOV_MAGIC_JOIN_V1	0x0BD20BD0
#define LOV_MAGIC_V3		0x0BD30BD0

/*
 * magic for fully defined striping
 * the idea is that we should have different magics for striping "hints"
 * (struct lov_user_md_v[13]) and defined ready-to-use striping (struct
 * lov_mds_md_v[13]).  at the moment the magics are used in wire protocol,
 * we can't just change it w/o long way preparation, but we still need a
 * mechanism to allow LOD to differentiate hint versus ready striping.
 * so, at the moment we do a trick: MDT knows what to expect from request
 * depending on the case (replay uses ready striping, non-replay req uses
 * hints), so MDT replaces magic with appropriate one and now LOD can
 * easily understand what's inside -bzzz
 */
#define LOV_MAGIC_V1_DEF	0x0CD10BD0
#define LOV_MAGIC_V3_DEF	0x0CD30BD0

#define LOV_PATTERN_RAID0	0x001   /* stripes are used round-robin */
#define LOV_PATTERN_RAID1	0x002   /* stripes are mirrors of each other */
#define LOV_PATTERN_FIRST	0x100   /* first stripe is not in round-robin */
#define LOV_PATTERN_CMOBD	0x200

#define LOV_PATTERN_F_MASK	0xffff0000
#define LOV_PATTERN_F_RELEASED	0x80000000 /* HSM released file */

#define lov_pattern(pattern)		(pattern & ~LOV_PATTERN_F_MASK)
#define lov_pattern_flags(pattern)	(pattern & LOV_PATTERN_F_MASK)
#define lov_ost_data lov_ost_data_v1
struct lov_ost_data_v1 {	  /* per-stripe data structure (little-endian)*/
	struct ost_id l_ost_oi;	  /* OST object ID */
	__u32 l_ost_gen;	  /* generation of this l_ost_idx */
	__u32 l_ost_idx;	  /* OST index in LOV (lov_tgt_desc->tgts) */
};

#define lov_mds_md lov_mds_md_v1
struct lov_mds_md_v1 {		  /* LOV EA mds/wire data (little-endian) */
	__u32 lmm_magic;	  /* magic number = LOV_MAGIC_V1 */
	__u32 lmm_pattern;	  /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
	struct ost_id	lmm_oi;	  /* LOV object ID */
	__u32 lmm_stripe_size;	  /* size of stripe in bytes */
	/* lmm_stripe_count used to be __u32 */
	__u16 lmm_stripe_count;	  /* num stripes in use for this object */
	__u16 lmm_layout_gen;	  /* layout generation number */
	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};
/*
 * Sigh, because pre-2.4 uses
 * struct lov_mds_md_v1 {
 *	........
 *	__u64 lmm_object_id;
 *	__u64 lmm_object_seq;
 *	......
 * }
 * to identify the LOV(MDT) object, and lmm_object_seq will
 * be normal_fid, which makes it hard to fold these conversions
 * into ostid_to_fid(), so we do the lmm_oi/fid conversion separately.
 *
 * We can tell the lmm_oi by this way,
 * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
 * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
 * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
 *      lmm_oi.f_ver = 0
 *
 * But currently lmm_oi/lsm_oi does not have any "real" usages,
 * except for printing some information, and the user can always
 * get the real FID from LMA; besides, the multiple-case check would
 * make swab more complicated.  So we will keep using id/seq for lmm_oi.
 */

static inline void fid_to_lmm_oi(const struct lu_fid *fid,
				 struct ost_id *oi)
{
	oi->oi.oi_id = fid_oid(fid);
	oi->oi.oi_seq = fid_seq(fid);
}

static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
{
	oi->oi.oi_seq = seq;
}

static inline __u64 lmm_oi_id(struct ost_id *oi)
{
	return oi->oi.oi_id;
}

static inline __u64 lmm_oi_seq(struct ost_id *oi)
{
	return oi->oi.oi_seq;
}

static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
				    struct ost_id *src_oi)
{
	dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
	dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
}

static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
				    struct ost_id *src_oi)
{
	dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
	dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
}
/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */

#define MAX_MD_SIZE \
	(sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
#define MIN_MD_SIZE \
	(sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
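/*
 * For orientation only: with the layouts above, sizeof(struct lov_mds_md_v1)
 * is 32 bytes (4 + 4 + 16 + 4 + 2 + 2) and sizeof(struct lov_ost_data_v1) is
 * 24 bytes (16 + 4 + 4), so MAX_MD_SIZE works out to 32 + 4*24 = 128 bytes
 * and MIN_MD_SIZE to 32 + 24 = 56 bytes.  (These figures assume the usual
 * packing with no compiler padding, which holds for these layouts.)
 */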
#define XATTR_NAME_ACL_ACCESS	"system.posix_acl_access"
#define XATTR_NAME_ACL_DEFAULT	"system.posix_acl_default"
#define XATTR_USER_PREFIX	"user."
#define XATTR_TRUSTED_PREFIX	"trusted."
#define XATTR_SECURITY_PREFIX	"security."
#define XATTR_LUSTRE_PREFIX	"lustre."

#define XATTR_NAME_LOV		"trusted.lov"
#define XATTR_NAME_LMA		"trusted.lma"
#define XATTR_NAME_LMV		"trusted.lmv"
#define XATTR_NAME_LINK		"trusted.link"
#define XATTR_NAME_FID		"trusted.fid"
#define XATTR_NAME_VERSION	"trusted.version"
#define XATTR_NAME_SOM		"trusted.som"
#define XATTR_NAME_HSM		"trusted.hsm"
#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"
struct lov_mds_md_v3 {		  /* LOV EA mds/wire data (little-endian) */
	__u32 lmm_magic;	  /* magic number = LOV_MAGIC_V3 */
	__u32 lmm_pattern;	  /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
	struct ost_id	lmm_oi;	  /* LOV object ID */
	__u32 lmm_stripe_size;	  /* size of stripe in bytes */
	/* lmm_stripe_count used to be __u32 */
	__u16 lmm_stripe_count;	  /* num stripes in use for this object */
	__u16 lmm_layout_gen;	  /* layout generation number */
	char  lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};
static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
{
	if (lmm_magic == LOV_MAGIC_V3)
		return sizeof(struct lov_mds_md_v3) +
			stripes * sizeof(struct lov_ost_data_v1);
	else
		return sizeof(struct lov_mds_md_v1) +
			stripes * sizeof(struct lov_ost_data_v1);
}
static inline __u32
lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
{
	switch (lmm_magic) {
	case LOV_MAGIC_V1: {
		struct lov_mds_md_v1 lmm;

		if (buf_size < sizeof(lmm))
			return 0;

		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
	}
	case LOV_MAGIC_V3: {
		struct lov_mds_md_v3 lmm;

		if (buf_size < sizeof(lmm))
			return 0;

		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
	}
	default:
		return 0;
	}
}
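/*
 * Usage sketch for the two helpers above (sizes as in the note following
 * MIN_MD_SIZE): a V1 layout with 4 stripes needs
 * lov_mds_md_size(4, LOV_MAGIC_V1) == 32 + 4*24 == 128 bytes, and the
 * inverse question "how many stripes fit in a 128-byte buffer?" gives
 * lov_mds_md_max_stripe_count(128, LOV_MAGIC_V1) == (128 - 32) / 24 == 4.
 */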
#define OBD_MD_FLID	   (0x00000001ULL) /* object ID */
#define OBD_MD_FLATIME	   (0x00000002ULL) /* access time */
#define OBD_MD_FLMTIME	   (0x00000004ULL) /* data modification time */
#define OBD_MD_FLCTIME	   (0x00000008ULL) /* change time */
#define OBD_MD_FLSIZE	   (0x00000010ULL) /* size */
#define OBD_MD_FLBLOCKS	   (0x00000020ULL) /* allocated blocks count */
#define OBD_MD_FLBLKSZ	   (0x00000040ULL) /* block size */
#define OBD_MD_FLMODE	   (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
#define OBD_MD_FLTYPE	   (0x00000100ULL) /* object type (mode & S_IFMT) */
#define OBD_MD_FLUID	   (0x00000200ULL) /* user ID */
#define OBD_MD_FLGID	   (0x00000400ULL) /* group ID */
#define OBD_MD_FLFLAGS	   (0x00000800ULL) /* flags word */
#define OBD_MD_FLNLINK	   (0x00002000ULL) /* link count */
#define OBD_MD_FLGENER	   (0x00004000ULL) /* generation number */
/*#define OBD_MD_FLINLINE    (0x00008000ULL)  inline data. used until 1.6.5 */
#define OBD_MD_FLRDEV	   (0x00010000ULL) /* device number */
#define OBD_MD_FLEASIZE	   (0x00020000ULL) /* extended attribute data */
#define OBD_MD_LINKNAME	   (0x00040000ULL) /* symbolic link target */
#define OBD_MD_FLHANDLE	   (0x00080000ULL) /* file/lock handle */
#define OBD_MD_FLCKSUM	   (0x00100000ULL) /* bulk data checksum */
#define OBD_MD_FLQOS	   (0x00200000ULL) /* quality of service stats */
/*#define OBD_MD_FLOSCOPQ    (0x00400000ULL)  osc opaque data, never used */
#define OBD_MD_FLCOOKIE	   (0x00800000ULL) /* log cancellation cookie */
#define OBD_MD_FLGROUP	   (0x01000000ULL) /* group */
#define OBD_MD_FLFID	   (0x02000000ULL) /* ->ost write inline fid */
#define OBD_MD_FLEPOCH	   (0x04000000ULL) /* ->ost write with ioepoch */
					   /* ->mds if epoch opens or closes */
#define OBD_MD_FLGRANT	   (0x08000000ULL) /* ost preallocation space grant */
#define OBD_MD_FLDIREA	   (0x10000000ULL) /* dir's extended attribute data */
#define OBD_MD_FLUSRQUOTA  (0x20000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLGRPQUOTA  (0x40000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */

#define OBD_MD_MDS	   (0x0000000100000000ULL) /* where an inode lives on */
#define OBD_MD_REINT	   (0x0000000200000000ULL) /* reintegrate oa */
#define OBD_MD_MEA	   (0x0000000400000000ULL) /* CMD split EA */
#define OBD_MD_TSTATE	   (0x0000000800000000ULL) /* transient state field */

#define OBD_MD_FLXATTR	     (0x0000001000000000ULL) /* xattr */
#define OBD_MD_FLXATTRLS     (0x0000002000000000ULL) /* xattr list */
#define OBD_MD_FLXATTRRM     (0x0000004000000000ULL) /* xattr remove */
#define OBD_MD_FLACL	     (0x0000008000000000ULL) /* ACL */
#define OBD_MD_FLRMTPERM     (0x0000010000000000ULL) /* remote permission */
#define OBD_MD_FLMDSCAPA     (0x0000020000000000ULL) /* MDS capability */
#define OBD_MD_FLOSSCAPA     (0x0000040000000000ULL) /* OSS capability */
#define OBD_MD_FLCKSPLIT     (0x0000080000000000ULL) /* Check split on server */
#define OBD_MD_FLCROSSREF    (0x0000100000000000ULL) /* Cross-ref case */
#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
						      * under lock; for xattr
						      * requests means the
						      * client holds the lock */
#define OBD_MD_FLOBJCOUNT    (0x0000400000000000ULL) /* for multiple destroy */

#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
#define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
#define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
#define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */

#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
#define OBD_MD_FLRELEASED    (0x0020000000000000ULL) /* file released */

#define OBD_MD_FLGETATTR (OBD_MD_FLID    | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
			  OBD_MD_FLCTIME | OBD_MD_FLSIZE  | OBD_MD_FLBLKSZ | \
			  OBD_MD_FLMODE  | OBD_MD_FLTYPE  | OBD_MD_FLUID   | \
			  OBD_MD_FLGID   | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
			  OBD_MD_FLGENER | OBD_MD_FLRDEV  | OBD_MD_FLGROUP)

#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
/* don't forget obdo_fid which is way down at the bottom so it can
 * come after the definition of llog_cookie */

enum hss_valid {
	HSS_SETMASK	= 0x01,
	HSS_CLEARMASK	= 0x02,
	HSS_ARCHIVE_ID	= 0x04,
};

struct hsm_state_set {
	__u32	hss_valid;
	__u32	hss_archive_id;
	__u64	hss_setmask;
	__u64	hss_clearmask;
};

extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
extern void lustre_swab_hsm_state_set(struct hsm_state_set *hss);

extern void lustre_swab_obd_statfs (struct obd_statfs *os);
/* ost_body.data values for OST_BRW */

#define OBD_BRW_READ		0x01
#define OBD_BRW_WRITE		0x02
#define OBD_BRW_RWMASK		(OBD_BRW_READ | OBD_BRW_WRITE)
#define OBD_BRW_SYNC		0x08 /* this page is a part of synchronous
				      * transfer and is not accounted in
				      * grant. */
#define OBD_BRW_CHECK		0x10
#define OBD_BRW_FROM_GRANT	0x20 /* the osc manages this under llite */
#define OBD_BRW_GRANTED		0x40 /* the ost manages this */
#define OBD_BRW_NOCACHE		0x80 /* this page is a part of non-cached IO */
#define OBD_BRW_NOQUOTA	       0x100
#define OBD_BRW_SRVLOCK	       0x200 /* Client holds no lock over this page */
#define OBD_BRW_ASYNC	       0x400 /* Server may delay commit to disk */
#define OBD_BRW_MEMALLOC       0x800 /* Client runs in the "kswapd" context */
#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
1763 #define OBD_OBJECT_EOF 0xffffffffffffffffULL
1765 #define OST_MIN_PRECREATE 32
1766 #define OST_MAX_PRECREATE 20000
1769 struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
1770 __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
1771 * now (PTLRPC_BULK_OPS_COUNT - 1) in
1772 * high 16 bits in 2.4 and later */
1773 __u32 ioo_bufcnt; /* number of niobufs for this object */
1776 #define IOOBJ_MAX_BRW_BITS 16
1777 #define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
1778 #define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
1779 #define ioobj_max_brw_set(ioo, num) \
1780 do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
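/*
 * Illustrative sketch only (hypothetical helper, not part of the wire
 * protocol): the two macros above pack the maximum BRW count, minus one,
 * into the high 16 bits of ioo_max_brw (the low 16 bits carried o_mode
 * before 2.4), and a round-trip recovers the original count.
 */
static inline __u32 ioobj_max_brw_example(void)
{
	struct obd_ioobj ioo;

	ioobj_max_brw_set(&ioo, 4);	/* stores (4 - 1) << IOOBJ_MAX_BRW_BITS */
	return ioobj_max_brw_get(&ioo);	/* yields 4 again */
}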
1782 extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo);
1784 /* multiple of 8 bytes => can safely be used in an array */
1785 struct niobuf_remote {
1791 extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr);
1793 /* lock value block communicated between the filter and llite */
1795 /* OST_LVB_ERR_INIT is needed because the return code in rc is
1796 * negative, i.e. because ((MASK + rc) & MASK) != MASK. */
1797 #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
1798 #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
1799 #define OST_LVB_IS_ERR(blocks) \
1800 ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
1801 #define OST_LVB_SET_ERR(blocks, rc) \
1802 do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
1803 #define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
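/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * a negative errno is folded into the otherwise-impossible high bits of the
 * blocks value, so the receiver can tell a real block count from an error.
 */
static inline int ost_lvb_err_example(void)
{
	__u64 blocks;

	OST_LVB_SET_ERR(blocks, -2);		/* blocks = 0xffbadbad80000000ULL - 2 */
	if (OST_LVB_IS_ERR(blocks))
		return OST_LVB_GET_ERR(blocks);	/* recovers -2 */
	return 0;
}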
1813 extern void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
1827 extern void lustre_swab_ost_lvb(struct ost_lvb *lvb);
1830 * lquota data structures
1833 #ifndef QUOTABLOCK_BITS
1834 #define QUOTABLOCK_BITS 10
1837 #ifndef QUOTABLOCK_SIZE
1838 #define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
1842 #define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS)
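/*
 * Illustrative sketch only (hypothetical helper): toqb() converts a byte
 * count into 1KiB quota blocks, rounding up, e.g. toqb(1) == 1 and
 * toqb(4097) == 5.
 */
static inline __u64 toqb_example(void)
{
	return toqb(4097);	/* (4097 + 1023) >> 10 == 5 */
}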
1845 /* The lquota_id structure is a union of all the possible identifier types that
1846 * can be used with quota; this includes:
1849 * - a FID which can be used for per-directory quota in the future */
1851 struct lu_fid qid_fid; /* FID for per-directory quota */
1852 __u64 qid_uid; /* user identifier */
1853 __u64 qid_gid; /* group identifier */
1856 /* quotactl management */
1857 struct obd_quotactl {
1859 __u32 qc_type; /* see Q_* flag below */
1862 struct obd_dqinfo qc_dqinfo;
1863 struct obd_dqblk qc_dqblk;
1866 extern void lustre_swab_obd_quotactl(struct obd_quotactl *q);
1868 #define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */
1869 #define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */
1870 #define Q_GETOINFO 0x800102 /* get obd quota info */
1871 #define Q_GETOQUOTA 0x800103 /* get obd quotas */
1872 #define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */
1874 #define Q_COPY(out, in, member) (out)->member = (in)->member
1876 #define QCTL_COPY(out, in) \
1878 Q_COPY(out, in, qc_cmd); \
1879 Q_COPY(out, in, qc_type); \
1880 Q_COPY(out, in, qc_id); \
1881 Q_COPY(out, in, qc_stat); \
1882 Q_COPY(out, in, qc_dqinfo); \
1883 Q_COPY(out, in, qc_dqblk); \
1886 /* Body of quota request used for quota acquire/release RPCs between quota
1887 * master (aka QMT) and slaves (aka QSD). */
1889 struct lu_fid qb_fid; /* FID of global index packing the pool ID
1890 * and type (data or metadata) as well as
1891 * the quota type (user or group). */
1892 union lquota_id qb_id; /* uid or gid or directory FID */
1893 __u32 qb_flags; /* see below */
1895 __u64 qb_count; /* acquire/release count (kbytes/inodes) */
1896 __u64 qb_usage; /* current slave usage (kbytes/inodes) */
1897 __u64 qb_slv_ver; /* slave index file version */
1898 struct lustre_handle qb_lockh; /* per-ID lock handle */
1899 struct lustre_handle qb_glb_lockh; /* global lock handle */
1900 __u64 qb_padding1[4];
1903 /* When the quota_body is used in the reply of quota global intent
1904 * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID. */
1905 #define qb_slv_fid qb_fid
1906 /* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in
1908 #define qb_qunit qb_usage
1910 #define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */
1911 #define QUOTA_DQACQ_FL_PREACQ 0x2 /* pre-acquire */
1912 #define QUOTA_DQACQ_FL_REL 0x4 /* release quota */
1913 #define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */
1915 extern void lustre_swab_quota_body(struct quota_body *b);
1917 /* Quota types currently supported */
1919 LQUOTA_TYPE_USR = 0x00, /* maps to USRQUOTA */
1920 LQUOTA_TYPE_GRP = 0x01, /* maps to GRPQUOTA */
1924 /* There are 2 different resource types on which a quota limit can be enforced:
1925 * - inodes on the MDTs
1926 * - blocks on the OSTs */
1928 LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */
1929 LQUOTA_RES_DT = 0x02,
1931 LQUOTA_FIRST_RES = LQUOTA_RES_MD
1933 #define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1)
1936 * Space accounting support
1937 * Format of an accounting record, providing disk usage information for a given
1940 struct lquota_acct_rec { /* 16 bytes */
1941 __u64 bspace; /* current space in use */
1942 __u64 ispace; /* current # inodes in use */
1946 * Global quota index support
1947 * Format of a global record, providing global quota settings for a given quota
1950 struct lquota_glb_rec { /* 32 bytes */
1951 __u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */
1952 __u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */
1953 __u64 qbr_time; /* grace time, in seconds */
1954 __u64 qbr_granted; /* how much is granted to slaves, in #inodes or
1959 * Slave index support
1960 * Format of a slave record, recording how much space is granted to a given
1963 struct lquota_slv_rec { /* 8 bytes */
1964 __u64 qsr_granted; /* space granted to the slave for the key=ID,
1965 * in #inodes or kbytes */
1968 /* Data structures associated with the quota locks */
1970 /* Glimpse descriptor used for the index & per-ID quota locks */
1971 struct ldlm_gl_lquota_desc {
1972 union lquota_id gl_id; /* quota ID subject to the glimpse */
1973 __u64 gl_flags; /* see LQUOTA_FL* below */
1974 __u64 gl_ver; /* new index version */
1975 __u64 gl_hardlimit; /* new hardlimit or qunit value */
1976 __u64 gl_softlimit; /* new softlimit */
1980 #define gl_qunit gl_hardlimit /* current qunit value used when
1981 * glimpsing per-ID quota locks */
1983 /* quota glimpse flags */
1984 #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
1986 /* LVB used with quota (global and per-ID) locks */
1988 __u64 lvb_flags; /* see LQUOTA_FL* above */
1989 __u64 lvb_id_may_rel; /* space that might be released later */
1990 __u64 lvb_id_rel; /* space released by the slave for this ID */
1991 __u64 lvb_id_qunit; /* current qunit value */
1995 extern void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
1997 /* LVB used with global quota lock */
1998 #define lvb_glb_ver lvb_id_may_rel /* current version of the global index */
2006 #define QUOTA_FIRST_OPC QUOTA_DQACQ
2015 MDS_GETATTR_NAME = 34,
2020 MDS_DISCONNECT = 39,
2026 MDS_DONE_WRITING = 45,
2028 MDS_QUOTACHECK = 47,
2031 MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
2035 MDS_HSM_STATE_GET = 54,
2036 MDS_HSM_STATE_SET = 55,
2037 MDS_HSM_ACTION = 56,
2038 MDS_HSM_PROGRESS = 57,
2039 MDS_HSM_REQUEST = 58,
2040 MDS_HSM_CT_REGISTER = 59,
2041 MDS_HSM_CT_UNREGISTER = 60,
2042 MDS_SWAP_LAYOUTS = 61,
2046 #define MDS_FIRST_OPC MDS_GETATTR
2049 /* opcodes for object update */
2055 #define UPDATE_FIRST_OPC UPDATE_OBJ
2072 } mds_reint_t, mdt_reint_t;
2074 extern void lustre_swab_generic_32s (__u32 *val);
2076 /* the disposition of the intent outlines what was executed */
2077 #define DISP_IT_EXECD 0x00000001
2078 #define DISP_LOOKUP_EXECD 0x00000002
2079 #define DISP_LOOKUP_NEG 0x00000004
2080 #define DISP_LOOKUP_POS 0x00000008
2081 #define DISP_OPEN_CREATE 0x00000010
2082 #define DISP_OPEN_OPEN 0x00000020
2083 #define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */
2084 #define DISP_ENQ_OPEN_REF 0x00800000
2085 #define DISP_ENQ_CREATE_REF 0x01000000
2086 #define DISP_OPEN_LOCK 0x02000000
2087 #define DISP_OPEN_LEASE 0x04000000
2088 #define DISP_OPEN_STRIPE 0x08000000
2090 /* INODE LOCK PARTS */
2091 #define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also
2092 * was used to protect permission (mode,
2093 * owner, group etc) before 2.4. */
2094 #define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
2095 #define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
2096 #define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
2098 /* The PERM bit was added in 2.4 to protect permissions (mode, owner, group,
2099 * acl, etc.), separating permission protection from the LOOKUP lock. This is
2100 * needed because, for remote directories (in DNE), these locks are granted by
2101 * different MDTs (different ldlm namespaces).
2103 * For a local directory, the MDT always grants UPDATE_LOCK|PERM_LOCK together.
2104 * For a remote directory, the master MDT, where the remote directory lives,
2105 * grants UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
2106 * grants LOOKUP_LOCK. */
2107 #define MDS_INODELOCK_PERM 0x000010
2108 #define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
2110 #define MDS_INODELOCK_MAXSHIFT 5
2111 /* This FULL lock is useful to take on unlink-type operations */
2112 #define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
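/*
 * Illustrative note (hypothetical helper): with MDS_INODELOCK_MAXSHIFT == 5,
 * MDS_INODELOCK_FULL is (1 << 6) - 1 == 0x3f, i.e. all six bits defined above.
 */
static inline int mds_inodelock_full_example(void)
{
	return MDS_INODELOCK_FULL == (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
				      MDS_INODELOCK_OPEN | MDS_INODELOCK_LAYOUT |
				      MDS_INODELOCK_PERM | MDS_INODELOCK_XATTR);	/* 1 (true) */
}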
2114 extern void lustre_swab_ll_fid (struct ll_fid *fid);
2116 /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
2117 * but was moved into name[1] along with the OID to avoid consuming the
2118 * name[2,3] fields that need to be used for the quota id (also a FID). */
2120 LUSTRE_RES_ID_SEQ_OFF = 0,
2121 LUSTRE_RES_ID_VER_OID_OFF = 1,
2122 LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
2123 LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
2124 LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
2125 LUSTRE_RES_ID_HSH_OFF = 3
2128 #define MDS_STATUS_CONN 1
2129 #define MDS_STATUS_LOV 2
2131 /* mdt_thread_info.mti_flags. */
2133 /* The flag indicates Size-on-MDS attributes are changed. */
2134 MF_SOM_CHANGE = (1 << 0),
2135 /* Flags indicating an epoch opens or closes. */
2136 MF_EPOCH_OPEN = (1 << 1),
2137 MF_EPOCH_CLOSE = (1 << 2),
2138 MF_MDC_CANCEL_FID1 = (1 << 3),
2139 MF_MDC_CANCEL_FID2 = (1 << 4),
2140 MF_MDC_CANCEL_FID3 = (1 << 5),
2141 MF_MDC_CANCEL_FID4 = (1 << 6),
2142 /* There is a pending attribute update. */
2143 MF_SOM_AU = (1 << 7),
2144 /* Cancel OST locks while getting OST attributes. */
2145 MF_GETATTR_LOCK = (1 << 8),
2146 MF_GET_MDT_IDX = (1 << 9),
2149 #define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
2151 #define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
2153 /* these should be identical to their EXT4_*_FL counterparts; they are
2154 * redefined here only to avoid dragging in fs/ext4/ext4.h */
2155 #define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
2156 #define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
2157 #define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
2158 #define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
2159 #define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
2161 /* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
2162 * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
2163 * protocol equivalents of LDISKFS_*_FL values stored on disk, while
2164 * the S_* flags are kernel-internal values that change between kernel
2165 * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
2166 * See b=16526 for a full history. */
2167 static inline int ll_ext_to_inode_flags(int flags)
2168 {
2169 return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
2170 ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) |
2171 ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
2172 #if defined(S_DIRSYNC)
2173 ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) |
2174 #endif
2175 ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
2176 }
2178 static inline int ll_inode_to_ext_flags(int iflags)
2179 {
2180 return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
2181 ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) |
2182 ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
2183 #if defined(S_DIRSYNC)
2184 ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) |
2185 #endif
2186 ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
2187 }
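/*
 * Illustrative sketch only (hypothetical helper): wire LUSTRE_*_FL values and
 * kernel-internal S_* values round-trip through the two converters above.
 */
static inline int ll_flags_roundtrip_example(void)
{
	int wire = LUSTRE_APPEND_FL | LUSTRE_NOATIME_FL;
	int vfs = ll_ext_to_inode_flags(wire);		/* S_APPEND | S_NOATIME */

	return ll_inode_to_ext_flags(vfs) == wire;	/* 1 (true) */
}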
2189 /* 64 possible states */
2190 enum md_transient_state {
2191 MS_RESTORE = (1 << 0), /* restore is running */
2197 struct lustre_handle handle;
2199 __u64 size; /* Offset, in the case of MDS_READPAGE */
2203 __u64 blocks; /* XID, in the case of MDS_READPAGE */
2205 __u64 t_state; /* transient file state defined in
2206 * enum md_transient_state
2207 * was "ino" until 2.4.0 */
2214 __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
2216 __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */
2217 __u32 unused2; /* was "generation" until 2.4.0 */
2222 __u32 max_cookiesize;
2223 __u32 uid_h; /* high 32-bits of uid, for FUID */
2224 __u32 gid_h; /* high 32-bits of gid, for FUID */
2225 __u32 padding_5; /* also fix lustre_swab_mdt_body */
2233 extern void lustre_swab_mdt_body (struct mdt_body *b);
2235 struct mdt_ioepoch {
2236 struct lustre_handle handle;
2242 extern void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b);
2244 /* permissions for md_perm.mp_perm */
2246 CFS_SETUID_PERM = 0x01,
2247 CFS_SETGID_PERM = 0x02,
2248 CFS_SETGRP_PERM = 0x04,
2249 CFS_RMTACL_PERM = 0x08,
2250 CFS_RMTOWN_PERM = 0x10
2253 /* inode access permission for a remote user; the inode info is omitted
2254 * because the client already knows it. */
2255 struct mdt_remote_perm {
2262 __u32 rp_access_perm; /* MAY_READ/WRITE/EXEC */
2266 extern void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
2268 struct mdt_rec_setattr {
2278 __u32 sa_padding_1_h;
2279 struct lu_fid sa_fid;
2288 __u32 sa_attr_flags;
2290 __u32 sa_bias; /* some operation flags */
2296 extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
2299 * Attribute flags used in mdt_rec_setattr::sa_valid.
2300 * The kernel's #defines for ATTR_* should not be used over the network
2301 * since the client and MDS may run different kernels (see bug 13828).
2302 * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
2304 #define MDS_ATTR_MODE 0x1ULL /* = 1 */
2305 #define MDS_ATTR_UID 0x2ULL /* = 2 */
2306 #define MDS_ATTR_GID 0x4ULL /* = 4 */
2307 #define MDS_ATTR_SIZE 0x8ULL /* = 8 */
2308 #define MDS_ATTR_ATIME 0x10ULL /* = 16 */
2309 #define MDS_ATTR_MTIME 0x20ULL /* = 32 */
2310 #define MDS_ATTR_CTIME 0x40ULL /* = 64 */
2311 #define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */
2312 #define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */
2313 #define MDS_ATTR_FORCE 0x200ULL /* = 512, not a change, but force the change */
2314 #define MDS_ATTR_ATTR_FLAG 0x400ULL /* = 1024 */
2315 #define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */
2316 #define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */
2317 #define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */
2318 #define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, ie O_TRUNC */
2319 #define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */
2322 #define FMODE_READ 00000001
2323 #define FMODE_WRITE 00000002
2326 #define MDS_FMODE_CLOSED 00000000
2327 #define MDS_FMODE_EXEC 00000004
2328 /* IO Epoch is opened on a closed file. */
2329 #define MDS_FMODE_EPOCH 01000000
2330 /* IO Epoch is opened on a file truncate. */
2331 #define MDS_FMODE_TRUNC 02000000
2332 /* Size-on-MDS Attribute Update is pending. */
2333 #define MDS_FMODE_SOM 04000000
2335 #define MDS_OPEN_CREATED 00000010
2336 #define MDS_OPEN_CROSS 00000020
2338 #define MDS_OPEN_CREAT 00000100
2339 #define MDS_OPEN_EXCL 00000200
2340 #define MDS_OPEN_TRUNC 00001000
2341 #define MDS_OPEN_APPEND 00002000
2342 #define MDS_OPEN_SYNC 00010000
2343 #define MDS_OPEN_DIRECTORY 00200000
2345 #define MDS_OPEN_BY_FID 040000000 /* open_by_fid for known object */
2346 #define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */
2347 #define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
2348 #define MDS_OPEN_JOIN_FILE 0400000000 /* open for join file.
2349 * JOIN FILE is no longer
2350 * supported; this flag is kept
2351 * only to prevent the bit from
2352 * being reused. */
2354 #define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
2355 #define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
2356 #define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */
2357 #define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
2358 #define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or
2360 #define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created
2362 #define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease
2363 * delegation, succeed if it's not
2364 * being opened with conflict mode.
2366 #define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
2368 /* permission to create a non-directory file */
2369 #define MAY_CREATE (1 << 7)
2370 /* permission to create a directory */
2371 #define MAY_LINK (1 << 8)
2372 /* permission to delete from the directory */
2373 #define MAY_UNLINK (1 << 9)
2374 /* source's permission for rename */
2375 #define MAY_RENAME_SRC (1 << 10)
2376 /* target's permission for rename */
2377 #define MAY_RENAME_TAR (1 << 11)
2378 /* partial (parent's) VTX permission check */
2379 #define MAY_VTX_PART (1 << 12)
2380 /* full VTX permission check */
2381 #define MAY_VTX_FULL (1 << 13)
2382 /* lfs rgetfacl permission check */
2383 #define MAY_RGETFACL (1 << 14)
2386 MDS_CHECK_SPLIT = 1 << 0,
2387 MDS_CROSS_REF = 1 << 1,
2388 MDS_VTX_BYPASS = 1 << 2,
2389 MDS_PERM_BYPASS = 1 << 3,
2391 MDS_QUOTA_IGNORE = 1 << 5,
2392 MDS_CLOSE_CLEANUP = 1 << 6,
2393 MDS_KEEP_ORPHAN = 1 << 7,
2394 MDS_RECOV_OPEN = 1 << 8,
2395 MDS_DATA_MODIFIED = 1 << 9,
2396 MDS_CREATE_VOLATILE = 1 << 10,
2397 MDS_OWNEROVERRIDE = 1 << 11,
2398 MDS_HSM_RELEASE = 1 << 12,
2401 /* instance of mdt_reint_rec */
2402 struct mdt_rec_create {
2410 __u32 cr_suppgid1_h;
2412 __u32 cr_suppgid2_h;
2413 struct lu_fid cr_fid1;
2414 struct lu_fid cr_fid2;
2415 struct lustre_handle cr_old_handle; /* handle in case of open replay */
2419 __u64 cr_padding_1; /* rr_blocks */
2422 /* the helpers set/get_mrc_cr_flags() must be used to access the
2423 * 64-bit cr_flags [cr_flags_l, cr_flags_h]; the split extends the
2424 * cr_flags size without breaking 1.8 compatibility */
2425 __u32 cr_flags_l; /* for use with open, low 32 bits */
2426 __u32 cr_flags_h; /* for use with open, high 32 bits */
2427 __u32 cr_umask; /* umask for create */
2428 __u32 cr_padding_4; /* rr_padding_4 */
2431 static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
2432 {
2433 mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFULL);
2434 mrc->cr_flags_h = (__u32)(flags >> 32);
2435 }
2437 static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
2438 {
2439 return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
2440 }
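/*
 * Illustrative sketch only (hypothetical helper): open flags with bits above
 * bit 31 survive the split across cr_flags_l/cr_flags_h.
 */
static inline int mrc_cr_flags_roundtrip_example(struct mdt_rec_create *mrc)
{
	__u64 flags = MDS_OPEN_LOCK | MDS_OPEN_VOLATILE;

	set_mrc_cr_flags(mrc, flags);
	return get_mrc_cr_flags(mrc) == flags;	/* 1 (true) */
}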
2442 /* instance of mdt_reint_rec */
2443 struct mdt_rec_link {
2451 __u32 lk_suppgid1_h;
2453 __u32 lk_suppgid2_h;
2454 struct lu_fid lk_fid1;
2455 struct lu_fid lk_fid2;
2457 __u64 lk_padding_1; /* rr_atime */
2458 __u64 lk_padding_2; /* rr_ctime */
2459 __u64 lk_padding_3; /* rr_size */
2460 __u64 lk_padding_4; /* rr_blocks */
2462 __u32 lk_padding_5; /* rr_mode */
2463 __u32 lk_padding_6; /* rr_flags */
2464 __u32 lk_padding_7; /* rr_padding_2 */
2465 __u32 lk_padding_8; /* rr_padding_3 */
2466 __u32 lk_padding_9; /* rr_padding_4 */
2469 /* instance of mdt_reint_rec */
2470 struct mdt_rec_unlink {
2478 __u32 ul_suppgid1_h;
2480 __u32 ul_suppgid2_h;
2481 struct lu_fid ul_fid1;
2482 struct lu_fid ul_fid2;
2484 __u64 ul_padding_2; /* rr_atime */
2485 __u64 ul_padding_3; /* rr_ctime */
2486 __u64 ul_padding_4; /* rr_size */
2487 __u64 ul_padding_5; /* rr_blocks */
2490 __u32 ul_padding_6; /* rr_flags */
2491 __u32 ul_padding_7; /* rr_padding_2 */
2492 __u32 ul_padding_8; /* rr_padding_3 */
2493 __u32 ul_padding_9; /* rr_padding_4 */
2496 /* instance of mdt_reint_rec */
2497 struct mdt_rec_rename {
2505 __u32 rn_suppgid1_h;
2507 __u32 rn_suppgid2_h;
2508 struct lu_fid rn_fid1;
2509 struct lu_fid rn_fid2;
2511 __u64 rn_padding_1; /* rr_atime */
2512 __u64 rn_padding_2; /* rr_ctime */
2513 __u64 rn_padding_3; /* rr_size */
2514 __u64 rn_padding_4; /* rr_blocks */
2515 __u32 rn_bias; /* some operation flags */
2516 __u32 rn_mode; /* cross-ref rename has mode */
2517 __u32 rn_padding_5; /* rr_flags */
2518 __u32 rn_padding_6; /* rr_padding_2 */
2519 __u32 rn_padding_7; /* rr_padding_3 */
2520 __u32 rn_padding_8; /* rr_padding_4 */
2523 /* instance of mdt_reint_rec */
2524 struct mdt_rec_setxattr {
2532 __u32 sx_suppgid1_h;
2534 __u32 sx_suppgid2_h;
2535 struct lu_fid sx_fid;
2536 __u64 sx_padding_1; /* These three are rr_fid2 */
2541 __u64 sx_padding_5; /* rr_ctime */
2542 __u64 sx_padding_6; /* rr_size */
2543 __u64 sx_padding_7; /* rr_blocks */
2546 __u32 sx_padding_8; /* rr_flags */
2547 __u32 sx_padding_9; /* rr_padding_2 */
2548 __u32 sx_padding_10; /* rr_padding_3 */
2549 __u32 sx_padding_11; /* rr_padding_4 */
2553 * mdt_rec_reint is the template for all mdt_reint_xxx structures.
2554 * Do NOT change the size of various members, otherwise the value
2555 * will be broken in lustre_swab_mdt_rec_reint().
2557 * If you add new members in other mdt_reint_xxx structures and need to use the
2558 * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
2560 struct mdt_rec_reint {
2568 __u32 rr_suppgid1_h;
2570 __u32 rr_suppgid2_h;
2571 struct lu_fid rr_fid1;
2572 struct lu_fid rr_fid2;
2583 __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
2586 extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
2589 __u32 ld_tgt_count; /* how many MDS's */
2590 __u32 ld_active_tgt_count; /* how many active */
2591 __u32 ld_default_stripe_count; /* how many objects are used */
2592 __u32 ld_pattern; /* default MEA_MAGIC_* */
2593 __u64 ld_default_hash_size;
2594 __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */
2595 __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */
2596 __u32 ld_qos_maxage; /* in seconds */
2597 __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */
2598 __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */
2599 struct obd_uuid ld_uuid;
2602 extern void lustre_swab_lmv_desc (struct lmv_desc *ld);
2604 /* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
2605 struct lmv_stripe_md {
2610 char mea_pool_name[LOV_MAXPOOLNAME];
2611 struct lu_fid mea_ids[0];
2614 extern void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea);
2616 /* lmv structures */
2617 #define MEA_MAGIC_LAST_CHAR 0xb2221ca1
2618 #define MEA_MAGIC_ALL_CHARS 0xb222a11c
2619 #define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
2621 #define MAX_HASH_SIZE_32 0x7fffffffUL
2622 #define MAX_HASH_SIZE 0x7fffffffffffffffULL
2623 #define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
2628 FLD_FIRST_OPC = FLD_QUERY
2634 SEQ_FIRST_OPC = SEQ_QUERY
2638 SEQ_ALLOC_SUPER = 0,
2643 * LOV data structures
2646 #define LOV_MAX_UUID_BUFFER_SIZE 8192
2647 /* The size of the buffer the lov/mdc reserves for the
2648 * array of UUIDs returned by the MDS. With the current
2649 * protocol, this will limit the max number of OSTs per LOV */
2651 #define LOV_DESC_MAGIC 0xB0CCDE5C
2652 #define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */
2653 #define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS)
2655 /* LOV settings descriptor (should only contain static info) */
2657 __u32 ld_tgt_count; /* how many OBD's */
2658 __u32 ld_active_tgt_count; /* how many active */
2659 __u32 ld_default_stripe_count; /* how many objects are used */
2660 __u32 ld_pattern; /* default PATTERN_RAID0 */
2661 __u64 ld_default_stripe_size; /* in bytes */
2662 __u64 ld_default_stripe_offset; /* in bytes */
2663 __u32 ld_padding_0; /* unused */
2664 __u32 ld_qos_maxage; /* in seconds */
2665 __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */
2666 __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */
2667 struct obd_uuid ld_uuid;
2670 #define ld_magic ld_active_tgt_count /* for swabbing from llogs */
2672 extern void lustre_swab_lov_desc (struct lov_desc *ld);
2677 /* opcodes -- MUST be distinct from OST/MDS opcodes */
2682 LDLM_BL_CALLBACK = 104,
2683 LDLM_CP_CALLBACK = 105,
2684 LDLM_GL_CALLBACK = 106,
2685 LDLM_SET_INFO = 107,
2688 #define LDLM_FIRST_OPC LDLM_ENQUEUE
2690 #define RES_NAME_SIZE 4
2691 struct ldlm_res_id {
2692 __u64 name[RES_NAME_SIZE];
2695 #define DLDLMRES "[%#llx:%#llx:%#llx].%llx"
2696 #define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
2697 (res)->lr_name.name[2], (res)->lr_name.name[3]
2699 extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id);
2701 static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
2702 const struct ldlm_res_id *res1)
2704 return !memcmp(res0, res1, sizeof(*res0));
2721 #define LCK_MODE_NUM 8
2731 #define LDLM_MIN_TYPE LDLM_PLAIN
2733 struct ldlm_extent {
2739 static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
2740 struct ldlm_extent *ex2)
2741 {
2742 return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
2743 }
2745 /* check if @ex1 contains @ex2 */
2746 static inline int ldlm_extent_contain(struct ldlm_extent *ex1,
2747 struct ldlm_extent *ex2)
2748 {
2749 return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
2750 }
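/*
 * Illustrative sketch only (hypothetical helper): adjacent extents do not
 * overlap, while a [0, OBD_OBJECT_EOF] extent contains any other extent.
 */
static inline int ldlm_extent_example(void)
{
	struct ldlm_extent a = { .start = 0, .end = 4095 };
	struct ldlm_extent b = { .start = 4096, .end = 8191 };
	struct ldlm_extent whole = { .start = 0, .end = OBD_OBJECT_EOF };

	return !ldlm_extent_overlap(&a, &b) &&		/* adjacent, no overlap */
		ldlm_extent_contain(&whole, &a);	/* whole file covers a */
}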
2752 struct ldlm_inodebits {
2756 struct ldlm_flock_wire {
2764 /* it's important that the fields of the ldlm_extent structure match
2765 * the first fields of the ldlm_flock structure because there is only
2766 * one ldlm_swab routine to process the ldlm_policy_data_t union. if
2767 * this ever changes we will need to swab the union differently based
2768 * on the resource type. */
2771 struct ldlm_extent l_extent;
2772 struct ldlm_flock_wire l_flock;
2773 struct ldlm_inodebits l_inodebits;
2774 } ldlm_wire_policy_data_t;
2776 extern void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d);
2778 union ldlm_gl_desc {
2779 struct ldlm_gl_lquota_desc lquota_desc;
2782 extern void lustre_swab_gl_desc(union ldlm_gl_desc *);
2784 struct ldlm_intent {
2788 extern void lustre_swab_ldlm_intent (struct ldlm_intent *i);
2790 struct ldlm_resource_desc {
2791 ldlm_type_t lr_type;
2792 __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
2793 struct ldlm_res_id lr_name;
2796 extern void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r);
2798 struct ldlm_lock_desc {
2799 struct ldlm_resource_desc l_resource;
2800 ldlm_mode_t l_req_mode;
2801 ldlm_mode_t l_granted_mode;
2802 ldlm_wire_policy_data_t l_policy_data;
2805 extern void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l);
2807 #define LDLM_LOCKREQ_HANDLES 2
2808 #define LDLM_ENQUEUE_CANCEL_OFF 1
2810 struct ldlm_request {
2813 struct ldlm_lock_desc lock_desc;
2814 struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
2817 extern void lustre_swab_ldlm_request (struct ldlm_request *rq);
2819 /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
2820 * Otherwise, 2 are available. */
2821 #define ldlm_request_bufsize(count, type) \
2822 ({ \
2823 int _avail = LDLM_LOCKREQ_HANDLES; \
2824 _avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
2825 sizeof(struct ldlm_request) + \
2826 (count > _avail ? count - _avail : 0) * \
2827 sizeof(struct lustre_handle); \
2828 })
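/*
 * Worked example (illustrative only): for an LDLM_ENQUEUE carrying 3 cancel
 * handles, one inline slot is already occupied, so only two extra handles are
 * appended:
 *
 *   ldlm_request_bufsize(3, LDLM_ENQUEUE)
 *	== sizeof(struct ldlm_request) + 2 * sizeof(struct lustre_handle)
 */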
2832 __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */
2833 struct ldlm_lock_desc lock_desc;
2834 struct lustre_handle lock_handle;
2835 __u64 lock_policy_res1;
2836 __u64 lock_policy_res2;
2839 extern void lustre_swab_ldlm_reply (struct ldlm_reply *r);
2841 #define ldlm_flags_to_wire(flags) ((__u32)(flags))
2842 #define ldlm_flags_from_wire(flags) ((__u64)(flags))
2845 * Opcodes for mountconf (mgs and mgc)
2850 MGS_EXCEPTION, /* node died, etc. */
2851 MGS_TARGET_REG, /* whenever target starts up */
2857 #define MGS_FIRST_OPC MGS_CONNECT
2859 #define MGS_PARAM_MAXLEN 1024
2860 #define KEY_SET_INFO "set_info"
2862 struct mgs_send_param {
2863 char mgs_param[MGS_PARAM_MAXLEN];
2866 /* We pass this info to the MGS so it can write config logs */
2867 #define MTI_NAME_MAXLEN 64
2868 #define MTI_PARAM_MAXLEN 4096
2869 #define MTI_NIDS_MAX 32
2870 struct mgs_target_info {
2871 __u32 mti_lustre_ver;
2872 __u32 mti_stripe_index;
2873 __u32 mti_config_ver;
2875 __u32 mti_nid_count;
2876 __u32 mti_instance; /* Running instance of target */
2877 char mti_fsname[MTI_NAME_MAXLEN];
2878 char mti_svname[MTI_NAME_MAXLEN];
2879 char mti_uuid[sizeof(struct obd_uuid)];
2880 __u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/
2881 char mti_params[MTI_PARAM_MAXLEN];
2883 extern void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
2885 struct mgs_nidtbl_entry {
2886 __u64 mne_version; /* table version of this entry */
2887 __u32 mne_instance; /* target instance # */
2888 __u32 mne_index; /* target index */
2889 __u32 mne_length; /* length of this entry, in bytes */
2890 __u8 mne_type; /* target type LDD_F_SV_TYPE_OST/MDT */
2891 __u8 mne_nid_type; /* type of NID (mbz: must be zero); reserved for IPv6 */
2892 __u8 mne_nid_size; /* size of each NID, in bytes */
2893 __u8 mne_nid_count; /* # of NIDs in buffer */
2895 lnet_nid_t nids[0]; /* variable size buffer for NIDs. */
2898 extern void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
2900 struct mgs_config_body {
2901 char mcb_name[MTI_NAME_MAXLEN]; /* logname */
2902 __u64 mcb_offset; /* next index of config log to request */
2903 __u16 mcb_type; /* type of log: CONFIG_T_[CONFIG|RECOVER] */
2905 __u8 mcb_bits; /* bits unit size of config log */
2906 __u32 mcb_units; /* # of units for bulk transfer */
2908 extern void lustre_swab_mgs_config_body(struct mgs_config_body *body);
2910 struct mgs_config_res {
2911 __u64 mcr_offset; /* index of last config log */
2912 __u64 mcr_size; /* size of the log */
2914 extern void lustre_swab_mgs_config_res(struct mgs_config_res *body);
2916 /* Config marker flags (in config log) */
2917 #define CM_START 0x01
2919 #define CM_SKIP 0x04
2920 #define CM_UPGRADE146 0x08
2921 #define CM_EXCLUDE 0x10
2922 #define CM_START_SKIP (CM_START | CM_SKIP)
2925 __u32 cm_step; /* aka config version */
2927 __u32 cm_vers; /* lustre release version number */
2928 __u32 cm_padding; /* 64 bit align */
2929 __s64 cm_createtime; /* when this record was first created */
2930 __s64 cm_canceltime; /* when this record is no longer valid */
2931 char cm_tgtname[MTI_NAME_MAXLEN];
2932 char cm_comment[MTI_NAME_MAXLEN];
2935 extern void lustre_swab_cfg_marker(struct cfg_marker *marker,
2936 int swab, int size);
2939 * Opcodes for multiple servers.
2949 #define OBD_FIRST_OPC OBD_PING
2951 /* catalog of log objects */
2953 /** Identifier for a single log object */
2955 struct ost_id lgl_oi;
2957 } __attribute__((packed));
2959 /** Records written to the CATALOGS list */
2960 #define CATLIST "CATALOGS"
2962 struct llog_logid lci_logid;
2966 } __attribute__((packed));
2968 /* Log data record types - there is no specific reason that these need to
2969 * be related to the RPC opcodes, but no reason not to (may be handy later?)
2971 #define LLOG_OP_MAGIC 0x10600000
2972 #define LLOG_OP_MASK 0xfff00000
2975 LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000,
2976 OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00,
2977 /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */
2978 MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
2979 REINT_UNLINK, /* obsolete after 2.5.0 */
2980 MDS_UNLINK64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2982 /* MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
2983 MDS_SETATTR64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2985 OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000,
2986 /* PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
2987 LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000,
2988 /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
2989 CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
2990 CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
2991 HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
2992 LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
2993 LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
2996 #define LLOG_REC_HDR_NEEDS_SWABBING(r) \
2997 (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
2999 /** Log record header - stored in little endian order.
3000 * Each record must start with this struct, end with a llog_rec_tail,
3001 * and be a multiple of 256 bits in size.
3003 struct llog_rec_hdr {
3010 struct llog_rec_tail {
3015 /* Where data follows, just after the header */
3016 #define REC_DATA(ptr) \
3017 ((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))
3019 #define REC_DATA_LEN(rec) \
3020 (rec->lrh_len - sizeof(struct llog_rec_hdr) - \
3021 sizeof(struct llog_rec_tail))
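/*
 * Worked example (illustrative only): for a record with lrh_len == 64,
 * REC_DATA() points just past the llog_rec_hdr and, assuming the usual
 * 16-byte header and 8-byte tail, REC_DATA_LEN() yields 64 - 16 - 8 == 40
 * bytes of payload.
 */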
3023 struct llog_logid_rec {
3024 struct llog_rec_hdr lid_hdr;
3025 struct llog_logid lid_id;
3029 struct llog_rec_tail lid_tail;
3030 } __attribute__((packed));
3032 struct llog_unlink_rec {
3033 struct llog_rec_hdr lur_hdr;
3037 struct llog_rec_tail lur_tail;
3038 } __attribute__((packed));
3040 struct llog_unlink64_rec {
3041 struct llog_rec_hdr lur_hdr;
3042 struct lu_fid lur_fid;
3043 __u32 lur_count; /* number of lost precreated objects to destroy */
3047 struct llog_rec_tail lur_tail;
3048 } __attribute__((packed));
3050 struct llog_setattr64_rec {
3051 struct llog_rec_hdr lsr_hdr;
3052 struct ost_id lsr_oi;
3058 struct llog_rec_tail lsr_tail;
3059 } __attribute__((packed));
3061 struct llog_size_change_rec {
3062 struct llog_rec_hdr lsc_hdr;
3063 struct ll_fid lsc_fid;
3068 struct llog_rec_tail lsc_tail;
3069 } __attribute__((packed));
3071 #define CHANGELOG_MAGIC 0xca103000
3073 /** \a changelog_rec_type's that can't be masked */
3074 #define CHANGELOG_MINMASK (1 << CL_MARK)
3075 /** bits covering all \a changelog_rec_type's */
3076 #define CHANGELOG_ALLMASK 0XFFFFFFFF
3077 /** default \a changelog_rec_type mask */
3078 #define CHANGELOG_DEFMASK (CHANGELOG_ALLMASK & ~(1 << CL_ATIME | 1 << CL_CLOSE))
3080 /* changelog llog name, needed by client replicators */
3081 #define CHANGELOG_CATALOG "changelog_catalog"
3083 struct changelog_setinfo {
3086 } __attribute__((packed));
3088 /** changelog record */
3089 struct llog_changelog_rec {
3090 struct llog_rec_hdr cr_hdr;
3091 struct changelog_rec cr;
3092 struct llog_rec_tail cr_tail; /**< for_sizeof_only */
3093 } __attribute__((packed));
3095 struct llog_changelog_ext_rec {
3096 struct llog_rec_hdr cr_hdr;
3097 struct changelog_ext_rec cr;
3098 struct llog_rec_tail cr_tail; /**< for_sizeof_only */
3099 } __attribute__((packed));
3101 #define CHANGELOG_USER_PREFIX "cl"
3103 struct llog_changelog_user_rec {
3104 struct llog_rec_hdr cur_hdr;
3108 struct llog_rec_tail cur_tail;
3109 } __attribute__((packed));
3111 enum agent_req_status {
3119 static inline char *agent_req_status2name(enum agent_req_status ars)
3137 static inline bool agent_req_in_final_state(enum agent_req_status ars)
3139 return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
3140 (ars == ARS_CANCELED));
3143 struct llog_agent_req_rec {
3144 struct llog_rec_hdr arr_hdr; /**< record header */
3145 __u32 arr_status; /**< status of the request */
3147 * agent_req_status */
3148 __u32 arr_archive_id; /**< backend archive number */
3149 __u64 arr_flags; /**< req flags */
3150 __u64 arr_compound_id; /**< compound cookie */
3151 __u64 arr_req_create; /**< req. creation time */
3152 __u64 arr_req_change; /**< req. status change time */
3153 struct hsm_action_item arr_hai; /**< req. to the agent */
3154 struct llog_rec_tail arr_tail; /**< record tail for_sizeof_only */
3155 } __attribute__((packed));
3157 /* Old llog gen for compatibility */
3161 } __attribute__((packed));
3163 struct llog_gen_rec {
3164 struct llog_rec_hdr lgr_hdr;
3165 struct llog_gen lgr_gen;
3169 struct llog_rec_tail lgr_tail;
3172 /* On-disk header structure of each log object, stored in little endian order */
3173 #define LLOG_CHUNK_SIZE 8192
3174 #define LLOG_HEADER_SIZE (96)
3175 #define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
3177 #define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
3179 /* flags for the logs */
3181 LLOG_F_ZAP_WHEN_EMPTY = 0x1,
3182 LLOG_F_IS_CAT = 0x2,
3183 LLOG_F_IS_PLAIN = 0x4,
3186 struct llog_log_hdr {
3187 struct llog_rec_hdr llh_hdr;
3188 __s64 llh_timestamp;
3190 __u32 llh_bitmap_offset;
3194 /* for a catalog the first plain slot is next to it */
3195 struct obd_uuid llh_tgtuuid;
3196 __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
3197 __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
3198 struct llog_rec_tail llh_tail;
3199 } __attribute__((packed));
3201 #define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
3202 llh->llh_bitmap_offset - \
3203 sizeof(llh->llh_tail)) * 8)
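/*
 * Worked example (illustrative only): for a standard log header with
 * lrh_len == LLOG_CHUNK_SIZE (8192) and the bitmap starting 88 bytes into the
 * header, LLOG_BITMAP_SIZE() is (8192 - 88 - 8) * 8 == 64768 record slots,
 * matching LLOG_BITMAP_BYTES * 8.
 */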
3205 /** log cookies are used to reference a specific log file and a record therein */
3206 struct llog_cookie {
3207 struct llog_logid lgc_lgl;
3211 } __attribute__((packed));
3213 /** llog protocol */
3214 enum llogd_rpc_ops {
3215 LLOG_ORIGIN_HANDLE_CREATE = 501,
3216 LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502,
3217 LLOG_ORIGIN_HANDLE_READ_HEADER = 503,
3218 LLOG_ORIGIN_HANDLE_WRITE_REC = 504,
3219 LLOG_ORIGIN_HANDLE_CLOSE = 505,
3220 LLOG_ORIGIN_CONNECT = 506,
3221 LLOG_CATINFO = 507, /* deprecated */
3222 LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508,
3223 LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/
3225 LLOG_FIRST_OPC = LLOG_ORIGIN_HANDLE_CREATE
3229 struct llog_logid lgd_logid;
3231 __u32 lgd_llh_flags;
3233 __u32 lgd_saved_index;
3235 __u64 lgd_cur_offset;
3236 } __attribute__((packed));
3238 struct llogd_conn_body {
3239 struct llog_gen lgdc_gen;
3240 struct llog_logid lgdc_logid;
3241 __u32 lgdc_ctxt_idx;
3242 } __attribute__((packed));
3244 /* Note: 64-bit types are 64-bit aligned in structure */
3246 __u64 o_valid; /* hot fields in this obdo */
3249 __u64 o_size; /* o_size-o_blocks == ost_lvb */
3253 __u64 o_blocks; /* brw: cli sent cached bytes */
3256 /* 32-bit fields start here: keep an even number of them via padding */
3257 __u32 o_blksize; /* optimal IO blocksize */
3258 __u32 o_mode; /* brw: cli sent cache remain */
3262 __u32 o_nlink; /* brw: checksum */
3264 __u32 o_misc; /* brw: o_dropped */
3266 __u64 o_ioepoch; /* epoch in ost writes */
3267 __u32 o_stripe_idx; /* holds stripe idx */
3269 struct lustre_handle o_handle; /* brw: lock handle to prolong
3271 struct llog_cookie o_lcookie; /* destroy: unlink cookie from
3276 __u64 o_data_version; /* getattr: sum of iversion for
3278 * brw: grant space consumed on
3279 * the client for the write */
3285 #define o_dirty o_blocks
3286 #define o_undirty o_mode
3287 #define o_dropped o_misc
3288 #define o_cksum o_nlink
3289 #define o_grant_used o_data_version
3291 static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
3293 const struct obdo *lobdo)
3296 wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3300 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3301 fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
3302 /* Currently OBD_FL_OSTID will only be used when a 2.4 echo
3303 * client communicates with a pre-2.4 server */
3304 wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
3305 wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
3309 static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
3311 const struct obdo *wobdo)
3313 __u32 local_flags = 0;
3315 if (lobdo->o_valid & OBD_MD_FLFLAGS)
3316 local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
3319 if (local_flags != 0) {
3320 lobdo->o_valid |= OBD_MD_FLFLAGS;
3321 lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3322 lobdo->o_flags |= local_flags;
3327 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3328 fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
3330 lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
3331 lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
3332 lobdo->o_oi.oi_fid.f_ver = 0;
3336 extern void lustre_swab_obdo (struct obdo *o);
3338 /* request structure for OST's */
3343 /* Key for FIEMAP to be used in get_info calls */
3344 struct ll_fiemap_info_key {
3347 struct ll_user_fiemap fiemap;
3350 extern void lustre_swab_ost_body (struct ost_body *b);
3351 extern void lustre_swab_ost_last_id(__u64 *id);
3352 extern void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
3354 extern void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
3355 extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
3356 extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
3358 extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
3361 extern void lustre_swab_llogd_body (struct llogd_body *d);
3362 extern void lustre_swab_llog_hdr (struct llog_log_hdr *h);
3363 extern void lustre_swab_llogd_conn_body (struct llogd_conn_body *d);
3364 extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
3365 extern void lustre_swab_llog_id(struct llog_logid *lid);
3368 extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
3370 /* Functions for dumping PTLRPC fields */
3371 void dump_rniobuf(struct niobuf_remote *rnb);
3372 void dump_ioo(struct obd_ioobj *nb);
3373 void dump_obdo(struct obdo *oa);
3374 void dump_ost_body(struct ost_body *ob);
3375 void dump_rcs(__u32 *rc);
3377 #define IDX_INFO_MAGIC 0x3D37CC37
3379 /* Index file transfer through the network. The server serializes the index into
3380 * a byte stream which is sent to the client via a bulk transfer */
3384 /* reply: see idx_info_flags below */
3387 /* request & reply: number of lu_idxpage (to be) transferred */
3391 /* request: requested attributes passed down to the iterator API */
3394 /* request & reply: index file identifier (FID) */
3395 struct lu_fid ii_fid;
3397 /* reply: version of the index file before starting to walk the index.
3398 * Please note that the version can be modified at any time during the
3402 /* request: hash to start with;
3403 * reply: hash of the first entry of the first lu_idxpage and hash
3404 * of the entry to read next, if any */
3405 __u64 ii_hash_start;
3408 /* reply: size of keys in lu_idxpages, minimal one if II_FL_VARKEY is
3412 /* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC
3420 extern void lustre_swab_idx_info(struct idx_info *ii);
3422 #define II_END_OFF MDS_DIR_END_OFF /* all entries have been read */
3424 /* List of flags used in idx_info::ii_flags */
3425 enum idx_info_flags {
3426 II_FL_NOHASH = 1 << 0, /* client doesn't care about hash value */
3427 II_FL_VARKEY = 1 << 1, /* keys can be of variable size */
3428 II_FL_VARREC = 1 << 2, /* records can be of variable size */
3429 II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */
3432 #define LIP_MAGIC 0x8A6D6B6C
3434 /* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */
3436 /* 16-byte header */
3439 __u16 lip_nr; /* number of entries in the container */
3440 __u64 lip_pad0; /* additional padding for future use */
3442 /* key/record pairs are stored in the remaining 4080 bytes.
3443 * Depending upon the flags in idx_info::ii_flags, each key/record
3444 * pair might be preceded by:
3446 * - the key size (II_FL_VARKEY is set)
3447 * - the record size (II_FL_VARREC is set)
3449 * For the time being, we only support fixed-size key & record. */
3450 char lip_entries[0];
3452 extern void lustre_swab_lip_header(struct lu_idxpage *lip);
3454 #define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))
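/*
 * Worked example (illustrative only): with the 16-byte header above, a 4KB
 * lu_idxpage leaves LU_PAGE_SIZE - LIP_HDR_SIZE == 4080 bytes for entries;
 * with, say, fixed-size 8-byte keys and 16-byte records that is
 * 4080 / 24 == 170 key/record pairs per page.
 */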
3456 /* Gather all possible types associated with a 4KB container */
3458 struct lu_dirpage lp_dir; /* for MDS_READPAGE */
3459 struct lu_idxpage lp_idx; /* for OBD_IDX_READ */
3460 char lp_array[LU_PAGE_SIZE];
3463 /* security opcodes */
3466 SEC_CTX_INIT_CONT = 802,
3469 SEC_FIRST_OPC = SEC_CTX_INIT
3473 * capa related definitions
3475 #define CAPA_HMAC_MAX_LEN 64
3476 #define CAPA_HMAC_KEY_MAX_LEN 56
3478 /* NB take care when changing the sequence of elements in this struct,
3479 * because the offset info is used in find_capa() */
3480 struct lustre_capa {
3481 struct lu_fid lc_fid; /** fid */
3482 __u64 lc_opc; /** operations allowed */
3483 __u64 lc_uid; /** file owner */
3484 __u64 lc_gid; /** file group */
3485 __u32 lc_flags; /** HMAC algorithm & flags */
3486 __u32 lc_keyid; /** key# used for the capability */
3487 __u32 lc_timeout; /** capa timeout value (sec) */
3488 __u32 lc_expiry; /** expiry time (sec) */
3489 __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
3490 } __attribute__((packed));
3492 extern void lustre_swab_lustre_capa(struct lustre_capa *c);
3494 /** lustre_capa::lc_opc */
3496 CAPA_OPC_BODY_WRITE = 1<<0, /**< write object data */
3497 CAPA_OPC_BODY_READ = 1<<1, /**< read object data */
3498 CAPA_OPC_INDEX_LOOKUP = 1<<2, /**< lookup object fid */
3499 CAPA_OPC_INDEX_INSERT = 1<<3, /**< insert object fid */
3500 CAPA_OPC_INDEX_DELETE = 1<<4, /**< delete object fid */
3501 CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */
3502 CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */
3503 CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */
3504 CAPA_OPC_OSS_DESTROY = 1<<8, /**< destroy oss object */
3505 CAPA_OPC_META_WRITE = 1<<9, /**< write object meta data */
3506 CAPA_OPC_META_READ = 1<<10, /**< read object meta data */
3509 #define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
3510 #define CAPA_OPC_MDS_ONLY \
3511 (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
3512 CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
3513 #define CAPA_OPC_OSS_ONLY \
3514 (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \
3515 CAPA_OPC_OSS_DESTROY)
3516 #define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
3517 #define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
3519 /* MDS capability covers object capability for operations of body r/w
3520 * (dir readpage/sendpage), index lookup/insert/delete and meta data r/w,
3521 * while OSS capability only covers object capability for operations of
3522 * OSS data (file content) r/w/truncate.
3524 static inline int capa_for_mds(struct lustre_capa *c)
3526 return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) != 0;
3529 static inline int capa_for_oss(struct lustre_capa *c)
3531 return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) == 0;
3534 /* lustre_capa::lc_hmac_alg */
3536 CAPA_HMAC_ALG_SHA1 = 1, /**< sha1 algorithm */
3540 #define CAPA_FL_MASK 0x00ffffff
3541 #define CAPA_HMAC_ALG_MASK 0xff000000
3543 struct lustre_capa_key {
3544 __u64 lk_seq; /**< mds# */
3545 __u32 lk_keyid; /**< key# */
3547 __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
3548 } __attribute__((packed));
3550 extern void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
3552 /** The link ea holds 1 \a link_ea_entry for each hardlink */
3553 #define LINK_EA_MAGIC 0x11EAF1DFUL
3554 struct link_ea_header {
3557 __u64 leh_len; /* total size */
3563 /** Hardlink data is name and parent fid.
3564 * Stored in this crazy struct for maximum packing and endian-neutrality
3566 struct link_ea_entry {
3567 /** __u16 stored big-endian, unaligned */
3568 unsigned char lee_reclen[2];
3569 unsigned char lee_parent_fid[sizeof(struct lu_fid)];
3571 }__attribute__((packed));
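/*
 * Illustrative sketch only (hypothetical helper): lee_reclen is stored as two
 * big-endian bytes so the entry stays endian-neutral and needs no alignment.
 */
static inline int link_ea_entry_reclen_example(const struct link_ea_entry *lee)
{
	return (lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
}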
3573 /** fid2path request/reply structure */
3574 struct getinfo_fid2path {
3575 struct lu_fid gf_fid;
3580 } __attribute__((packed));
3582 void lustre_swab_fid2path (struct getinfo_fid2path *gf);
3585 LAYOUT_INTENT_ACCESS = 0,
3586 LAYOUT_INTENT_READ = 1,
3587 LAYOUT_INTENT_WRITE = 2,
3588 LAYOUT_INTENT_GLIMPSE = 3,
3589 LAYOUT_INTENT_TRUNC = 4,
3590 LAYOUT_INTENT_RELEASE = 5,
3591 LAYOUT_INTENT_RESTORE = 6
3594 /* enqueue layout lock with intent */
3595 struct layout_intent {
3596 __u32 li_opc; /* intent operation for enqueue, read, write etc */
3602 void lustre_swab_layout_intent(struct layout_intent *li);
3605 * On the wire version of hsm_progress structure.
3607 * Contains the userspace hsm_progress and some internal fields.
3609 struct hsm_progress_kernel {
3610 /* Field taken from struct hsm_progress */
3613 struct hsm_extent hpk_extent;
3615 __u16 hpk_errval; /* positive val */
3617 /* Additional fields */
3618 __u64 hpk_data_version;
3620 } __attribute__((packed));
3622 extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
3623 extern void lustre_swab_hsm_current_action(struct hsm_current_action *action);
3624 extern void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
3626 extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
3627 extern void lustre_swab_hsm_request(struct hsm_request *hr);
3630 * These are the object update opcodes under UPDATE_OBJ, currently used
3631 * by cross-ref operations between MDTs.
3633 * During a cross-ref operation, the master MDT, to which the client sends
3634 * the request, disassembles the operation into object updates; OSP then
3635 * sends these updates to the remote MDT to be executed.
3637 * Update request format
3638 * magic: UPDATE_BUFFER_MAGIC_V1
3639 * Count: How many updates in the req.
3640 * bufs[0]: the following are packets of object updates.
3642 * type: object_update_op, the op code of update
3643 * fid: The object fid of the update.
3644 * lens/bufs: other parameters of the update.
3646 * type: object_update_op, the op code of update
3647 * fid: The object fid of the update.
3648 * lens/bufs: other parameters of the update.
3650 * update[7]: type: object_update_op, the op code of update
3651 * fid: The object fid of the update.
3652 * lens/bufs: other parameters of the update.
3653 * Currently a maximum of 8 updates per object update request.
3655 *******************************************************************
3656 * update reply format:
3658 * ur_version: UPDATE_REPLY_V1
3659 * ur_count: The count of the reply, which is usually equal
3660 * to the number of updates in the request.
3661 * ur_lens: The reply lengths of each object update.
3663 * replies: 1st update reply [4bytes_ret: other body]
3664 * 2nd update reply [4bytes_ret: other body]
3666 * nth update reply [4bytes_ret: other body]
3668 * For each update reply, the format is
3669 * result (4 bytes) : other data
3672 #define UPDATE_MAX_OPS 10
3673 #define UPDATE_BUFFER_MAGIC_V1 0xBDDE0001
3674 #define UPDATE_BUFFER_MAGIC UPDATE_BUFFER_MAGIC_V1
3675 #define UPDATE_BUF_COUNT 8
3676 enum object_update_op {
3685 OBJ_INDEX_LOOKUP = 9,
3686 OBJ_INDEX_INSERT = 10,
3687 OBJ_INDEX_DELETE = 11,
3694 struct lu_fid u_fid;
3695 __u32 u_lens[UPDATE_BUF_COUNT];
3705 #define UPDATE_REPLY_V1 0x00BD0001
3706 struct update_reply {
3712 void lustre_swab_update_buf(struct update_buf *ub);
3713 void lustre_swab_update_reply_buf(struct update_reply *ur);
3715 /** layout swap request structure
3716 * fid1 and fid2 are in mdt_body
3718 struct mdc_swap_layouts {
3722 void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
3725 struct lustre_handle cd_handle;
3726 struct lu_fid cd_fid;
3727 __u64 cd_data_version;
3728 __u64 cd_reserved[8];
3731 void lustre_swab_close_data(struct close_data *data);