Add the rt linux 4.1.3-rt3 as base
diff --git a/kernel/fs/xfs/xfs_trans.c b/kernel/fs/xfs/xfs_trans.c
new file mode 100644
index 0000000..220ef2c
--- /dev/null
+++ b/kernel/fs/xfs/xfs_trans.c
@@ -0,0 +1,1105 @@
+/*
+ * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * Copyright (C) 2010 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_extent_busy.h"
+#include "xfs_quota.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
+#include "xfs_trace.h"
+#include "xfs_error.h"
+
+kmem_zone_t    *xfs_trans_zone;
+kmem_zone_t    *xfs_log_item_desc_zone;
+
+/*
+ * Initialize the precomputed transaction reservation values
+ * in the mount structure.
+ */
+void
+xfs_trans_init(
+       struct xfs_mount        *mp)
+{
+       xfs_trans_resv_calc(mp, M_RES(mp));
+}
+
+/*
+ * This routine is called to allocate a transaction structure.
+ * The type parameter indicates the type of the transaction.  These
+ * are enumerated in xfs_trans.h.
+ *
+ * Dynamically allocate the transaction structure from the transaction
+ * zone, initialize it, and return it to the caller.
+ */
+xfs_trans_t *
+xfs_trans_alloc(
+       xfs_mount_t     *mp,
+       uint            type)
+{
+       xfs_trans_t     *tp;
+
+       sb_start_intwrite(mp->m_super);
+       tp = _xfs_trans_alloc(mp, type, KM_SLEEP);
+       tp->t_flags |= XFS_TRANS_FREEZE_PROT;
+       return tp;
+}
+
+xfs_trans_t *
+_xfs_trans_alloc(
+       xfs_mount_t     *mp,
+       uint            type,
+       xfs_km_flags_t  memflags)
+{
+       xfs_trans_t     *tp;
+
+       WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
+       atomic_inc(&mp->m_active_trans);
+
+       tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
+       tp->t_magic = XFS_TRANS_HEADER_MAGIC;
+       tp->t_type = type;
+       tp->t_mountp = mp;
+       INIT_LIST_HEAD(&tp->t_items);
+       INIT_LIST_HEAD(&tp->t_busy);
+       return tp;
+}
+
+/*
+ * Free the transaction structure.  If there is more clean up
+ * to do when the structure is freed, add it here.
+ */
+STATIC void
+xfs_trans_free(
+       struct xfs_trans        *tp)
+{
+       xfs_extent_busy_sort(&tp->t_busy);
+       xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
+
+       atomic_dec(&tp->t_mountp->m_active_trans);
+       if (tp->t_flags & XFS_TRANS_FREEZE_PROT)
+               sb_end_intwrite(tp->t_mountp->m_super);
+       xfs_trans_free_dqinfo(tp);
+       kmem_zone_free(xfs_trans_zone, tp);
+}
+
+/*
+ * This is called to create a new transaction which will share the
+ * permanent log reservation of the given transaction.  The remaining
+ * unused block and rt extent reservations are also inherited.  This
+ * implies that the original transaction is no longer allowed to allocate
+ * blocks.  Locks and log items, however, are not inherited.  They must
+ * be added to the new transaction explicitly.
+ */
+xfs_trans_t *
+xfs_trans_dup(
+       xfs_trans_t     *tp)
+{
+       xfs_trans_t     *ntp;
+
+       ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+
+       /*
+        * Initialize the new transaction structure.
+        */
+       ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
+       ntp->t_type = tp->t_type;
+       ntp->t_mountp = tp->t_mountp;
+       INIT_LIST_HEAD(&ntp->t_items);
+       INIT_LIST_HEAD(&ntp->t_busy);
+
+       ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+       ASSERT(tp->t_ticket != NULL);
+
+       ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
+                      (tp->t_flags & XFS_TRANS_RESERVE) |
+                      (tp->t_flags & XFS_TRANS_FREEZE_PROT);
+       /* We gave our writer reference to the new transaction */
+       tp->t_flags &= ~XFS_TRANS_FREEZE_PROT;
+       ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
+       ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
+       tp->t_blk_res = tp->t_blk_res_used;
+       ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
+       tp->t_rtx_res = tp->t_rtx_res_used;
+       ntp->t_pflags = tp->t_pflags;
+
+       xfs_trans_dup_dqinfo(tp, ntp);
+
+       atomic_inc(&tp->t_mountp->m_active_trans);
+       return ntp;
+}
+
+/*
+ * This is called to reserve free disk blocks and log space for the
+ * given transaction.  This must be done before allocating any resources
+ * within the transaction.
+ *
+ * This will return ENOSPC if there are not enough blocks available.
+ * It will sleep waiting for available log space.
+ * A permanent log reservation is requested by setting XFS_TRANS_PERM_LOG_RES
+ * in resp->tr_logflags and is used by long running transactions.  If any one
+ * of the reservations fails then they will all be backed out.
+ *
+ * This does not do quota reservations. That typically is done by the
+ * caller afterwards.
+ */
+int
+xfs_trans_reserve(
+       struct xfs_trans        *tp,
+       struct xfs_trans_res    *resp,
+       uint                    blocks,
+       uint                    rtextents)
+{
+       int             error = 0;
+       bool            rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
+
+       /* Mark this thread as being in a transaction */
+       current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
+
+       /*
+        * Attempt to reserve the needed disk blocks by decrementing
+        * the number needed from the number available.  This will
+        * fail if the count would go below zero.
+        */
+       if (blocks > 0) {
+               error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
+               if (error != 0) {
+                       current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
+                       return -ENOSPC;
+               }
+               tp->t_blk_res += blocks;
+       }
+
+       /*
+        * Reserve the log space needed for this transaction.
+        */
+       if (resp->tr_logres > 0) {
+               bool    permanent = false;
+
+               ASSERT(tp->t_log_res == 0 ||
+                      tp->t_log_res == resp->tr_logres);
+               ASSERT(tp->t_log_count == 0 ||
+                      tp->t_log_count == resp->tr_logcount);
+
+               if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
+                       tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
+                       permanent = true;
+               } else {
+                       ASSERT(tp->t_ticket == NULL);
+                       ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
+               }
+
+               if (tp->t_ticket != NULL) {
+                       ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
+                       error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
+               } else {
+                       error = xfs_log_reserve(tp->t_mountp,
+                                               resp->tr_logres,
+                                               resp->tr_logcount,
+                                               &tp->t_ticket, XFS_TRANSACTION,
+                                               permanent, tp->t_type);
+               }
+
+               if (error)
+                       goto undo_blocks;
+
+               tp->t_log_res = resp->tr_logres;
+               tp->t_log_count = resp->tr_logcount;
+       }
+
+       /*
+        * Attempt to reserve the needed realtime extents by decrementing
+        * the number needed from the number available.  This will
+        * fail if the count would go below zero.
+        */
+       if (rtextents > 0) {
+               error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
+               if (error) {
+                       error = -ENOSPC;
+                       goto undo_log;
+               }
+               tp->t_rtx_res += rtextents;
+       }
+
+       return 0;
+
+       /*
+        * Error cases jump to one of these labels to undo any
+        * reservations which have already been performed.
+        */
+undo_log:
+       if (resp->tr_logres > 0) {
+               int             log_flags;
+
+               if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
+                       log_flags = XFS_LOG_REL_PERM_RESERV;
+               } else {
+                       log_flags = 0;
+               }
+               xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
+               tp->t_ticket = NULL;
+               tp->t_log_res = 0;
+               tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
+       }
+
+undo_blocks:
+       if (blocks > 0) {
+               xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
+               tp->t_blk_res = 0;
+       }
+
+       current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
+
+       return error;
+}
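+
+/*
+ * Illustrative sketch of the usual caller pattern around xfs_trans_alloc()
+ * and xfs_trans_reserve() on this kernel.  The transaction type, the
+ * tr_ichange reservation and the "ip" inode are stand-ins for whatever the
+ * real caller needs:
+ *
+ *	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+ *	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+ *	if (error) {
+ *		xfs_trans_cancel(tp, 0);
+ *		return error;
+ *	}
+ *	xfs_ilock(ip, XFS_ILOCK_EXCL);
+ *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ *	... modify and log the joined items ...
+ *	error = xfs_trans_commit(tp, 0);
+ *
+ * Passing XFS_ILOCK_EXCL to xfs_trans_ijoin() hands the inode lock to the
+ * transaction, so commit (or cancel) drops it for the caller.
+ */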
+
+/*
+ * Record the indicated change to the given field for application
+ * to the file system's superblock when the transaction commits.
+ * For now, just store the change in the transaction structure.
+ *
+ * Mark the transaction structure to indicate that the superblock
+ * needs to be updated before committing.
+ *
+ * Because we may not be keeping track of allocated/free inodes and
+ * used filesystem blocks in the superblock, we do not mark the
+ * superblock dirty in this transaction if we modify these fields.
+ * We still need to update the transaction deltas so that they get
+ * applied to the incore superblock, but we don't want them to
+ * cause the superblock to get locked and logged if these are the
+ * only fields in the superblock that the transaction modifies.
+ */
+void
+xfs_trans_mod_sb(
+       xfs_trans_t     *tp,
+       uint            field,
+       int64_t         delta)
+{
+       uint32_t        flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
+       xfs_mount_t     *mp = tp->t_mountp;
+
+       switch (field) {
+       case XFS_TRANS_SB_ICOUNT:
+               tp->t_icount_delta += delta;
+               if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+                       flags &= ~XFS_TRANS_SB_DIRTY;
+               break;
+       case XFS_TRANS_SB_IFREE:
+               tp->t_ifree_delta += delta;
+               if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+                       flags &= ~XFS_TRANS_SB_DIRTY;
+               break;
+       case XFS_TRANS_SB_FDBLOCKS:
+               /*
+                * Track the number of blocks allocated in the
+                * transaction.  Make sure it does not exceed the
+                * number reserved.
+                */
+               if (delta < 0) {
+                       tp->t_blk_res_used += (uint)-delta;
+                       ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
+               }
+               tp->t_fdblocks_delta += delta;
+               if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+                       flags &= ~XFS_TRANS_SB_DIRTY;
+               break;
+       case XFS_TRANS_SB_RES_FDBLOCKS:
+               /*
+                * The allocation has already been applied to the
+                * in-core superblock's counter.  This should only
+                * be applied to the on-disk superblock.
+                */
+               ASSERT(delta < 0);
+               tp->t_res_fdblocks_delta += delta;
+               if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+                       flags &= ~XFS_TRANS_SB_DIRTY;
+               break;
+       case XFS_TRANS_SB_FREXTENTS:
+               /*
+                * Track the number of blocks allocated in the
+                * transaction.  Make sure it does not exceed the
+                * number reserved.
+                */
+               if (delta < 0) {
+                       tp->t_rtx_res_used += (uint)-delta;
+                       ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
+               }
+               tp->t_frextents_delta += delta;
+               break;
+       case XFS_TRANS_SB_RES_FREXTENTS:
+               /*
+                * The allocation has already been applied to the
+                * in-core superblock's counter.  This should only
+                * be applied to the on-disk superblock.
+                */
+               ASSERT(delta < 0);
+               tp->t_res_frextents_delta += delta;
+               break;
+       case XFS_TRANS_SB_DBLOCKS:
+               ASSERT(delta > 0);
+               tp->t_dblocks_delta += delta;
+               break;
+       case XFS_TRANS_SB_AGCOUNT:
+               ASSERT(delta > 0);
+               tp->t_agcount_delta += delta;
+               break;
+       case XFS_TRANS_SB_IMAXPCT:
+               tp->t_imaxpct_delta += delta;
+               break;
+       case XFS_TRANS_SB_REXTSIZE:
+               tp->t_rextsize_delta += delta;
+               break;
+       case XFS_TRANS_SB_RBMBLOCKS:
+               tp->t_rbmblocks_delta += delta;
+               break;
+       case XFS_TRANS_SB_RBLOCKS:
+               tp->t_rblocks_delta += delta;
+               break;
+       case XFS_TRANS_SB_REXTENTS:
+               tp->t_rextents_delta += delta;
+               break;
+       case XFS_TRANS_SB_REXTSLOG:
+               tp->t_rextslog_delta += delta;
+               break;
+       default:
+               ASSERT(0);
+               return;
+       }
+
+       tp->t_flags |= flags;
+}
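+
+/*
+ * For illustration (the call site is not in this file): block allocation
+ * paths account a newly allocated extent against the transaction's
+ * reservation with something along the lines of
+ *
+ *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
+ *
+ * i.e. a negative delta for blocks consumed, which the XFS_TRANS_SB_FDBLOCKS
+ * case above then checks against t_blk_res.
+ */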
+
+/*
+ * xfs_trans_apply_sb_deltas() is called from the commit code
+ * to bring the superblock buffer into the current transaction
+ * and modify it as requested by earlier calls to xfs_trans_mod_sb().
+ *
+ * For now we just look at each field allowed to change and change
+ * it if necessary.
+ */
+STATIC void
+xfs_trans_apply_sb_deltas(
+       xfs_trans_t     *tp)
+{
+       xfs_dsb_t       *sbp;
+       xfs_buf_t       *bp;
+       int             whole = 0;
+
+       bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
+       sbp = XFS_BUF_TO_SBP(bp);
+
+       /*
+        * Check that superblock mods match the mods made to AGF counters.
+        */
+       ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
+              (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
+               tp->t_ag_btree_delta));
+
+       /*
+        * Only update the superblock counters if we are logging them
+        */
+       if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
+               if (tp->t_icount_delta)
+                       be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
+               if (tp->t_ifree_delta)
+                       be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
+               if (tp->t_fdblocks_delta)
+                       be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
+               if (tp->t_res_fdblocks_delta)
+                       be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
+       }
+
+       if (tp->t_frextents_delta)
+               be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
+       if (tp->t_res_frextents_delta)
+               be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
+
+       if (tp->t_dblocks_delta) {
+               be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
+               whole = 1;
+       }
+       if (tp->t_agcount_delta) {
+               be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
+               whole = 1;
+       }
+       if (tp->t_imaxpct_delta) {
+               sbp->sb_imax_pct += tp->t_imaxpct_delta;
+               whole = 1;
+       }
+       if (tp->t_rextsize_delta) {
+               be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
+               whole = 1;
+       }
+       if (tp->t_rbmblocks_delta) {
+               be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
+               whole = 1;
+       }
+       if (tp->t_rblocks_delta) {
+               be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
+               whole = 1;
+       }
+       if (tp->t_rextents_delta) {
+               be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
+               whole = 1;
+       }
+       if (tp->t_rextslog_delta) {
+               sbp->sb_rextslog += tp->t_rextslog_delta;
+               whole = 1;
+       }
+
+       xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
+       if (whole)
+               /*
+                * Log the whole thing, the fields are noncontiguous.
+                */
+               xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
+       else
+               /*
+                * Since all the modifiable fields are contiguous, we
+                * can get away with this.
+                */
+               xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
+                                 offsetof(xfs_dsb_t, sb_frextents) +
+                                 sizeof(sbp->sb_frextents) - 1);
+}
+
+STATIC int
+xfs_sb_mod8(
+       uint8_t                 *field,
+       int8_t                  delta)
+{
+       int8_t                  counter = *field;
+
+       counter += delta;
+       if (counter < 0) {
+               ASSERT(0);
+               return -EINVAL;
+       }
+       *field = counter;
+       return 0;
+}
+
+STATIC int
+xfs_sb_mod32(
+       uint32_t                *field,
+       int32_t                 delta)
+{
+       int32_t                 counter = *field;
+
+       counter += delta;
+       if (counter < 0) {
+               ASSERT(0);
+               return -EINVAL;
+       }
+       *field = counter;
+       return 0;
+}
+
+STATIC int
+xfs_sb_mod64(
+       uint64_t                *field,
+       int64_t                 delta)
+{
+       int64_t                 counter = *field;
+
+       counter += delta;
+       if (counter < 0) {
+               ASSERT(0);
+               return -EINVAL;
+       }
+       *field = counter;
+       return 0;
+}
+
+/*
+ * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
+ * and apply superblock counter changes to the in-core superblock.  The
+ * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
+ * applied to the in-core superblock; those deltas are assumed to have been
+ * applied there already.
+ *
+ * If we are not logging superblock counters, then the inode allocated/free and
+ * used block counts are not updated in the on disk superblock. In this case,
+ * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
+ * still need to update the incore superblock with the changes.
+ */
+void
+xfs_trans_unreserve_and_mod_sb(
+       struct xfs_trans        *tp)
+{
+       struct xfs_mount        *mp = tp->t_mountp;
+       bool                    rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
+       int64_t                 blkdelta = 0;
+       int64_t                 rtxdelta = 0;
+       int64_t                 idelta = 0;
+       int64_t                 ifreedelta = 0;
+       int                     error;
+
+       /* calculate deltas */
+       if (tp->t_blk_res > 0)
+               blkdelta = tp->t_blk_res;
+       if ((tp->t_fdblocks_delta != 0) &&
+           (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
+            (tp->t_flags & XFS_TRANS_SB_DIRTY)))
+               blkdelta += tp->t_fdblocks_delta;
+
+       if (tp->t_rtx_res > 0)
+               rtxdelta = tp->t_rtx_res;
+       if ((tp->t_frextents_delta != 0) &&
+           (tp->t_flags & XFS_TRANS_SB_DIRTY))
+               rtxdelta += tp->t_frextents_delta;
+
+       if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
+            (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
+               idelta = tp->t_icount_delta;
+               ifreedelta = tp->t_ifree_delta;
+       }
+
+       /* apply the per-cpu counters */
+       if (blkdelta) {
+               error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
+               if (error)
+                       goto out;
+       }
+
+       if (idelta) {
+               error = xfs_mod_icount(mp, idelta);
+               if (error)
+                       goto out_undo_fdblocks;
+       }
+
+       if (ifreedelta) {
+               error = xfs_mod_ifree(mp, ifreedelta);
+               if (error)
+                       goto out_undo_icount;
+       }
+
+       if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
+               return;
+
+       /* apply remaining deltas */
+       spin_lock(&mp->m_sb_lock);
+       if (rtxdelta) {
+               error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
+               if (error)
+                       goto out_undo_ifree;
+       }
+
+       if (tp->t_dblocks_delta != 0) {
+               error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
+               if (error)
+                       goto out_undo_frextents;
+       }
+       if (tp->t_agcount_delta != 0) {
+               error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
+               if (error)
+                       goto out_undo_dblocks;
+       }
+       if (tp->t_imaxpct_delta != 0) {
+               error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
+               if (error)
+                       goto out_undo_agcount;
+       }
+       if (tp->t_rextsize_delta != 0) {
+               error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
+                                    tp->t_rextsize_delta);
+               if (error)
+                       goto out_undo_imaxpct;
+       }
+       if (tp->t_rbmblocks_delta != 0) {
+               error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
+                                    tp->t_rbmblocks_delta);
+               if (error)
+                       goto out_undo_rextsize;
+       }
+       if (tp->t_rblocks_delta != 0) {
+               error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
+               if (error)
+                       goto out_undo_rbmblocks;
+       }
+       if (tp->t_rextents_delta != 0) {
+               error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
+                                    tp->t_rextents_delta);
+               if (error)
+                       goto out_undo_rblocks;
+       }
+       if (tp->t_rextslog_delta != 0) {
+               error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
+                                    tp->t_rextslog_delta);
+               if (error)
+                       goto out_undo_rextents;
+       }
+       spin_unlock(&mp->m_sb_lock);
+       return;
+
+out_undo_rextents:
+       if (tp->t_rextents_delta)
+               xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
+out_undo_rblocks:
+       if (tp->t_rblocks_delta)
+               xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
+out_undo_rbmblocks:
+       if (tp->t_rbmblocks_delta)
+               xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
+out_undo_rextsize:
+       if (tp->t_rextsize_delta)
+               xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
+out_undo_imaxpct:
+       if (tp->t_imaxpct_delta)
+               xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
+out_undo_agcount:
+       if (tp->t_agcount_delta)
+               xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
+out_undo_dblocks:
+       if (tp->t_dblocks_delta)
+               xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
+out_undo_frextents:
+       if (rtxdelta)
+               xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
+out_undo_ifree:
+       spin_unlock(&mp->m_sb_lock);
+       if (ifreedelta)
+               xfs_mod_ifree(mp, -ifreedelta);
+out_undo_icount:
+       if (idelta)
+               xfs_mod_icount(mp, -idelta);
+out_undo_fdblocks:
+       if (blkdelta)
+               xfs_mod_fdblocks(mp, -blkdelta, rsvd);
+out:
+       ASSERT(error == 0);
+       return;
+}
+
+/*
+ * Add the given log item to the transaction's list of log items.
+ *
+ * The log item will now point to its new descriptor with its li_desc field.
+ */
+void
+xfs_trans_add_item(
+       struct xfs_trans        *tp,
+       struct xfs_log_item     *lip)
+{
+       struct xfs_log_item_desc *lidp;
+
+       ASSERT(lip->li_mountp == tp->t_mountp);
+       ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
+
+       lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);
+
+       lidp->lid_item = lip;
+       lidp->lid_flags = 0;
+       list_add_tail(&lidp->lid_trans, &tp->t_items);
+
+       lip->li_desc = lidp;
+}
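+
+/*
+ * Illustrative only: the per-item "join" helpers are thin wrappers around
+ * xfs_trans_add_item().  xfs_trans_ijoin(), for instance, attaches an
+ * inode's log item roughly like
+ *
+ *	struct xfs_inode_log_item *iip = ip->i_itemp;
+ *
+ *	xfs_trans_add_item(tp, &iip->ili_item);
+ */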
+
+STATIC void
+xfs_trans_free_item_desc(
+       struct xfs_log_item_desc *lidp)
+{
+       list_del_init(&lidp->lid_trans);
+       kmem_zone_free(xfs_log_item_desc_zone, lidp);
+}
+
+/*
+ * Unlink and free the given descriptor.
+ */
+void
+xfs_trans_del_item(
+       struct xfs_log_item     *lip)
+{
+       xfs_trans_free_item_desc(lip->li_desc);
+       lip->li_desc = NULL;
+}
+
+/*
+ * Unlock all of the items of a transaction and free all the descriptors
+ * of that transaction.
+ */
+void
+xfs_trans_free_items(
+       struct xfs_trans        *tp,
+       xfs_lsn_t               commit_lsn,
+       int                     flags)
+{
+       struct xfs_log_item_desc *lidp, *next;
+
+       list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
+               struct xfs_log_item     *lip = lidp->lid_item;
+
+               lip->li_desc = NULL;
+
+               if (commit_lsn != NULLCOMMITLSN)
+                       lip->li_ops->iop_committing(lip, commit_lsn);
+               if (flags & XFS_TRANS_ABORT)
+                       lip->li_flags |= XFS_LI_ABORTED;
+               lip->li_ops->iop_unlock(lip);
+
+               xfs_trans_free_item_desc(lidp);
+       }
+}
+
+static inline void
+xfs_log_item_batch_insert(
+       struct xfs_ail          *ailp,
+       struct xfs_ail_cursor   *cur,
+       struct xfs_log_item     **log_items,
+       int                     nr_items,
+       xfs_lsn_t               commit_lsn)
+{
+       int     i;
+
+       spin_lock(&ailp->xa_lock);
+       /* xfs_trans_ail_update_bulk drops ailp->xa_lock */
+       xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
+
+       for (i = 0; i < nr_items; i++) {
+               struct xfs_log_item *lip = log_items[i];
+
+               lip->li_ops->iop_unpin(lip, 0);
+       }
+}
+
+/*
+ * Bulk operation version of xfs_trans_committed that takes a log vector of
+ * items to insert into the AIL. This uses bulk AIL insertion techniques to
+ * minimise lock traffic.
+ *
+ * If we are called with the aborted flag set, it is because a log write during
+ * a CIL checkpoint commit has failed. In this case, all the items in the
+ * checkpoint have already gone through iop_committed and iop_unlock, which
+ * means that checkpoint commit abort handling is treated exactly the same
+ * as an iclog write error even though we haven't started any IO yet. Hence in
+ * this case all we need to do is iop_committed processing, followed by an
+ * iop_unpin(aborted) call.
+ *
+ * The AIL cursor is used to optimise the insert process. If commit_lsn is not
+ * at the end of the AIL, the insert cursor avoids the need to walk
+ * the AIL to find the insertion point on every xfs_log_item_batch_insert()
+ * call. This saves a lot of needless list walking and is a net win, even
+ * though it slightly increases the amount of AIL lock traffic to set it up
+ * and tear it down.
+ */
+void
+xfs_trans_committed_bulk(
+       struct xfs_ail          *ailp,
+       struct xfs_log_vec      *log_vector,
+       xfs_lsn_t               commit_lsn,
+       int                     aborted)
+{
+#define LOG_ITEM_BATCH_SIZE    32
+       struct xfs_log_item     *log_items[LOG_ITEM_BATCH_SIZE];
+       struct xfs_log_vec      *lv;
+       struct xfs_ail_cursor   cur;
+       int                     i = 0;
+
+       spin_lock(&ailp->xa_lock);
+       xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
+       spin_unlock(&ailp->xa_lock);
+
+       /* unpin all the log items */
+       for (lv = log_vector; lv; lv = lv->lv_next) {
+               struct xfs_log_item     *lip = lv->lv_item;
+               xfs_lsn_t               item_lsn;
+
+               if (aborted)
+                       lip->li_flags |= XFS_LI_ABORTED;
+               item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
+
+               /* item_lsn of -1 means the item needs no further processing */
+               if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
+                       continue;
+
+               /*
+                * if we are aborting the operation, no point in inserting the
+                * object into the AIL as we are in a shutdown situation.
+                */
+               if (aborted) {
+                       ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
+                       lip->li_ops->iop_unpin(lip, 1);
+                       continue;
+               }
+
+               if (item_lsn != commit_lsn) {
+
+                       /*
+                        * Not a bulk update option due to unusual item_lsn.
+                        * Push into AIL immediately, rechecking the lsn once
+                        * we have the ail lock. Then unpin the item. This does
+                        * not affect the AIL cursor the bulk insert path is
+                        * using.
+                        */
+                       spin_lock(&ailp->xa_lock);
+                       if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
+                               xfs_trans_ail_update(ailp, lip, item_lsn);
+                       else
+                               spin_unlock(&ailp->xa_lock);
+                       lip->li_ops->iop_unpin(lip, 0);
+                       continue;
+               }
+
+               /* Item is a candidate for bulk AIL insert.  */
+               log_items[i++] = lv->lv_item;
+               if (i >= LOG_ITEM_BATCH_SIZE) {
+                       xfs_log_item_batch_insert(ailp, &cur, log_items,
+                                       LOG_ITEM_BATCH_SIZE, commit_lsn);
+                       i = 0;
+               }
+       }
+
+       /* make sure we insert the remainder! */
+       if (i)
+               xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
+
+       spin_lock(&ailp->xa_lock);
+       xfs_trans_ail_cursor_done(&cur);
+       spin_unlock(&ailp->xa_lock);
+}
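+
+/*
+ * For context (a sketch from memory of the CIL code, not taken from this
+ * file): the caller is the CIL checkpoint completion path, which hands the
+ * checkpoint's log vector chain straight to this function, roughly
+ *
+ *	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
+ *				 ctx->start_lsn, abort);
+ *
+ * from xlog_cil_committed().
+ */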
+
+/*
+ * Commit the given transaction to the log.
+ *
+ * The XFS disk error handling mechanism is not based on a typical
+ * transaction abort mechanism. Logically after the filesystem
+ * gets marked 'SHUTDOWN', we can't let any new transactions
+ * be durable - ie. committed to disk - because some metadata might
+ * be inconsistent. In such cases, this returns an error, and the
+ * caller may assume that all locked objects joined to the transaction
+ * have already been unlocked as if the commit had succeeded.
+ * Do not reference the transaction structure after this call.
+ */
+int
+xfs_trans_commit(
+       struct xfs_trans        *tp,
+       uint                    flags)
+{
+       struct xfs_mount        *mp = tp->t_mountp;
+       xfs_lsn_t               commit_lsn = -1;
+       int                     error = 0;
+       int                     log_flags = 0;
+       int                     sync = tp->t_flags & XFS_TRANS_SYNC;
+
+       /*
+        * Determine whether this commit is releasing a permanent
+        * log reservation or not.
+        */
+       if (flags & XFS_TRANS_RELEASE_LOG_RES) {
+               ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+               log_flags = XFS_LOG_REL_PERM_RESERV;
+       }
+
+       /*
+        * If there is nothing to be logged by the transaction,
+        * then unlock all of the items associated with the
+        * transaction and free the transaction structure.
+        * Also make sure to return any reserved blocks to
+        * the free pool.
+        */
+       if (!(tp->t_flags & XFS_TRANS_DIRTY))
+               goto out_unreserve;
+
+       if (XFS_FORCED_SHUTDOWN(mp)) {
+               error = -EIO;
+               goto out_unreserve;
+       }
+
+       ASSERT(tp->t_ticket != NULL);
+
+       /*
+        * If we need to update the superblock, then do it now.
+        */
+       if (tp->t_flags & XFS_TRANS_SB_DIRTY)
+               xfs_trans_apply_sb_deltas(tp);
+       xfs_trans_apply_dquot_deltas(tp);
+
+       xfs_log_commit_cil(mp, tp, &commit_lsn, flags);
+
+       current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
+       xfs_trans_free(tp);
+
+       /*
+        * If the transaction needs to be synchronous, then force the
+        * log out now and wait for it.
+        */
+       if (sync) {
+               error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
+               XFS_STATS_INC(xs_trans_sync);
+       } else {
+               XFS_STATS_INC(xs_trans_async);
+       }
+
+       return error;
+
+out_unreserve:
+       xfs_trans_unreserve_and_mod_sb(tp);
+
+       /*
+        * It is indeed possible for the transaction to be not dirty but
+        * the dqinfo portion to be.  All that means is that we have some
+        * (non-persistent) quota reservations that need to be unreserved.
+        */
+       xfs_trans_unreserve_and_mod_dquots(tp);
+       if (tp->t_ticket) {
+               commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
+               if (commit_lsn == -1 && !error)
+                       error = -EIO;
+       }
+       current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
+       xfs_trans_free_items(tp, NULLCOMMITLSN, error ? XFS_TRANS_ABORT : 0);
+       xfs_trans_free(tp);
+
+       XFS_STATS_INC(xs_trans_empty);
+       return error;
+}
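+
+/*
+ * Illustrative sketch: a caller that needs the commit on stable storage
+ * before returning marks the transaction synchronous before committing,
+ * along the lines of
+ *
+ *	xfs_trans_set_sync(tp);
+ *	error = xfs_trans_commit(tp, 0);
+ *
+ * which takes the XFS_TRANS_SYNC branch above and forces and waits for the
+ * log up to commit_lsn.
+ */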
+
+/*
+ * Unlock all of the transaction's items and free the transaction.
+ * The transaction must not have modified any of its items, because
+ * there is no way to restore them to their previous state.
+ *
+ * If the transaction has made a log reservation, make sure to release
+ * it as well.
+ */
+void
+xfs_trans_cancel(
+       xfs_trans_t             *tp,
+       int                     flags)
+{
+       int                     log_flags;
+       xfs_mount_t             *mp = tp->t_mountp;
+
+       /*
+        * See if the caller is being too lazy to figure out if
+        * the transaction really needs an abort.
+        */
+       if ((flags & XFS_TRANS_ABORT) && !(tp->t_flags & XFS_TRANS_DIRTY))
+               flags &= ~XFS_TRANS_ABORT;
+       /*
+        * See if the caller is relying on us to shut down the
+        * filesystem.  This happens in paths where we detect
+        * corruption and decide to give up.
+        */
+       if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
+               XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
+               xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+       }
+#ifdef DEBUG
+       if (!(flags & XFS_TRANS_ABORT) && !XFS_FORCED_SHUTDOWN(mp)) {
+               struct xfs_log_item_desc *lidp;
+
+               list_for_each_entry(lidp, &tp->t_items, lid_trans)
+                       ASSERT(lidp->lid_item->li_type != XFS_LI_EFD);
+       }
+#endif
+       xfs_trans_unreserve_and_mod_sb(tp);
+       xfs_trans_unreserve_and_mod_dquots(tp);
+
+       if (tp->t_ticket) {
+               if (flags & XFS_TRANS_RELEASE_LOG_RES) {
+                       ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+                       log_flags = XFS_LOG_REL_PERM_RESERV;
+               } else {
+                       log_flags = 0;
+               }
+               xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
+       }
+
+       /* mark this thread as no longer being in a transaction */
+       current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
+
+       xfs_trans_free_items(tp, NULLCOMMITLSN, flags);
+       xfs_trans_free(tp);
+}
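+
+/*
+ * Illustrative sketch: an error path that holds a permanent log reservation
+ * and may already have dirtied items cancels with both flags set, roughly
+ *
+ *	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+ *
+ * so the log ticket is released and dirty items are marked XFS_LI_ABORTED.
+ * A clean transaction that only holds a reservation is cancelled with a
+ * flags value of 0.
+ */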
+
+/*
+ * Roll from one trans in the sequence of PERMANENT transactions to
+ * the next: permanent transactions are only flushed out when
+ * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want as soon
+ * as possible to let chunks of it go to the log. So we commit the
+ * chunk we've been working on and get a new transaction to continue.
+ */
+int
+xfs_trans_roll(
+       struct xfs_trans        **tpp,
+       struct xfs_inode        *dp)
+{
+       struct xfs_trans        *trans;
+       struct xfs_trans_res    tres;
+       int                     error;
+
+       /*
+        * Ensure that the inode is always logged.
+        */
+       trans = *tpp;
+       xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
+
+       /*
+        * Copy the critical parameters from one trans to the next.
+        */
+       tres.tr_logres = trans->t_log_res;
+       tres.tr_logcount = trans->t_log_count;
+       *tpp = xfs_trans_dup(trans);
+
+       /*
+        * Commit the current transaction.
+        * If this commit failed, then it'd just unlock those items that
+        * are not marked ihold. That also means that a filesystem shutdown
+        * is in progress. The caller takes the responsibility to cancel
+        * the duplicate transaction that gets returned.
+        */
+       error = xfs_trans_commit(trans, 0);
+       if (error)
+               return error;
+
+       trans = *tpp;
+
+       /*
+        * transaction commit worked ok so we can drop the extra ticket
+        * reference that we gained in xfs_trans_dup()
+        */
+       xfs_log_ticket_put(trans->t_ticket);
+
+       /*
+        * Reserve space in the log for the next transaction.
+        * This also pushes items in the "AIL", the list of logged items,
+        * out to disk if they are taking up space at the tail of the log
+        * that we want to use.  This requires that either nothing be locked
+        * across this call, or that anything that is locked be logged in
+        * the prior and the next transactions.
+        */
+       tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
+       error = xfs_trans_reserve(trans, &tres, 0, 0);
+       if (error)
+               return error;
+
+       /*
+        * Ensure that the inode is in the new transaction and locked.
+        */
+       xfs_trans_ijoin(trans, dp, 0);
+       return 0;
+}
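+
+/*
+ * Illustrative sketch of the rolling pattern: a long-running operation keeps
+ * the inode locked across each roll and retries its work until done, roughly
+ *
+ *	while (more work to do) {
+ *		... do one bounded chunk of work against tp ...
+ *		error = xfs_trans_roll(&tp, ip);
+ *		if (error)
+ *			break;
+ *	}
+ *
+ * On failure the caller still owns the (possibly duplicated) transaction and
+ * must cancel it; callers such as xfs_itruncate_extents() use this pattern.
+ */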