1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2014 CohortFS, LLC
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 */
27 #include "common/likely.h"
// Forward declarations: pool-backed alloc/free helpers, defined later in
// this header. xpool_alloc() prefers the registered xio mempool and falls
// back to malloc; xpool_free() releases either kind.
// NOTE(review): the leading numbers embedded in these lines are original
// line numbers from a numbered listing, not code.
29 static inline int xpool_alloc(struct xio_mempool *pool, uint64_t size,
30 struct xio_reg_mem* mp);
31 static inline void xpool_free(uint64_t size, struct xio_reg_mem* mp);
// NOTE(review): this span is a line-sampled fragment of class XioPool — the
// class header, the xio_piece struct wrapper, and several statement lines
// are not visible here. Embedded leading numbers are listing line numbers.
// Handle of the underlying xio memory pool this object allocates from —
// presumably non-owning (adopted in the ctor below); TODO confirm.
36 struct xio_mempool *handle;
// Debug knobs: enable allocation tracing / message counting. Consulted via
// unlikely() below and in xpool_alloc()/xpool_free().
39 static bool trace_mempool;
40 static bool trace_msgcnt;
// MB = 8: byte count subtracted from sizeof(struct xio_piece) in the
// alloc/free size arithmetic below — presumably the size of the inline
// payload stub inside xio_piece; verify against the full struct definition.
41 static const int MB = 8;
// Per-allocation bookkeeping fields (of struct xio_piece, header not
// visible): the registered-memory descriptor and an intrusive list link.
44 struct xio_reg_mem mp[1];
45 struct xio_piece *next;
// Adopt an existing mempool handle; the piece list ('first') starts empty.
50 explicit XioPool(struct xio_mempool *_handle) :
51 handle(_handle), first(0)
// Destructor fragment: while draining the piece list, scribble 0xcf guard
// bytes over the payload when tracing is on (use-after-free detection),
// then hand the piece back through xpool_free().
59 if (unlikely(trace_mempool)) {
60 memset(p->payload, 0xcf, p->s); // guard bytes
62 xpool_free(sizeof(struct xio_piece)+(p->s)-MB, p->mp);
// alloc(): carve a payload of _s bytes preceded by an xio_piece header.
65 void *alloc(size_t _s)
68 struct xio_reg_mem mp[1];
// Header and payload allocated in one shot; MB bytes of the header overlap
// the payload area, hence the -MB adjustment (mirrors the free above).
70 int e = xpool_alloc(handle, (sizeof(struct xio_piece)-MB) + _s, mp);
// Reinterpret the raw pool address as the bookkeeping header.
74 x = reinterpret_cast<struct xio_piece *>(mp->addr);
// NOTE(review): fragment of the XioPoolStats interior; the size-threshold
// conditionals between the counter updates are not visible here. Embedded
// leading numbers are listing line numbers.
// Outstanding-allocation counter per slab size class; atomics because the
// pools are touched from multiple messenger threads.
97 std::atomic<unsigned> ctr_set[NUM_SLABS] = {};
98 std::atomic<unsigned> msg_cnt = { 0 }; // send msgs
99 std::atomic<unsigned> hook_cnt = { 0 }; // recv msgs
// Emit a snapshot of the counters (defined elsewhere).
102 void dump(const char* tag, uint64_t serial);
// inc(): bump the counter of the slab class that services 'size' —
// presumably a cascade of size <= threshold tests ending in SLAB_MAX.
104 void inc(uint64_t size) {
106 (ctr_set[SLAB_64])++;
110 (ctr_set[SLAB_256])++;
114 (ctr_set[SLAB_1024])++;
118 (ctr_set[SLAB_PAGE])++;
121 (ctr_set[SLAB_MAX])++;
// dec(): mirror of inc() — decrement the matching slab counter.
124 void dec(uint64_t size) {
126 (ctr_set[SLAB_64])--;
130 (ctr_set[SLAB_256])--;
134 (ctr_set[SLAB_1024])--;
138 (ctr_set[SLAB_PAGE])--;
141 (ctr_set[SLAB_MAX])--;
144 void inc_overflow() { ctr_set[SLAB_OVERFLOW]++; }
145 void dec_overflow() { ctr_set[SLAB_OVERFLOW]--; }
// NOTE(review): guard lines from four message/hook counter methods
// (presumably inc_msgcnt/dec_msgcnt/inc_hookcnt/dec_hookcnt — the macros at
// the bottom of this header call methods of those names); the signatures
// and bodies around these lines are not visible in this fragment. Each
// method only does work when the trace_msgcnt debug knob is set.
148 if (unlikely(XioPool::trace_msgcnt)) {
154 if (unlikely(XioPool::trace_msgcnt)) {
160 if (unlikely(XioPool::trace_msgcnt)) {
166 if (unlikely(XioPool::trace_msgcnt)) {
// Global statistics object shared by all pools (defined in a .cc file).
172 extern XioPoolStats xp_stats;
// Allocate 'size' bytes, preferring the registered xio mempool and falling
// back to plain malloc when the pool cannot satisfy the request.
// NOTE(review): fragment — the success-path return, the error-branch
// headers, and the final return are not visible here; embedded leading
// numbers are listing line numbers.
174 static inline int xpool_alloc(struct xio_mempool *pool, uint64_t size,
175 struct xio_reg_mem* mp)
177 // try to allocate from the xio pool
178 int r = xio_mempool_alloc(pool, size, mp);
// On the pool path, account the allocation when tracing is enabled.
180 if (unlikely(XioPool::trace_mempool))
184 // fall back to malloc on errors
185 mp->addr = malloc(size);
// Count heap fallbacks separately so pool exhaustion is visible in stats.
188 if (unlikely(XioPool::trace_mempool))
189 xp_stats.inc_overflow();
// Release memory obtained from xpool_alloc(). The branch condition (not
// visible in this fragment) presumably distinguishes pool-backed memory —
// returned via xio_mempool_free() — from the malloc fallback; verify
// against the full source. Embedded leading numbers are listing numbers.
193 static inline void xpool_free(uint64_t size, struct xio_reg_mem* mp)
// Pool path: un-account the allocation when tracing, then return it.
196 if (unlikely(XioPool::trace_mempool))
198 xio_mempool_free(mp);
199 } else { // from malloc
200 if (unlikely(XioPool::trace_mempool))
201 xp_stats.dec_overflow();
206 #define xpool_inc_msgcnt() \
207 do { xp_stats.inc_msgcnt(); } while (0)
209 #define xpool_dec_msgcnt() \
210 do { xp_stats.dec_msgcnt(); } while (0)
212 #define xpool_inc_hookcnt() \
213 do { xp_stats.inc_hookcnt(); } while (0)
215 #define xpool_dec_hookcnt() \
216 do { xp_stats.dec_hookcnt(); } while (0)
218 #endif /* XIO_POOL_H */