1 /* Copyright 2000-2005 The Apache Software Foundation or its licensors, as
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include "apr_general.h"
19 #include "apr_errno.h"
21 #include "apr_strings.h"
23 typedef struct rmm_block_t {
/* NOTE(review): the fields of rmm_block_t are elided from this view; the
 * code below reads blk->size, blk->prev and blk->next, all stored as
 * apr_rmm_off_t offsets (not pointers) so the managed region stays valid
 * when mapped at different base addresses in different processes. */
29 /* Always at our apr_rmm_off(0):
31 typedef struct rmm_hdr_block_t {
/* Heads of the two intrusive lists threaded through the region; a value
 * of 0 means "empty" since offset 0 is this header itself. */
33     apr_rmm_off_t /* rmm_block_t */ firstused;
34     apr_rmm_off_t /* rmm_block_t */ firstfree;
/* Aligned sizes of the shared header and the per-block bookkeeping node;
 * every user allocation is preceded by one RMM_BLOCK_SIZE node. */
37 #define RMM_HDR_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_hdr_block_t)))
38 #define RMM_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_block_t)))
/* Per-process handle: 'base' points at the rmm_hdr_block_t at offset 0
 * of the managed region (remaining apr_rmm_t fields are elided here). */
42 rmm_hdr_block_t *base;
/* Walk the list whose head offset is 'next' looking for the node at
 * offset 'find', tracking the previously visited node in 'prev'.
 * When 'includes' is non-zero the fall-through paths return the
 * predecessor's offset (used to locate an insertion point); otherwise
 * they return 0 (meaning "not found").
 * NOTE(review): the loop header and the advance/match statements are
 * elided from this view — the exact traversal order is unverified here. */
47 static apr_rmm_off_t find_block_by_offset(apr_rmm_t *rmm, apr_rmm_off_t next,
48                                           apr_rmm_off_t find, int includes)
50     apr_rmm_off_t prev = 0;
/* Offsets are relative to the region base, so resolve via pointer math. */
53         struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);
60             return includes ? prev : 0;
65     return includes ? prev : 0;
/* Best-fit search of the free list for a block of at least 'size' bytes
 * ('size' already includes RMM_BLOCK_SIZE — see callers).  Returns the
 * winning block's offset via 'best' (the elided return statements are not
 * visible in this view).  An exact-size match short-circuits the scan. */
68 static apr_rmm_off_t find_block_of_size(apr_rmm_t *rmm, apr_size_t size)
70     apr_rmm_off_t next = rmm->base->firstfree;
71     apr_rmm_off_t best = 0;
72     apr_rmm_off_t bestsize = 0;
75         struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);
/* Exact fit: no split needed, take it immediately. */
77         if (blk->size == size)
80         if (blk->size >= size) {
81             /* XXX: sub optimal algorithm
82              * We need the most thorough best-fit logic, since we can
83              * never grow our rmm, we are SOL when we hit the wall.
/* Remember the smallest block that still satisfies the request. */
85             if (!bestsize || (blk->size < bestsize)) {
/* Split only when the leftover can hold a bookkeeping node plus at least
 * one byte; otherwise the whole block is handed out to avoid an unusable
 * sliver on the free list. */
94     if (bestsize > RMM_BLOCK_SIZE + size) {
95         struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + best);
96         struct rmm_block_t *new = (rmm_block_t*)((char*)rmm->base + best + size);
/* Carve the tail into a new free node and splice it after 'best'. */
98         new->size = blk->size - size;
99         new->next = blk->next;
103         blk->next = best + size;
/* Fix the back-link of the node that follows the new remainder.
 * NOTE(review): the guard for new->next == 0 is elided from this view. */
106             blk = (rmm_block_t*)((char*)rmm->base + new->next);
107             blk->prev = best + size;
/* Move the block at offset 'this' between the used and free lists:
 * unlink it from its current list, push it onto the head of the other
 * ('free' non-zero means it is being freed), then — on the free path —
 * coalesce it with physically adjacent free neighbours.
 * NOTE(review): the branch conditions around several statements below are
 * elided from this view; the ordering shown is preserved verbatim. */
114 static void move_block(apr_rmm_t *rmm, apr_rmm_off_t this, int free)
116     struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + this);
/* Unlink: either patch the predecessor's forward link... */
120         struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
121         prev->next = blk->next;
/* ...or advance the appropriate list head when 'this' was the head. */
125             rmm->base->firstused = blk->next;
128             rmm->base->firstfree = blk->next;
/* Patch the successor's back link, if there is one. */
132         struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
133         next->prev = blk->prev;
136     /* now find it in the other list, pushing it to the head if required */
138         blk->prev = find_block_by_offset(rmm, rmm->base->firstfree, this, 1);
/* Freed block becomes the new head of the free list... */
140             blk->next = rmm->base->firstfree;
141             rmm->base->firstfree = this;
145         blk->prev = find_block_by_offset(rmm, rmm->base->firstused, this, 1);
/* ...or, when allocating, the new head of the used list. */
147             blk->next = rmm->base->firstused;
148             rmm->base->firstused = this;
/* Coalesce with the physically preceding free block: prev ends exactly
 * where 'this' begins, so fold our size into it. */
154         struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
155         if (free && (blk->prev + prev->size == this)) {
156             /* Collapse us into our predecessor */
157             prev->size += blk->size;
162                 blk->next = prev->next;
/* Coalesce with the physically following free block in the same way. */
168         struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
169         if (free && (this + blk->size == blk->next)) {
170             /* Collapse us into our successor */
171             blk->size += next->size;
172             blk->next = next->next;
174                 next = (rmm_block_t*)((char*)rmm->base + blk->next);
/* Initialise a fresh rmm over the caller-supplied region 'base' of
 * 'size' bytes: lay down the header at offset 0 and put the entire
 * remainder on the free list as one block.  'lock' may be NULL, in which
 * case a no-op anylock is substituted (see nulllock below).  Returns the
 * status of the final unlock; the handle is allocated from pool 'p'. */
184 APU_DECLARE(apr_status_t) apr_rmm_init(apr_rmm_t **rmm, apr_anylock_t *lock,
185                                        void *base, apr_size_t size,
190     apr_anylock_t nulllock;
/* Fallback lock that makes APR_ANYLOCK_* macros no-ops. */
193     nulllock.type = apr_anylock_none;
194     nulllock.lock.pm = NULL;
197     if ((rv = APR_ANYLOCK_LOCK(lock)) != APR_SUCCESS)
200     (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
/* Copy the lock by value into the handle; callers share the region, not
 * the apr_rmm_t. */
204     (*rmm)->lock = *lock;
206     (*rmm)->base->abssize = size;
207     (*rmm)->base->firstused = 0;
/* Everything after the header is initially one free block. */
208     (*rmm)->base->firstfree = RMM_HDR_BLOCK_SIZE;
210     blk = (rmm_block_t *)((char*)base + (*rmm)->base->firstfree);
212     blk->size = size - (*rmm)->base->firstfree;
216     return APR_ANYLOCK_UNLOCK(lock);
/* Tear down the rmm bookkeeping inside the shared region: zero the list
 * links of the head block of each list and clear the header fields so
 * the region no longer looks like a valid rmm.  The region's storage
 * itself is owned by the caller and is not released here. */
219 APU_DECLARE(apr_status_t) apr_rmm_destroy(apr_rmm_t *rmm)
224     if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
227     /* Blast it all --- no going back :) */
228     if (rmm->base->firstused) {
229         apr_rmm_off_t this = rmm->base->firstused;
231         blk = (rmm_block_t *)((char*)rmm->base + this);
/* NOTE(review): only the head node's links are visibly cleared here;
 * any walk over the rest of the list is elided from this view. */
233         blk->next = blk->prev = 0;
235         rmm->base->firstused = 0;
237     if (rmm->base->firstfree) {
238         apr_rmm_off_t this = rmm->base->firstfree;
240         blk = (rmm_block_t *)((char*)rmm->base + this);
242         blk->next = blk->prev = 0;
244         rmm->base->firstfree = 0;
/* abssize == 0 marks the header as destroyed. */
246     rmm->base->abssize = 0;
249     return APR_ANYLOCK_UNLOCK(&rmm->lock);
/* Attach to an already-initialised rmm region (e.g. from another process
 * that called apr_rmm_init on the same shared memory).  Builds a new
 * apr_rmm_t handle from pool 'p' that points at 'base'; the region's
 * contents are trusted as-is ("sanity would be good here"). */
252 APU_DECLARE(apr_status_t) apr_rmm_attach(apr_rmm_t **rmm, apr_anylock_t *lock,
253                                          void *base, apr_pool_t *p)
255     apr_anylock_t nulllock;
/* Same NULL-lock substitution as apr_rmm_init. */
258     nulllock.type = apr_anylock_none;
259     nulllock.lock.pm = NULL;
263     /* sanity would be good here */
264     (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
/* Recover the region size from the header written by apr_rmm_init. */
267     (*rmm)->size = (*rmm)->base->abssize;
268     (*rmm)->lock = *lock;
/* Detach a handle obtained via apr_rmm_attach.  Intentionally does
 * nothing today; kept as an API hook for future lock/refcount cleanup. */
272 APU_DECLARE(apr_status_t) apr_rmm_detach(apr_rmm_t *rmm)
274     /* A noop until we introduce locked/refcounts */
/* Allocate 'reqsize' bytes from the region.  Returns the offset of the
 * usable memory (past the bookkeeping node), or 0 on failure (0 can
 * never be a valid allocation since it is the header's offset). */
278 APU_DECLARE(apr_rmm_off_t) apr_rmm_malloc(apr_rmm_t *rmm, apr_size_t reqsize)
/* Round up and add the bookkeeping node's size... */
283     size = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;
/* ...and detect unsigned wrap-around from that addition: the result
 * being smaller than the input means overflow, so fail the request. */
284     if (size < reqsize) {
288     APR_ANYLOCK_LOCK(&rmm->lock);
290     this = find_block_of_size(rmm, size);
/* Found one: claim it (free=0 moves it onto the used list) and skip the
 * bookkeeping node so callers get the payload offset. */
293         move_block(rmm, this, 0);
294         this += RMM_BLOCK_SIZE;
297     APR_ANYLOCK_UNLOCK(&rmm->lock);
/* Like apr_rmm_malloc but zero-fills the returned payload.  Returns the
 * payload offset, or 0 on failure. */
301 APU_DECLARE(apr_rmm_off_t) apr_rmm_calloc(apr_rmm_t *rmm, apr_size_t reqsize)
/* Same sizing and unsigned-wrap guard as apr_rmm_malloc. */
306     size = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;
307     if (size < reqsize) {
311     APR_ANYLOCK_LOCK(&rmm->lock);
313     this = find_block_of_size(rmm, size);
316         move_block(rmm, this, 0);
317         this += RMM_BLOCK_SIZE;
/* Zero only the payload ('this' already points past the node here). */
318         memset((char*)rmm->base + this, 0, size - RMM_BLOCK_SIZE);
321     APR_ANYLOCK_UNLOCK(&rmm->lock);
/* Resize an allocation identified by its in-process address 'entity':
 * malloc-new, copy min(old,new) bytes, free-old.  A NULL entity degrades
 * to plain malloc, matching realloc(NULL, n) semantics.  Returns the new
 * payload offset, or 0 on failure (elided paths). */
325 APU_DECLARE(apr_rmm_off_t) apr_rmm_realloc(apr_rmm_t *rmm, void *entity,
330     struct rmm_block_t *blk;
331     apr_size_t size, oldsize;
334         return apr_rmm_malloc(rmm, reqsize);
337     size = APR_ALIGN_DEFAULT(reqsize);
/* Guard against wrap-around from the alignment round-up. */
338     if (size < reqsize) {
341     old = apr_rmm_offset_get(rmm, entity);
343     if ((this = apr_rmm_malloc(rmm, size)) == 0) {
/* Step back over the bookkeeping node to read the old block's size.
 * NOTE(review): the assignment of 'oldsize' from blk is elided from this
 * view — whether it includes RMM_BLOCK_SIZE is unverified here. */
347     blk = (rmm_block_t*)((char*)rmm->base + old - RMM_BLOCK_SIZE);
350     memcpy(apr_rmm_addr_get(rmm, this),
351            apr_rmm_addr_get(rmm, old), oldsize < size ? oldsize : size);
352     apr_rmm_free(rmm, old);
/* Release the allocation at payload offset 'this'.  Performs cheap
 * consistency checks (offset range, predecessor/successor back-links)
 * before unlinking, returning early — with the lock released — when the
 * block does not look like a live used-list entry.  Error codes on the
 * rejection paths are elided from this view. */
357 APU_DECLARE(apr_status_t) apr_rmm_free(apr_rmm_t *rmm, apr_rmm_off_t this)
360     struct rmm_block_t *blk;
362     /* A little sanity check is always healthy, especially here.
363      * If we really cared, we could make this compile-time
/* A valid payload offset must lie beyond the region header plus at
 * least one bookkeeping node. */
365     if (this < RMM_HDR_BLOCK_SIZE + RMM_BLOCK_SIZE) {
/* Step back to the bookkeeping node that precedes the payload. */
369     this -= RMM_BLOCK_SIZE;
371     blk = (rmm_block_t*)((char*)rmm->base + this);
373     if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
/* If we claim a predecessor, it must point forward at us... */
377         struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
378         if (prev->next != this) {
379             APR_ANYLOCK_UNLOCK(&rmm->lock);
/* ...and if we claim none, we must be the used-list head. */
384         if (rmm->base->firstused != this) {
385             APR_ANYLOCK_UNLOCK(&rmm->lock);
/* Successor, when present, must point back at us. */
391         struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
392         if (next->prev != this) {
393             APR_ANYLOCK_UNLOCK(&rmm->lock);
398     /* Ok, it remained [apparently] sane, so unlink it
/* free=1: move onto the free list and coalesce with neighbours. */
400     move_block(rmm, this, 1);
402     return APR_ANYLOCK_UNLOCK(&rmm->lock);
/* Convert a region-relative offset into an address valid in this
 * process.  No validation is performed on 'entity'. */
405 APU_DECLARE(void *) apr_rmm_addr_get(apr_rmm_t *rmm, apr_rmm_off_t entity)
407     /* debug-sanity checking here would be good
409     return (void*)((char*)rmm->base + entity);
/* Inverse of apr_rmm_addr_get: convert an in-process address inside the
 * managed region back into its region-relative offset.  No validation
 * that 'entity' actually lies within the region. */
412 APU_DECLARE(apr_rmm_off_t) apr_rmm_offset_get(apr_rmm_t *rmm, void* entity)
414     /* debug, or always, sanity checking here would be good
415      * since the primitive is apr_rmm_off_t, I don't mind penalizing
416      * inverse conversions for safety, unless someone can prove that
417      * there is no choice in some cases.
419     return ((char*)entity - (char*)rmm->base);
/* Report the bookkeeping overhead, in bytes, for a region expected to
 * hold 'n' allocations: one shared header plus, per allocation, one
 * block node and up to APR_ALIGN_DEFAULT(1) bytes of alignment padding.
 * Callers add this to the sum of their payload sizes when sizing the
 * region passed to apr_rmm_init. */
422 APU_DECLARE(apr_size_t) apr_rmm_overhead_get(int n)
424     /* overhead per block is at most APR_ALIGN_DEFAULT(1) wasted bytes
425      * for alignment overhead, plus the size of the rmm_block_t
427     return RMM_HDR_BLOCK_SIZE + n * (RMM_BLOCK_SIZE + APR_ALIGN_DEFAULT(1));