1 /* Licensed to the Apache Software Foundation (ASF) under one or more
2 * contributor license agreements. See the NOTICE file distributed with
3 * this work for additional information regarding copyright ownership.
4 * The ASF licenses this file to You under the Apache License, Version 2.0
5 * (the "License"); you may not use this file except in compliance with
6 * the License. You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * _ __ ___ ___ __| | ___ ___| | mod_ssl
19 * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
20 * | | | | | | (_) | (_| | \__ \__ \ |
21 * |_| |_| |_|\___/ \__,_|___|___/___/_|
24 * Session Cache via Shared Memory (Cyclic Buffer Variant)
30 * This shared memory based SSL session cache implementation was
31 * originally written by Geoff Thorpe <geoff@geoffthorpe.net> for C2Net
32 * Europe as a contribution to Ralf Engelschall's mod_ssl project.
36 * The shared-memory segment header can be cast to and from the
37 * SHMCBHeader type, all other structures need to be initialised by
40 * The "header" looks like this;
42 * data applying to the overall structure:
43 * - division_offset (unsigned int):
44 * how far into the shared memory segment the first division is.
45 * - division_size (unsigned int):
46 * how many bytes each division occupies.
47 * (NB: This includes the queue and the cache)
48 * - division_mask (unsigned char):
49 * the "mask" in the next line. Add one to this,
50 * and that's the number of divisions.
52 * data applying to within each division:
53 * - queue_size (unsigned int):
54 * how big each "queue" is. NB: The queue is the first block in each
55 * division and is followed immediately by the cache itself, so
56 * there's no cache_offset value.
58 * data applying to within each queue:
59 * - index_num (unsigned char):
60 * how many indexes in each cache's queue
61 * - index_offset (unsigned char):
62 * how far into the queue the first index is.
64 * how big each index is.
66 * data applying to within each cache:
67 * - cache_data_offset (unsigned int):
68 * how far into the cache the session-data array is stored.
69 * - cache_data_size (unsigned int):
70 * how big each cache's data block is.
72 * statistics data (this will eventually be per-division but right now
73 * there's only one mutex):
74 * - stores (unsigned long):
75 * how many stores have been performed in the cache.
76 * - expiries (unsigned long):
77 * how many sessions have been expired from the cache.
78 * - scrolled (unsigned long):
79 * how many sessions have been scrolled out of full cache during a
80 * "store" operation. This is different to the "removes" stats as
81 * they are requested by mod_ssl/Apache, these are done because of
82 * cache logistics. (NB: Also, this value should be deducible from
83 * the others if my code has no bugs, but I count it anyway - plus
84 * it helps debugging :-).
85 * - retrieves_hit (unsigned long):
86 * how many session-retrieves have succeeded.
87 * - retrieves_miss (unsigned long):
88 * how many session-retrieves have failed.
89 * - removes_hit (unsigned long):
90 * - removes_miss (unsigned long):
92 * Following immediately after the header is an array of "divisions".
93 * Each division is simply a "queue" immediately followed by its
94 * corresponding "cache". Each division handles some pre-defined band
95 * of sessions by using the "division_mask" in the header. Eg. if
96 * division_mask=0x1f then there are 32 divisions, the first of which
97 * will store sessions whose least-significant 5 bits are 0, the second
98 * stores sessions whose LS 5 bits equal 1, etc. A queue is an indexing
99 * structure referring to its corresponding cache.
101 * A "queue" looks like this;
103 * - first_pos (unsigned int):
104 * the location within the array of indexes where the virtual
105 * "left-hand-edge" of the cyclic buffer is.
106 * - pos_count (unsigned int):
107 * the number of indexes occupied from first_pos onwards.
109 * ...followed by an array of indexes, each of which can be
110 * memcpy'd to and from an SHMCBIndex, and look like this;
112 * - expires (time_t):
113 * the time() value at which this session expires.
114 * - offset (unsigned int):
115 * the offset within the cache data block where the corresponding
117 * - s_id2 (unsigned char):
118 * the second byte of the session_id, stored as an optimisation to
119 * reduce the number of d2i_SSL_SESSION calls that are made when doing
121 * - removed (unsigned char):
122 * a byte used to indicate whether a session has been "passively"
123 * removed. Ie. it is still in the cache but is to be disregarded by
124 * any "retrieve" operation.
126 * A "cache" looks like this;
128 * - first_pos (unsigned int):
129 * the location within the data block where the virtual
130 * "left-hand-edge" of the cyclic buffer is.
131 * - pos_count (unsigned int):
132 * the number of bytes used in the data block from first_pos onwards.
134 * ...followed by the data block in which actual DER-encoded SSL
135 * sessions are stored.
/*
 * Header - can be memcpy'd to and from the front of the shared
 * memory segment. NB: The types are grouped together so as to
 * decrease padding ("struct-bloat") caused by data-alignment
 * requirements; the layout is therefore not in "meaningful" order.
 */
typedef struct {
    /* Statistics (see the design notes above; currently cache-wide,
     * eventually per-division). */
    unsigned long num_stores;
    unsigned long num_expiries;
    unsigned long num_scrolled;
    unsigned long num_retrieves_hit;
    unsigned long num_retrieves_miss;
    unsigned long num_removes_hit;
    unsigned long num_removes_miss;
    /* Layout of the divisions that follow this header. */
    unsigned int division_offset;   /* offset of the first division */
    unsigned int division_size;     /* bytes per division (queue + cache) */
    unsigned int queue_size;        /* bytes per queue */
    unsigned int cache_data_offset; /* offset of data block within a cache */
    unsigned int cache_data_size;   /* bytes per cache data block */
    unsigned char division_mask;    /* (number of divisions - 1) */
    unsigned int index_num;         /* indexes per queue */
    unsigned int index_offset;      /* offset of first index within a queue */
    unsigned int index_size;        /* bytes per index */
} SHMCBHeader;

/*
 * Index - can be memcpy'd to and from an index inside each
 * queue's index array.
 * NOTE(review): field declarations reconstructed from the design
 * notes above - confirm ordering against the upstream source.
 */
typedef struct {
    time_t expires;        /* time() value at which this session expires */
    unsigned int offset;   /* offset of the session data in the cache block */
    unsigned char s_id2;   /* 2nd byte of the session_id (lookup shortcut) */
    unsigned char removed; /* non-zero if "passively" removed */
} SHMCBIndex;

/*
 * Queue - must be populated by a call to shmcb_get_division
 * and the structure's pointers are used for updating (ie.
 * the structure doesn't need any "set" to update values).
 */
typedef struct {
    SHMCBHeader *header;      /* the (constant) cache-wide header */
    unsigned int *first_pos;  /* "left-hand-edge" of the cyclic index array */
    unsigned int *pos_count;  /* indexes occupied from first_pos onwards */
    SHMCBIndex *indexes;      /* the cyclic array of indexes itself */
} SHMCBQueue;

/*
 * Cache - same comment as for Queue. 'Queue's are in a 1-1
 * correspondence with 'Cache's and are usually carried round
 * in a pair, they are only separated for clarity.
 */
typedef struct {
    SHMCBHeader *header;      /* the (constant) cache-wide header */
    unsigned int *first_pos;  /* "left-hand-edge" of the cyclic data block */
    unsigned int *pos_count;  /* bytes used from first_pos onwards */
    unsigned char *data;      /* the cyclic block of DER-encoded sessions */
} SHMCBCache;
/*
 * Forward function prototypes.
 */

/* Functions for working around data-alignment-picky systems (sparcs,
   Irix, etc). These use "memcpy" as a way of foxing these systems into
   treating the composite types as byte-arrays rather than higher-level
   primitives that it prefers to have 4-(or 8-)byte aligned. I don't
   envisage this being a performance issue as a couple of 2 or 4 byte
   memcpys can hardly make a dent on the massive memmove operations this
   cache technique avoids, nor the overheads of ASN en/decoding. */
static unsigned int shmcb_get_safe_uint(unsigned int *);
static void shmcb_set_safe_uint_ex(unsigned char *, const unsigned char *);
/* Store 'src' (an unsigned int expression) at the possibly-unaligned
 * address 'pdest', byte by byte. */
#define shmcb_set_safe_uint(pdest, src) \
    do { \
        unsigned int tmp_uint = src; \
        shmcb_set_safe_uint_ex((unsigned char *)pdest, \
                               (const unsigned char *)(&tmp_uint)); \
    } while(0)
#if 0 /* Unused so far */
static unsigned long shmcb_get_safe_ulong(unsigned long *);
static void shmcb_set_safe_ulong_ex(unsigned char *, const unsigned char *);
#define shmcb_set_safe_ulong(pdest, src) \
    do { \
        unsigned long tmp_ulong = src; \
        shmcb_set_safe_ulong_ex((unsigned char *)pdest, \
                                (const unsigned char *)(&tmp_ulong)); \
    } while(0)
#endif
static time_t shmcb_get_safe_time(time_t *);
static void shmcb_set_safe_time_ex(unsigned char *, const unsigned char *);
/* Store 'src' (a time_t expression) at the possibly-unaligned address
 * 'pdest', byte by byte. */
#define shmcb_set_safe_time(pdest, src) \
    do { \
        time_t tmp_time = src; \
        shmcb_set_safe_time_ex((unsigned char *)pdest, \
                               (const unsigned char *)(&tmp_time)); \
    } while(0)
/* This is necessary simply so that the size passed to memset() is not a
 * compile-time constant, preventing the compiler from optimising it. */
static void shmcb_safe_clear(void *ptr, size_t size)
{
    memset(ptr, 0, size);
}
244 /* Underlying functions for session-caching */
245 static BOOL shmcb_init_memory(server_rec *, void *, unsigned int);
246 static BOOL shmcb_store_session(server_rec *, void *, UCHAR *, int, SSL_SESSION *, time_t);
247 static SSL_SESSION *shmcb_retrieve_session(server_rec *, void *, UCHAR *, int);
248 static BOOL shmcb_remove_session(server_rec *, void *, UCHAR *, int);
250 /* Utility functions for manipulating the structures */
251 static void shmcb_get_header(void *, SHMCBHeader **);
252 static BOOL shmcb_get_division(SHMCBHeader *, SHMCBQueue *, SHMCBCache *, unsigned int);
253 static SHMCBIndex *shmcb_get_index(const SHMCBQueue *, unsigned int);
254 static unsigned int shmcb_expire_division(server_rec *, SHMCBQueue *, SHMCBCache *);
255 static BOOL shmcb_insert_encoded_session(server_rec *, SHMCBQueue *, SHMCBCache *, unsigned char *, unsigned int, unsigned char *, time_t);
256 static SSL_SESSION *shmcb_lookup_session_id(server_rec *, SHMCBQueue *, SHMCBCache *, UCHAR *, unsigned int);
257 static BOOL shmcb_remove_session_id(server_rec *, SHMCBQueue *, SHMCBCache *, UCHAR *, unsigned int);
260 * Data-alignment functions (a.k.a. avoidance tactics)
262 * NB: On HPUX (and possibly others) there is a *very* mischievous little
263 * "optimisation" in the compilers where it will convert the following;
264 * memcpy(dest_ptr, &source, sizeof(unsigned int));
265 * (where dest_ptr is of type (unsigned int *) and source is (unsigned int))
267 * *dest_ptr = source; (or *dest_ptr = *(&source), not sure).
268 * Either way, it completely destroys the whole point of these _safe_
269 * functions, because the assignment operation will fall victim to the
270 * architecture's byte-alignment dictations, whereas the memcpy (as a
271 * byte-by-byte copy) should not. sigh. So, if you're wondering about the
272 * apparently unnecessary conversions to (unsigned char *) in these
273 * functions, you now have an explanation. Don't just revert them back and
274 * say "ooh look, it still works" - if you try it on HPUX (well, 32-bit
275 * HPUX 11.00 at least) you may find it fails with a SIGBUS. :-(
278 static unsigned int shmcb_get_safe_uint(unsigned int *ptr)
281 shmcb_set_safe_uint_ex((unsigned char *)(&ret),
282 (const unsigned char *)ptr);
286 static void shmcb_set_safe_uint_ex(unsigned char *dest,
287 const unsigned char *src)
289 memcpy(dest, src, sizeof(unsigned int));
#if 0 /* Unused so far */
/* Read an unsigned long from a possibly-unaligned location. */
static unsigned long shmcb_get_safe_ulong(unsigned long *ptr)
{
    unsigned long ret;
    shmcb_set_safe_ulong_ex((unsigned char *)(&ret),
                            (const unsigned char *)ptr);
    return ret;
}

/* Byte-wise copy of an unsigned long between possibly-unaligned
 * locations. */
static void shmcb_set_safe_ulong_ex(unsigned char *dest,
                                    const unsigned char *src)
{
    memcpy(dest, src, sizeof(unsigned long));
}
#endif
308 static time_t shmcb_get_safe_time(time_t * ptr)
311 shmcb_set_safe_time_ex((unsigned char *)(&ret),
312 (const unsigned char *)ptr);
316 static void shmcb_set_safe_time_ex(unsigned char *dest,
317 const unsigned char *src)
319 memcpy(dest, src, sizeof(time_t));
323 ** High-Level "handlers" as per ssl_scache.c
327 void ssl_scache_shmcb_init(server_rec *s, apr_pool_t *p)
329 SSLModConfigRec *mc = myModConfig(s);
331 apr_size_t shm_segsize;
335 * Create shared memory segment
337 if (mc->szSessionCacheDataFile == NULL) {
338 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
339 "SSLSessionCache required");
343 /* Use anonymous shm by default, fall back on name-based. */
344 rv = apr_shm_create(&(mc->pSessionCacheDataMM),
345 mc->nSessionCacheDataSize,
348 if (APR_STATUS_IS_ENOTIMPL(rv)) {
349 rv = apr_shm_create(&(mc->pSessionCacheDataMM),
350 mc->nSessionCacheDataSize,
351 mc->szSessionCacheDataFile,
355 if (rv != APR_SUCCESS) {
357 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
358 "Cannot allocate shared memory: (%d)%s", rv,
359 apr_strerror(rv, buf, sizeof(buf)));
362 shm_segment = apr_shm_baseaddr_get(mc->pSessionCacheDataMM);
363 shm_segsize = apr_shm_size_get(mc->pSessionCacheDataMM);
365 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
366 "shmcb_init allocated %" APR_SIZE_T_FMT
367 " bytes of shared memory",
369 if (!shmcb_init_memory(s, shm_segment, shm_segsize)) {
370 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
371 "Failure initialising 'shmcb' shared memory");
374 ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
375 "Shared memory session cache initialised");
378 * Success ... we hack the memory block into place by cheating for
379 * now and stealing a member variable the original shared memory
380 * cache was using. :-)
382 mc->tSessionCacheDataTable = (table_t *) shm_segment;
386 void ssl_scache_shmcb_kill(server_rec *s)
388 SSLModConfigRec *mc = myModConfig(s);
390 if (mc->pSessionCacheDataMM != NULL) {
391 apr_shm_destroy(mc->pSessionCacheDataMM);
392 mc->pSessionCacheDataMM = NULL;
397 BOOL ssl_scache_shmcb_store(server_rec *s, UCHAR *id, int idlen,
398 time_t timeout, SSL_SESSION * pSession)
400 SSLModConfigRec *mc = myModConfig(s);
402 BOOL to_return = FALSE;
404 /* We've kludged our pointer into the other cache's member variable. */
405 shm_segment = (void *) mc->tSessionCacheDataTable;
407 if (!shmcb_store_session(s, shm_segment, id, idlen, pSession, timeout))
408 /* in this cache engine, "stores" should never fail. */
409 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
410 "'shmcb' code was unable to store a "
411 "session in the cache.");
413 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
414 "shmcb_store successful");
421 SSL_SESSION *ssl_scache_shmcb_retrieve(server_rec *s, UCHAR *id, int idlen)
423 SSLModConfigRec *mc = myModConfig(s);
425 SSL_SESSION *pSession;
427 /* We've kludged our pointer into the other cache's member variable. */
428 shm_segment = (void *) mc->tSessionCacheDataTable;
430 pSession = shmcb_retrieve_session(s, shm_segment, id, idlen);
433 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
434 "shmcb_retrieve had a hit");
436 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
437 "shmcb_retrieve had a miss");
438 ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
439 "Client requested a 'session-resume' but "
440 "we have no such session.");
445 void ssl_scache_shmcb_remove(server_rec *s, UCHAR *id, int idlen)
447 SSLModConfigRec *mc = myModConfig(s);
450 /* We've kludged our pointer into the other cache's member variable. */
451 shm_segment = (void *) mc->tSessionCacheDataTable;
453 shmcb_remove_session(s, shm_segment, id, idlen);
457 void ssl_scache_shmcb_expire(server_rec *s)
463 void ssl_scache_shmcb_status(server_rec *s, apr_pool_t *p,
464 void (*func) (char *, void *), void *arg)
466 SSLModConfigRec *mc = myModConfig(s);
472 unsigned int loop, total, cache_total, non_empty_divisions;
473 int index_pct, cache_pct;
475 time_t average_expiry, now, max_expiry, min_expiry, idxexpiry;
477 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
478 "inside ssl_scache_shmcb_status");
480 /* We've kludged our pointer into the other cache's member variable. */
481 shm_segment = (void *) mc->tSessionCacheDataTable;
483 /* Get the header structure. */
484 shmcb_get_header(shm_segment, &header);
485 total = cache_total = non_empty_divisions = 0;
486 average_expiry = max_expiry = min_expiry = 0;
489 /* It may seem strange to grab "now" at this point, but in theory
490 * we should never have a negative threshold but grabbing "now" after
491 * the loop (which performs expiries) could allow that chance. */
493 for (loop = 0; loop <= header->division_mask; loop++) {
494 if (shmcb_get_division(header, &queue, &cache, loop)) {
495 shmcb_expire_division(s, &queue, &cache);
496 total += shmcb_get_safe_uint(queue.pos_count);
497 cache_total += shmcb_get_safe_uint(cache.pos_count);
498 if (shmcb_get_safe_uint(queue.pos_count) > 0) {
499 idx = shmcb_get_index(&queue,
500 shmcb_get_safe_uint(queue.first_pos));
501 non_empty_divisions++;
502 idxexpiry = shmcb_get_safe_time(&(idx->expires));
503 expiry_total += (double) idxexpiry;
504 max_expiry = (idxexpiry > max_expiry ? idxexpiry :
507 min_expiry = idxexpiry;
509 min_expiry = (idxexpiry < min_expiry ? idxexpiry :
514 index_pct = (100 * total) / (header->index_num * (header->division_mask + 1));
515 cache_pct = (100 * cache_total) / (header->cache_data_size * (header->division_mask + 1));
516 func(apr_psprintf(p, "cache type: <b>SHMCB</b>, shared memory: <b>%d</b> "
517 "bytes, current sessions: <b>%d</b><br>",
518 mc->nSessionCacheDataSize, total), arg);
519 func(apr_psprintf(p, "sub-caches: <b>%d</b>, indexes per sub-cache: "
520 "<b>%d</b><br>", (int) header->division_mask + 1,
521 (int) header->index_num), arg);
522 if (non_empty_divisions != 0) {
523 average_expiry = (time_t)(expiry_total / (double)non_empty_divisions);
524 func(apr_psprintf(p, "time left on oldest entries' SSL sessions: "), arg);
525 if (now < average_expiry)
526 func(apr_psprintf(p, "avg: <b>%d</b> seconds, (range: %d...%d)<br>",
527 (int)(average_expiry - now), (int) (min_expiry - now),
528 (int)(max_expiry - now)), arg);
530 func(apr_psprintf(p, "expiry threshold: <b>Calculation Error!</b>"
534 func(apr_psprintf(p, "index usage: <b>%d%%</b>, cache usage: <b>%d%%</b>"
535 "<br>", index_pct, cache_pct), arg);
536 func(apr_psprintf(p, "total sessions stored since starting: <b>%lu</b><br>",
537 header->num_stores), arg);
538 func(apr_psprintf(p,"total sessions expired since starting: <b>%lu</b><br>",
539 header->num_expiries), arg);
540 func(apr_psprintf(p, "total (pre-expiry) sessions scrolled out of the "
541 "cache: <b>%lu</b><br>", header->num_scrolled), arg);
542 func(apr_psprintf(p, "total retrieves since starting: <b>%lu</b> hit, "
543 "<b>%lu</b> miss<br>", header->num_retrieves_hit,
544 header->num_retrieves_miss), arg);
545 func(apr_psprintf(p, "total removes since starting: <b>%lu</b> hit, "
546 "<b>%lu</b> miss<br>", header->num_removes_hit,
547 header->num_removes_miss), arg);
548 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
549 "leaving shmcb_status");
555 ** Memory manipulation and low-level cache operations
559 static BOOL shmcb_init_memory(
560 server_rec *s, void *shm_mem,
561 unsigned int shm_mem_size)
566 unsigned int temp, loop, granularity;
568 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
569 "entered shmcb_init_memory()");
571 /* Calculate some sizes... */
572 temp = sizeof(SHMCBHeader);
574 /* If the segment is ridiculously too small, bail out */
575 if (shm_mem_size < (2*temp)) {
576 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
577 "shared memory segment too small");
581 /* Make temp the amount of memory without the header */
582 temp = shm_mem_size - temp;
584 /* Work on the basis that you need 10 bytes index for each session
585 * (approx 150 bytes), which is to divide temp by 160 - and then
586 * make sure we err on having too index space to burn even when
587 * the cache is full, which is a lot less stupid than having
588 * having not enough index space to utilise the whole cache!. */
590 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
591 "for %u bytes, recommending %u indexes",
594 /* We should divide these indexes evenly amongst the queues. Try
595 * to get it so that there are roughly half the number of divisions
596 * as there are indexes in each division. */
598 while ((temp / granularity) < (2 * granularity))
601 /* So we have 'granularity' divisions, set 'temp' equal to the
602 * number of indexes in each division. */
605 /* Too small? Bail ... */
607 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
608 "shared memory segment too small");
612 /* OK, we're sorted - from here on in, the return should be TRUE */
613 header = (SHMCBHeader *)shm_mem;
614 header->division_mask = (unsigned char)(granularity - 1);
615 header->division_offset = sizeof(SHMCBHeader);
616 header->index_num = temp;
617 header->index_offset = (2 * sizeof(unsigned int));
618 header->index_size = sizeof(SHMCBIndex);
619 header->queue_size = header->index_offset +
620 (header->index_num * header->index_size);
622 /* Now calculate the space for each division */
623 temp = shm_mem_size - header->division_offset;
624 header->division_size = temp / granularity;
626 /* Calculate the space left in each division for the cache */
627 temp -= header->queue_size;
628 header->cache_data_offset = (2 * sizeof(unsigned int));
629 header->cache_data_size = header->division_size -
630 header->queue_size - header->cache_data_offset;
632 /* Output trace info */
633 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
634 "shmcb_init_memory choices follow");
635 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
636 "division_mask = 0x%02X", header->division_mask);
637 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
638 "division_offset = %u", header->division_offset);
639 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
640 "division_size = %u", header->division_size);
641 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
642 "queue_size = %u", header->queue_size);
643 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
644 "index_num = %u", header->index_num);
645 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
646 "index_offset = %u", header->index_offset);
647 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
648 "index_size = %u", header->index_size);
649 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
650 "cache_data_offset = %u", header->cache_data_offset);
651 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
652 "cache_data_size = %u", header->cache_data_size);
654 /* The header is done, make the caches empty */
655 for (loop = 0; loop < granularity; loop++) {
656 if (!shmcb_get_division(header, &queue, &cache, loop))
657 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "shmcb_init_memory, " "internal error");
658 shmcb_set_safe_uint(cache.first_pos, 0);
659 shmcb_set_safe_uint(cache.pos_count, 0);
660 shmcb_set_safe_uint(queue.first_pos, 0);
661 shmcb_set_safe_uint(queue.pos_count, 0);
664 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
665 "leaving shmcb_init_memory()");
669 static BOOL shmcb_store_session(
670 server_rec *s, void *shm_segment, UCHAR *id,
671 int idlen, SSL_SESSION * pSession,
677 unsigned char masked_index;
678 unsigned char encoded[SSL_SESSION_MAX_DER];
679 unsigned char *ptr_encoded;
680 unsigned int len_encoded;
682 unsigned char *session_id = SSL_SESSION_get_session_id(pSession);
684 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
685 "inside shmcb_store_session");
687 /* Get the header structure, which division this session will fall into etc. */
688 shmcb_get_header(shm_segment, &header);
689 masked_index = session_id[0] & header->division_mask;
690 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
691 "session_id[0]=%u, masked index=%u",
692 session_id[0], masked_index);
693 if (!shmcb_get_division(header, &queue, &cache, (unsigned int)masked_index)) {
694 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
695 "shmcb_store_session internal error");
699 /* Serialise the session, work out how much we're dealing
700 * with. NB: This check could be removed if we're not paranoid
701 * or we find some assurance that it will never be necessary. */
702 len_encoded = i2d_SSL_SESSION(pSession, NULL);
703 if (len_encoded > SSL_SESSION_MAX_DER) {
704 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
705 "session is too big (%u bytes)", len_encoded);
708 ptr_encoded = encoded;
709 len_encoded = i2d_SSL_SESSION(pSession, &ptr_encoded);
710 expiry_time = timeout;
711 if (!shmcb_insert_encoded_session(s, &queue, &cache, encoded,
712 len_encoded, session_id,
714 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
715 "can't store a session!");
718 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
719 "leaving shmcb_store successfully");
720 header->num_stores++;
724 static SSL_SESSION *shmcb_retrieve_session(
725 server_rec *s, void *shm_segment,
726 UCHAR *id, int idlen)
731 unsigned char masked_index;
732 SSL_SESSION *pSession;
734 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
735 "inside shmcb_retrieve_session");
737 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "unusably short session_id provided "
738 "(%u bytes)", idlen);
742 /* Get the header structure, which division this session lookup
743 * will come from etc. */
744 shmcb_get_header(shm_segment, &header);
745 masked_index = id[0] & header->division_mask;
746 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
747 "id[0]=%u, masked index=%u", id[0], masked_index);
748 if (!shmcb_get_division(header, &queue, &cache, (unsigned int) masked_index)) {
749 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
750 "shmcb_retrieve_session internal error");
751 header->num_retrieves_miss++;
755 /* Get the session corresponding to the session_id or NULL if it
756 * doesn't exist (or is flagged as "removed"). */
757 pSession = shmcb_lookup_session_id(s, &queue, &cache, id, idlen);
759 header->num_retrieves_hit++;
761 header->num_retrieves_miss++;
762 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
763 "leaving shmcb_retrieve_session");
767 static BOOL shmcb_remove_session(
768 server_rec *s, void *shm_segment,
769 UCHAR *id, int idlen)
774 unsigned char masked_index;
777 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
778 "inside shmcb_remove_session");
780 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "remove called with NULL session_id!");
784 /* Get the header structure, which division this session remove
785 * will happen in etc. */
786 shmcb_get_header(shm_segment, &header);
787 masked_index = id[0] & header->division_mask;
788 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
789 "id[0]=%u, masked index=%u", id[0], masked_index);
790 if (!shmcb_get_division(header, &queue, &cache, (unsigned int)masked_index)) {
791 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "shmcb_remove_session, internal error");
792 header->num_removes_miss++;
795 res = shmcb_remove_session_id(s, &queue, &cache, id, idlen);
797 header->num_removes_hit++;
799 header->num_removes_miss++;
800 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
801 "leaving shmcb_remove_session");
808 ** Weirdo cyclic buffer functions
/* This gets used in the cyclic "index array" (in the 'Queue's) and
 * in the cyclic 'Cache's too ... you provide the "width" of the
 * cyclic store, the starting position and how far to move (with
 * wrapping if necessary). Basically it's addition modulo buf_size. */
static unsigned int shmcb_cyclic_increment(
    unsigned int buf_size,
    unsigned int start_pos,
    unsigned int count)
{
    start_pos += count;
    /* Loop rather than '%' so 'count' may exceed buf_size safely. */
    while (start_pos >= buf_size)
        start_pos -= buf_size;
    return start_pos;
}
/* Given two positions in a cyclic buffer, calculate the "distance".
 * This is to cover the case ("non-trivial") where the 'next' offset
 * is to the left of the 'start' offset. NB: This calculates the
 * space inclusive of one end-point but not the other. There is an
 * ambiguous case (which is why we use the <start_pos,offset>
 * coordinate system rather than <start_pos,end_pos> one) when 'start'
 * is the same as 'next'. It could indicate the buffer is full or it
 * can indicate the buffer is empty ... I choose the latter as it's
 * easier and usually necessary to check if the buffer is full anyway
 * before doing incremental logic (which is this useful for), but we
 * definitely need the empty case handled - in fact it's our starting
 * state. */
static unsigned int shmcb_cyclic_space(
    unsigned int buf_size,
    unsigned int start_offset,
    unsigned int next_offset)
{
    /* Is it the trivial case? */
    if (start_offset <= next_offset)
        return (next_offset - start_offset);              /* yes */
    else
        return ((buf_size - start_offset) + next_offset); /* no */
}
/* A "normal-to-cyclic" memcpy ... this takes a linear block of
 * memory and copies it onto a cyclic buffer. The purpose and
 * function of this is pretty obvious, you need to cover the case
 * that the destination (cyclic) buffer has to wrap round. */
static void shmcb_cyclic_ntoc_memcpy(
    unsigned int buf_size,
    unsigned char *data,
    unsigned int dest_offset,
    unsigned char *src, unsigned int src_len)
{
    /* Cover the case that src_len > buf_size */
    if (src_len > buf_size)
        src_len = buf_size;

    /* Can it be copied all in one go? */
    if (dest_offset + src_len < buf_size)
        /* yes */
        memcpy(data + dest_offset, src, src_len);
    else {
        /* no, wrap around the right-hand edge */
        memcpy(data + dest_offset, src, buf_size - dest_offset);
        memcpy(data, src + buf_size - dest_offset,
               src_len + dest_offset - buf_size);
    }
    return;
}
/* A "cyclic-to-normal" memcpy ... given the last function, this
 * one's purpose is clear, it copies out of a cyclic buffer handling
 * wrapping. */
static void shmcb_cyclic_cton_memcpy(
    unsigned int buf_size,
    unsigned char *dest,
    unsigned char *data,
    unsigned int src_offset,
    unsigned int src_len)
{
    /* Cover the case that src_len > buf_size */
    if (src_len > buf_size)
        src_len = buf_size;

    /* Can it be copied all in one go? */
    if (src_offset + src_len < buf_size)
        /* yes */
        memcpy(dest, data + src_offset, src_len);
    else {
        /* no, wrap around the right-hand edge */
        memcpy(dest, data + src_offset, buf_size - src_offset);
        memcpy(dest + buf_size - src_offset, data,
               src_len + src_offset - buf_size);
    }
    return;
}
905 /* Here's the cool hack that makes it all work ... by simply
906 * making the first collection of bytes *be* our header structure
907 * (casting it into the C structure), we have the perfect way to
908 * maintain state in a shared-memory session cache from one call
909 * (and process) to the next, use the shared memory itself! The
910 * original mod_ssl shared-memory session cache uses variables
911 * inside the context, but we simply use that for storing the
912 * pointer to the shared memory itself. And don't forget, after
913 * Apache's initialisation, this "header" is constant/read-only
914 * so we can read it outside any locking.
915 * <grin> - sometimes I just *love* coding y'know?! */
916 static void shmcb_get_header(void *shm_mem, SHMCBHeader **header)
918 *header = (SHMCBHeader *)shm_mem;
922 /* This is what populates our "interesting" structures. Given a
923 * pointer to the header, and an index into the appropriate
924 * division (this must have already been masked using the
925 * division_mask by the caller!), we can populate the provided
926 * SHMCBQueue and SHMCBCache structures with values and
927 * pointers to the underlying shared memory. Upon returning
928 * (if not FALSE), the caller can meddle with the pointer
929 * values and they will map into the shared-memory directly,
930 * as such there's no need to "free" or "set" the Queue or
931 * Cache values, they were themselves references to the *real*
933 static BOOL shmcb_get_division(
934 SHMCBHeader *header, SHMCBQueue *queue,
935 SHMCBCache *cache, unsigned int idx)
937 unsigned char *pQueue;
938 unsigned char *pCache;
941 if (idx > (unsigned int) header->division_mask)
944 /* Locate the blocks of memory storing the corresponding data */
945 pQueue = ((unsigned char *) header) + header->division_offset +
946 (idx * header->division_size);
947 pCache = pQueue + header->queue_size;
949 /* Populate the structures with appropriate pointers */
950 queue->first_pos = (unsigned int *) pQueue;
952 /* Our structures stay packed, no matter what the system's
953 * data-alignment regime is. */
954 queue->pos_count = (unsigned int *) (pQueue + sizeof(unsigned int));
955 queue->indexes = (SHMCBIndex *) (pQueue + (2 * sizeof(unsigned int)));
956 cache->first_pos = (unsigned int *) pCache;
957 cache->pos_count = (unsigned int *) (pCache + sizeof(unsigned int));
958 cache->data = (unsigned char *) (pCache + (2 * sizeof(unsigned int)));
959 queue->header = cache->header = header;
964 /* This returns a pointer to the piece of shared memory containing
965 * a specified 'Index'. SHMCBIndex, like SHMCBHeader, is a fixed
966 * width non-referencing structure of primitive types that can be
967 * cast onto the corresponding block of shared memory. Thus, by
968 * returning a cast pointer to that section of shared memory, the
969 * caller can read and write values to and from the "structure" and
970 * they are actually reading and writing the underlying shared
972 static SHMCBIndex *shmcb_get_index(
973 const SHMCBQueue *queue, unsigned int idx)
976 if (idx > queue->header->index_num)
979 /* Return a pointer to the index. NB: I am being horribly pendantic
980 * here so as to avoid any potential data-alignment assumptions being
981 * placed on the pointer arithmetic by the compiler (sigh). */
982 return (SHMCBIndex *)(((unsigned char *) queue->indexes) +
983 (idx * sizeof(SHMCBIndex)));
/* This function rolls expired cache (and index) entries off the front
987 * of the cyclic buffers in a division. The function returns the number
988 * of expired sessions. */
/* Roll expired sessions off the front of a division's cyclic buffers.
 * The queue and cache advance in lock-step: first walk the queue to find
 * the oldest not-yet-expired entry, then adjust both first_pos/pos_count
 * pairs in one go.  Returns the number of sessions expired.
 * NOTE(review): 'header', 'idx', 'now' and the initialisation of 'loop'
 * are declared on lines outside this excerpt. */
static unsigned int shmcb_expire_division(
    server_rec *s, SHMCBQueue *queue, SHMCBCache *cache)
    unsigned int loop, index_num, pos_count, new_pos;
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "entering shmcb_expire_division");
    /* We must calculate num and space ourselves based on expiry times. */
    new_pos = shmcb_get_safe_uint(queue->first_pos);
    /* Cache useful values */
    header = queue->header;
    index_num = header->index_num;
    pos_count = shmcb_get_safe_uint(queue->pos_count);
    /* Walk forward from the oldest entry; entries are stored in arrival
     * order, so the first unexpired one ends the scan. */
    while (loop < pos_count) {
        idx = shmcb_get_index(queue, new_pos);
        if (shmcb_get_safe_time(&(idx->expires)) > now)
            /* it hasn't expired yet, we're done iterating */
        /* This one should be expired too. Shift to the next entry. */
        new_pos = shmcb_cyclic_increment(index_num, new_pos, 1);
    /* Find the new_offset and make the expiries happen. */
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "will be expiring %u sessions", loop);
    /* We calculate the new_offset by "peeking" (or in the
     * case it's the last entry, "sneaking" ;-). */
    if (loop == pos_count) {
        /* We are expiring everything! This is easy to do... */
        shmcb_set_safe_uint(queue->pos_count, 0);
        shmcb_set_safe_uint(cache->pos_count, 0);
    /* The Queue is easy to adjust */
    shmcb_set_safe_uint(queue->pos_count,
                        shmcb_get_safe_uint(queue->pos_count) - loop);
    shmcb_set_safe_uint(queue->first_pos, new_pos);
    /* peek to the start of the next session */
    idx = shmcb_get_index(queue, new_pos);
    /* We can use shmcb_cyclic_space because we've guaranteed
     * we don't fit the ambiguous full/empty case. */
    shmcb_set_safe_uint(cache->pos_count,
                        shmcb_get_safe_uint(cache->pos_count) -
                        shmcb_cyclic_space(header->cache_data_size,
                                           shmcb_get_safe_uint(cache->first_pos),
                                           shmcb_get_safe_uint(&(idx->offset))));
    shmcb_set_safe_uint(cache->first_pos, shmcb_get_safe_uint(&(idx->offset)));
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "we now have %u sessions",
                 shmcb_get_safe_uint(queue->pos_count));
    /* Account for the expiries in the cache-wide statistics. */
    header->num_expiries += loop;
1054 /* Inserts a new encoded session into a queue/cache pair - expiring
1055 * (early or otherwise) any leading sessions as necessary to ensure
1056 * there is room. An error return (FALSE) should only happen in the
1057 * event of surreal values being passed on, or ridiculously small
1058 * cache sizes. NB: For tracing purposes, this function is also given
1059 * the server_rec to allow "ssl_log()". */
/* Insert a new DER-encoded session at the tail of a division's queue/cache
 * pair, scrolling (early-expiring) leading sessions as necessary to make
 * room.  FALSE should only come back for surreal inputs or ridiculously
 * small cache sizes.  The server_rec is only here for trace logging. */
static BOOL shmcb_insert_encoded_session(
    server_rec *s, SHMCBQueue * queue,
    unsigned char *encoded,
    unsigned int encoded_len,
    unsigned char *session_id,
    SHMCBHeader *header;
    SHMCBIndex *idx = NULL;
    unsigned int gap, new_pos, loop, new_offset;
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "entering shmcb_insert_encoded_session, "
                 "*queue->pos_count = %u",
                 shmcb_get_safe_uint(queue->pos_count));
    /* If there's entries to expire, ditch them first thing. */
    shmcb_expire_division(s, queue, cache);
    header = cache->header;
    /* 'gap' is the number of free bytes left in the cache's data area. */
    gap = header->cache_data_size - shmcb_get_safe_uint(cache->pos_count);
    if (gap < encoded_len) {
        /* Not enough room: scroll whole sessions off the front until at
         * least 'need' more bytes have been reclaimed. */
        new_pos = shmcb_get_safe_uint(queue->first_pos);
        need = (int) encoded_len - (int) gap;
        while ((need > 0) && (loop + 1 < shmcb_get_safe_uint(queue->pos_count))) {
            new_pos = shmcb_cyclic_increment(header->index_num, new_pos, 1);
            idx = shmcb_get_index(queue, new_pos);
            /* Recompute the shortfall as if everything before 'new_pos'
             * were already scrolled away. */
            need = (int) encoded_len - (int) gap -
                shmcb_cyclic_space(header->cache_data_size,
                                   shmcb_get_safe_uint(cache->first_pos),
                                   shmcb_get_safe_uint(&(idx->offset)));
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                     "about to scroll %u sessions from %u",
                     loop, shmcb_get_safe_uint(queue->pos_count));
        /* We are removing "loop" items from the cache. */
        shmcb_set_safe_uint(cache->pos_count,
                            shmcb_get_safe_uint(cache->pos_count) -
                            shmcb_cyclic_space(header->cache_data_size,
                                               shmcb_get_safe_uint(cache->first_pos),
                                               shmcb_get_safe_uint(&(idx->offset))));
        shmcb_set_safe_uint(cache->first_pos, shmcb_get_safe_uint(&(idx->offset)));
        shmcb_set_safe_uint(queue->pos_count, shmcb_get_safe_uint(queue->pos_count) - loop);
        shmcb_set_safe_uint(queue->first_pos, new_pos);
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                     "now only have %u sessions",
                     shmcb_get_safe_uint(queue->pos_count));
        /* Update the stats!!! */
        header->num_scrolled += loop;
    /* probably unnecessary checks, but I'll leave them until this code
    if (shmcb_get_safe_uint(cache->pos_count) + encoded_len >
        header->cache_data_size) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                     "shmcb_insert_encoded_session internal error");
    if (shmcb_get_safe_uint(queue->pos_count) == header->index_num) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                     "shmcb_insert_encoded_session internal error");
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "we have %u bytes and %u indexes free - enough",
                 header->cache_data_size -
                 shmcb_get_safe_uint(cache->pos_count), header->index_num -
                 shmcb_get_safe_uint(queue->pos_count));
    /* HERE WE ASSUME THAT THE NEW SESSION SHOULD GO ON THE END! I'M NOT
     * CHECKING WHETHER IT SHOULD BE GENUINELY "INSERTED" SOMEWHERE.
     * We either fix that, or find out at a "higher" (read "mod_ssl")
     * level whether it is possible to have distinct session caches for
     * any attempted tomfoolery to do with different session timeouts.
     * Knowing in advance that we can have a cache-wide constant timeout
     * would make this stuff *MUCH* more efficient. Mind you, it's very
     * efficient right now because I'm ignoring this problem!!!
    /* Increment to the first unused byte */
    new_offset = shmcb_cyclic_increment(header->cache_data_size,
                                        shmcb_get_safe_uint(cache->first_pos),
                                        shmcb_get_safe_uint(cache->pos_count));
    /* Copy the DER-encoded session into place */
    shmcb_cyclic_ntoc_memcpy(header->cache_data_size, cache->data,
                             new_offset, encoded, encoded_len);
    /* Get the new index that this session is stored in. */
    new_pos = shmcb_cyclic_increment(header->index_num,
                                     shmcb_get_safe_uint(queue->first_pos),
                                     shmcb_get_safe_uint(queue->pos_count));
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "storing in index %u, at offset %u",
                 new_pos, new_offset);
    idx = shmcb_get_index(queue, new_pos);
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                     "shmcb_insert_encoded_session internal error");
    /* Initialise the new index entry (expiry, data offset, second id byte). */
    shmcb_safe_clear(idx, sizeof(SHMCBIndex));
    shmcb_set_safe_time(&(idx->expires), expiry_time);
    shmcb_set_safe_uint(&(idx->offset), new_offset);
    /* idx->removed = (unsigned char)0; */ /* Not needed given the memset above. */
    idx->s_id2 = session_id[1];
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "session_id[0]=%u, idx->s_id2=%u",
                 session_id[0], session_id[1]);
    /* All that remains is to adjust the cache's and queue's "pos_count"s. */
    shmcb_set_safe_uint(cache->pos_count,
                        shmcb_get_safe_uint(cache->pos_count) + encoded_len);
    shmcb_set_safe_uint(queue->pos_count,
                        shmcb_get_safe_uint(queue->pos_count) + 1);
    /* And just for good debugging measure ... */
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "leaving now with %u bytes in the cache and %u indexes",
                 shmcb_get_safe_uint(cache->pos_count),
                 shmcb_get_safe_uint(queue->pos_count));
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "leaving shmcb_insert_encoded_session");
1193 /* Performs a lookup into a queue/cache pair for a
1194 * session_id. If found, the session is deserialised
1195 * and returned, otherwise NULL. */
1196 static SSL_SESSION *shmcb_lookup_session_id(
1197 server_rec *s, SHMCBQueue *queue,
1198 SHMCBCache *cache, UCHAR *id,
1201 unsigned char tempasn[SSL_SESSION_MAX_DER];
1203 SHMCBHeader *header;
1204 SSL_SESSION *pSession = NULL;
1205 unsigned int curr_pos, loop, count;
1206 MODSSL_D2I_SSL_SESSION_CONST unsigned char *ptr;
1209 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1210 "entering shmcb_lookup_session_id");
1212 /* If there are entries to expire, ditch them first thing. */
1213 shmcb_expire_division(s, queue, cache);
1215 curr_pos = shmcb_get_safe_uint(queue->first_pos);
1216 count = shmcb_get_safe_uint(queue->pos_count);
1217 header = queue->header;
1218 for (loop = 0; loop < count; loop++) {
1219 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1220 "loop=%u, count=%u, curr_pos=%u",
1221 loop, count, curr_pos);
1222 idx = shmcb_get_index(queue, curr_pos);
1223 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1224 "idx->s_id2=%u, id[1]=%u, offset=%u",
1225 idx->s_id2, id[1], shmcb_get_safe_uint(&(idx->offset)));
1226 /* Only look into the session further if;
1227 * (a) the second byte of the session_id matches,
1228 * (b) the "removed" flag isn't set,
1229 * (c) the session hasn't expired yet.
1230 * We do (c) like this so that it saves us having to
1231 * do natural expiries ... naturally expired sessions
1232 * scroll off the front anyway when the cache is full and
1233 * "rotating", the only real issue that remains is the
1234 * removal or disabling of forcibly killed sessions. */
1235 if ((idx->s_id2 == id[1]) && !idx->removed &&
1236 (shmcb_get_safe_time(&(idx->expires)) > now)) {
1237 unsigned int session_id_length;
1238 unsigned char *session_id;
1240 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1241 "at index %u, found possible session match",
1243 shmcb_cyclic_cton_memcpy(header->cache_data_size,
1244 tempasn, cache->data,
1245 shmcb_get_safe_uint(&(idx->offset)),
1246 SSL_SESSION_MAX_DER);
1248 pSession = d2i_SSL_SESSION(NULL, &ptr, SSL_SESSION_MAX_DER);
1249 session_id_length = SSL_SESSION_get_session_id_length(pSession);
1250 session_id = SSL_SESSION_get_session_id(pSession);
1252 if (pSession == NULL) {
1253 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
1254 "scach2_lookup_session_id internal error");
1257 if ((session_id_length == idlen) &&
1258 (memcmp(session_id, id, idlen) == 0)) {
1259 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1263 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1265 SSL_SESSION_free(pSession);
1268 curr_pos = shmcb_cyclic_increment(header->index_num, curr_pos, 1);
1270 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
1271 "no matching sessions were found");
/* Search a division for a session matching id/idlen and, if found, mark
 * its index entry "removed" so lookups will skip it (the DER data itself
 * stays in the cyclic cache until it scrolls/expires off).  Note that,
 * unlike lookup/insert, expiry is deliberately run AFTER the scan - see
 * the author's rationale below. */
static BOOL shmcb_remove_session_id(
    server_rec *s, SHMCBQueue *queue,
    SHMCBCache *cache, UCHAR *id, unsigned int idlen)
    unsigned char tempasn[SSL_SESSION_MAX_DER];
    SSL_SESSION *pSession = NULL;
    SHMCBHeader *header;
    unsigned int curr_pos, loop, count;
    MODSSL_D2I_SSL_SESSION_CONST unsigned char *ptr;
    BOOL to_return = FALSE;
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "entering shmcb_remove_session_id");
    /* If there's entries to expire, ditch them first thing. */
    /* shmcb_expire_division(s, queue, cache); */
    /* Regarding the above ... hmmm ... I know my expiry code is slightly
     * "faster" than all this remove stuff ... but if the higher level
     * code calls a "remove" operation (and this *only* seems to happen
     * when it has spotted an expired session before we had a chance to)
     * then it should get credit for a remove (stats-wise). Also, in the
     * off-chance that the server *requests* a renegotiate and wants to
     * wipe the session clean we should give that priority over our own
     * routine expiry handling. So I've moved the expiry check to *after*
     * this general remove stuff. */
    curr_pos = shmcb_get_safe_uint(queue->first_pos);
    count = shmcb_get_safe_uint(queue->pos_count);
    header = cache->header;
    /* Scan every live index entry in the queue. */
    for (loop = 0; loop < count; loop++) {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                     "loop=%u, count=%u, curr_pos=%u",
                     loop, count, curr_pos);
        idx = shmcb_get_index(queue, curr_pos);
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                     "idx->s_id2=%u, id[1]=%u", idx->s_id2,
        /* Only look into the session further if the second byte of the
         * session_id matches. */
        if (idx->s_id2 == id[1]) {
            unsigned int session_id_length;
            unsigned char *session_id;
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                         "at index %u, found possible "
                         "session match", curr_pos);
            /* Copy the DER out of the cyclic cache into a flat buffer. */
            shmcb_cyclic_cton_memcpy(header->cache_data_size,
                                     tempasn, cache->data,
                                     shmcb_get_safe_uint(&(idx->offset)),
                                     SSL_SESSION_MAX_DER);
            pSession = d2i_SSL_SESSION(NULL, &ptr, SSL_SESSION_MAX_DER);
            /* NB: here the NULL check correctly precedes the accessors. */
            if (pSession == NULL) {
                ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                             "shmcb_remove_session_id, internal error");
            session_id_length = SSL_SESSION_get_session_id_length(pSession);
            session_id = SSL_SESSION_get_session_id(pSession);
            if ((session_id_length == idlen)
                && (memcmp(id, session_id, idlen) == 0)) {
                ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                /* Scrub out this session "quietly" */
                idx->removed = (unsigned char) 1;
                SSL_SESSION_free(pSession);
            /* Second-byte collision only: free the candidate and move on. */
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
            SSL_SESSION_free(pSession);
        curr_pos = shmcb_cyclic_increment(header->index_num, curr_pos, 1);
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "no matching sessions were found");
    /* If there's entries to expire, ditch them now. */
    shmcb_expire_division(s, queue, cache);
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "leaving shmcb_remove_session_id");