/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdqueue.h"

struct fd_queue_info_t {
    int idlers;                        /* number of idle worker threads */
    apr_thread_mutex_t *idlers_mutex;  /* protects all fields below */
    apr_thread_cond_t *wait_for_idler; /* signaled when idlers 0 -> 1 */
    int terminated;
    int max_idlers;
    apr_pool_t **recycled_pools;       /* stack of pools donated by idlers */
    int num_recycled;
};

static apr_status_t queue_info_cleanup(void *data_)
{
    fd_queue_info_t *qi = data_;
    int i;

    apr_thread_cond_destroy(qi->wait_for_idler);
    apr_thread_mutex_destroy(qi->idlers_mutex);
    for (i = 0; i < qi->num_recycled; i++) {
        apr_pool_destroy(qi->recycled_pools[i]);
    }
    return APR_SUCCESS;
}

apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
                                  apr_pool_t *pool, int max_idlers)
{
    apr_status_t rv;
    fd_queue_info_t *qi;

    qi = apr_palloc(pool, sizeof(*qi));
    memset(qi, 0, sizeof(*qi));

    rv = apr_thread_mutex_create(&qi->idlers_mutex, APR_THREAD_MUTEX_DEFAULT,
                                 pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    rv = apr_thread_cond_create(&qi->wait_for_idler, pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    qi->recycled_pools = (apr_pool_t **)apr_palloc(pool, max_idlers *
                                                   sizeof(apr_pool_t *));
    qi->num_recycled = 0;
    qi->max_idlers = max_idlers;
    apr_pool_cleanup_register(pool, qi, queue_info_cleanup,
                              apr_pool_cleanup_null);

    *queue_info = qi;

    return APR_SUCCESS;
}

apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
                                    apr_pool_t *pool_to_recycle)
{
    apr_status_t rv;

    rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    AP_DEBUG_ASSERT(queue_info->idlers >= 0);
    AP_DEBUG_ASSERT(queue_info->num_recycled < queue_info->max_idlers);
    if (pool_to_recycle) {
        queue_info->recycled_pools[queue_info->num_recycled++] =
            pool_to_recycle;
    }
    if (queue_info->idlers++ == 0) {
        /* Only signal if we had no idlers before. */
        apr_thread_cond_signal(queue_info->wait_for_idler);
    }
    rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    return APR_SUCCESS;
}

apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
                                          apr_pool_t **recycled_pool)
{
    apr_status_t rv;

    *recycled_pool = NULL;
    rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    AP_DEBUG_ASSERT(queue_info->idlers >= 0);
    while ((queue_info->idlers == 0) && (!queue_info->terminated)) {
        rv = apr_thread_cond_wait(queue_info->wait_for_idler,
                                  queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            apr_status_t rv2;
            rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
            if (rv2 != APR_SUCCESS) {
                return rv2;
            }
            return rv;
        }
    }
    queue_info->idlers--; /* Oh, an idler? Let's take 'em! */
    if (queue_info->num_recycled) {
        *recycled_pool =
            queue_info->recycled_pools[--queue_info->num_recycled];
    }
    rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    else if (queue_info->terminated) {
        return APR_EOF;
    }
    else {
        return APR_SUCCESS;
    }
}

apr_status_t ap_queue_info_term(fd_queue_info_t *queue_info)
{
    apr_status_t rv;

    rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    queue_info->terminated = 1;
    apr_thread_cond_broadcast(queue_info->wait_for_idler);
    rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    return APR_SUCCESS;
}
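
/* Usage sketch (illustrative; not part of the original file, and the
 * thread/variable names 'worker_queue_info' and 'ptrans' are hypothetical):
 * the worker MPM pairs these calls across threads. A worker thread reports
 * itself idle when it finishes a connection, optionally donating its
 * transaction pool for reuse, while the listener thread blocks until at
 * least one idler exists before accepting another connection:
 *
 *     // worker thread, after handling a connection:
 *     rv = ap_queue_info_set_idle(worker_queue_info, ptrans);
 *
 *     // listener thread, before accepting:
 *     apr_pool_t *recycled = NULL;
 *     rv = ap_queue_info_wait_for_idler(worker_queue_info, &recycled);
 *     if (rv == APR_EOF) {
 *         break;  // ap_queue_info_term() was called; shut down
 *     }
 *     // if 'recycled' is non-NULL, it can serve the next connection
 */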

/**
 * Detects when the fd_queue_t is full. This utility function is expected
 * to be called from within critical sections, and is not threadsafe.
 */
#define ap_queue_full(queue) ((queue)->nelts == (queue)->bounds)

/**
 * Detects when the fd_queue_t is empty. This utility function is expected
 * to be called from within critical sections, and is not threadsafe.
 */
#define ap_queue_empty(queue) ((queue)->nelts == 0)

/**
 * Callback routine that is called to destroy this
 * fd_queue_t when its pool is destroyed.
 */
static apr_status_t ap_queue_destroy(void *data)
{
    fd_queue_t *queue = data;

    /* Ignore errors here; we can't do anything about them anyway.
     * XXX: We should at least try to signal an error here; it is
     * indicative of a programmer error. -aaron */
    apr_thread_cond_destroy(queue->not_empty);
    apr_thread_mutex_destroy(queue->one_big_mutex);

    return APR_SUCCESS;
}

/**
 * Initialize the fd_queue_t.
 */
apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a)
{
    int i;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_create(&queue->one_big_mutex,
                                      APR_THREAD_MUTEX_DEFAULT,
                                      a)) != APR_SUCCESS) {
        return rv;
    }
    if ((rv = apr_thread_cond_create(&queue->not_empty, a)) != APR_SUCCESS) {
        return rv;
    }

    queue->data = apr_palloc(a, queue_capacity * sizeof(fd_queue_elem_t));
    queue->bounds = queue_capacity;
    queue->nelts = 0;

    /* Set all the sockets in the queue to NULL */
    for (i = 0; i < queue_capacity; ++i)
        queue->data[i].sd = NULL;

    apr_pool_cleanup_register(a, queue, ap_queue_destroy,
                              apr_pool_cleanup_null);

    return APR_SUCCESS;
}

/**
 * Push a new socket onto the queue. This function does not block; the
 * caller must ensure the queue is not full (debug builds assert it).
 * Once the push operation has completed, it signals other threads waiting
 * in ap_queue_pop() that they may continue consuming sockets.
 */
apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p)
{
    fd_queue_elem_t *elem;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    AP_DEBUG_ASSERT(!queue->terminated);
    AP_DEBUG_ASSERT(!ap_queue_full(queue));

    elem = &queue->data[queue->nelts];
    elem->sd = sd;
    elem->p = p;
    queue->nelts++;

    apr_thread_cond_signal(queue->not_empty);

    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    return APR_SUCCESS;
}

/**
 * Retrieves the next available socket from the queue. If there are no
 * sockets available, it will block until one becomes available.
 * Once retrieved, the socket is placed into the address specified by
 * 'sd'.
 */
apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p)
{
    fd_queue_elem_t *elem;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    /* Keep waiting until we wake up and find that the queue is not empty. */
    if (ap_queue_empty(queue)) {
        if (!queue->terminated) {
            apr_thread_cond_wait(queue->not_empty, queue->one_big_mutex);
        }
        /* If we wake up and it's still empty, then we were interrupted */
        if (ap_queue_empty(queue)) {
            rv = apr_thread_mutex_unlock(queue->one_big_mutex);
            if (rv != APR_SUCCESS) {
                return rv;
            }
            if (queue->terminated) {
                return APR_EOF; /* no more elements ever again */
            }
            else {
                return APR_EINTR;
            }
        }
    }

    elem = &queue->data[--queue->nelts];
    *sd = elem->sd;
    *p = elem->p;
#ifdef AP_DEBUG
    elem->sd = NULL;
    elem->p = NULL;
#endif /* AP_DEBUG */

    rv = apr_thread_mutex_unlock(queue->one_big_mutex);
    return rv;
}
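
/* Usage sketch (illustrative only; 'worker_queue', 'csd', 'ptrans' and
 * process_socket() are hypothetical names): the intended producer/consumer
 * pairing. The listener pushes each accepted socket together with its pool;
 * each worker loops on ap_queue_pop(), treating APR_EOF as the shutdown
 * signal and APR_EINTR as a wakeup that delivered no element:
 *
 *     // listener thread:
 *     rv = ap_queue_push(worker_queue, csd, ptrans);
 *
 *     // worker thread:
 *     for (;;) {
 *         rv = ap_queue_pop(worker_queue, &csd, &ptrans);
 *         if (rv == APR_EOF) {
 *             break;      // queue terminated; no more elements ever
 *         }
 *         if (rv == APR_EINTR) {
 *             continue;   // interrupted wakeup; nothing to consume
 *         }
 *         process_socket(csd, ptrans);
 *     }
 */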

apr_status_t ap_queue_interrupt_all(fd_queue_t *queue)
{
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    apr_thread_cond_broadcast(queue->not_empty);
    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    return APR_SUCCESS;
}

apr_status_t ap_queue_term(fd_queue_t *queue)
{
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    /* we must hold one_big_mutex when setting this... otherwise,
     * we could end up setting it and waking everybody up just after a
     * would-be popper checks it but right before they block
     */
    queue->terminated = 1;
    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    return ap_queue_interrupt_all(queue);
}
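
/* Shutdown sketch (illustrative only; one plausible ordering under the
 * hypothetical setup sketched above): terminate the idler accounting so
 * the listener stops waiting for idlers, then terminate the queue itself,
 * which wakes any blocked poppers via ap_queue_interrupt_all():
 *
 *     ap_queue_info_term(worker_queue_info);  // unblocks wait_for_idler()
 *     ap_queue_term(worker_queue);            // unblocks ap_queue_pop()
 */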