author    Bob Beck <beck@cvs.openbsd.org>    2000-12-15 22:18:21 +0000
committer Bob Beck <beck@cvs.openbsd.org>    2000-12-15 22:18:21 +0000
commit    62b2dc3d6a63b189fff9c8dd45936718011a8b7c (patch)
tree      5fb7464cebf3a76c67f8a2bdcd7f85c454785720 /usr.sbin/httpd
parent    5b87bd903c8034ffb0d13b16a6244f7d0334a240 (diff)
import apache 1.3.27 and mod_ssl 2.8.11
Diffstat (limited to 'usr.sbin/httpd')
-rw-r--r--  usr.sbin/httpd/src/modules/ssl/ssl_scache.c         218
-rw-r--r--  usr.sbin/httpd/src/modules/ssl/ssl_scache_dbm.c     440
-rw-r--r--  usr.sbin/httpd/src/modules/ssl/ssl_scache_shmcb.c  1347
-rw-r--r--  usr.sbin/httpd/src/modules/ssl/ssl_scache_shmht.c   347
4 files changed, 2352 insertions, 0 deletions
diff --git a/usr.sbin/httpd/src/modules/ssl/ssl_scache.c b/usr.sbin/httpd/src/modules/ssl/ssl_scache.c
new file mode 100644
index 00000000000..5df6419feeb
--- /dev/null
+++ b/usr.sbin/httpd/src/modules/ssl/ssl_scache.c
@@ -0,0 +1,218 @@
+/* _ _
+** _ __ ___ ___ __| | ___ ___| | mod_ssl
+** | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+** | | | | | | (_) | (_| | \__ \__ \ | www.modssl.org
+** |_| |_| |_|\___/ \__,_|___|___/___/_| ftp.modssl.org
+** |_____|
+** ssl_scache.c
+** Session Cache Abstraction
+*/
+
+/* ====================================================================
+ * Copyright (c) 1998-2000 Ralf S. Engelschall. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by
+ * Ralf S. Engelschall <rse@engelschall.com> for use in the
+ * mod_ssl project (http://www.modssl.org/)."
+ *
+ * 4. The names "mod_ssl" must not be used to endorse or promote
+ * products derived from this software without prior written
+ * permission. For written permission, please contact
+ * rse@engelschall.com.
+ *
+ * 5. Products derived from this software may not be called "mod_ssl"
+ * nor may "mod_ssl" appear in their names without prior
+ * written permission of Ralf S. Engelschall.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by
+ * Ralf S. Engelschall <rse@engelschall.com> for use in the
+ * mod_ssl project (http://www.modssl.org/)."
+ *
+ * THIS SOFTWARE IS PROVIDED BY RALF S. ENGELSCHALL ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RALF S. ENGELSCHALL OR
+ * HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ */
+ /* ``Open-Source Software: generous
+ programmers from around the world all
+ join forces to help you shoot
+ yourself in the foot for free.''
+ -- Unknown */
+#include "mod_ssl.h"
+
+/* _________________________________________________________________
+**
+** Session Cache: Common Abstraction Layer
+** _________________________________________________________________
+*/
+
+void ssl_scache_init(server_rec *s, pool *p)
+{
+ SSLModConfigRec *mc = myModConfig();
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ ssl_scache_dbm_init(s, p);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ ssl_scache_shmht_init(s, p);
+#ifdef SSL_EXPERIMENTAL_SHMCB
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ ssl_scache_shmcb_init(s, p);
+#endif
+#ifdef SSL_VENDOR
+ else
+ ap_hook_use("ap::mod_ssl::vendor::scache_init",
+ AP_HOOK_SIG3(void,ptr,ptr), AP_HOOK_ALL, s, p);
+#endif
+ return;
+}
+
+void ssl_scache_kill(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig();
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ ssl_scache_dbm_kill(s);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ ssl_scache_shmht_kill(s);
+#ifdef SSL_EXPERIMENTAL_SHMCB
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ ssl_scache_shmcb_kill(s);
+#endif
+#ifdef SSL_VENDOR
+ else
+ ap_hook_use("ap::mod_ssl::vendor::scache_kill",
+ AP_HOOK_SIG2(void,ptr), AP_HOOK_ALL, s);
+#endif
+ return;
+}
+
+BOOL ssl_scache_store(server_rec *s, UCHAR *id, int idlen, time_t expiry, SSL_SESSION *sess)
+{
+ SSLModConfigRec *mc = myModConfig();
+ BOOL rv = FALSE;
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ rv = ssl_scache_dbm_store(s, id, idlen, expiry, sess);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ rv = ssl_scache_shmht_store(s, id, idlen, expiry, sess);
+#ifdef SSL_EXPERIMENTAL_SHMCB
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ rv = ssl_scache_shmcb_store(s, id, idlen, expiry, sess);
+#endif
+#ifdef SSL_VENDOR
+ else
+ ap_hook_use("ap::mod_ssl::vendor::scache_store",
+ AP_HOOK_SIG6(int,ptr,ptr,int,int,ptr), AP_HOOK_ALL,
+ (int *)&rv, s, id, idlen, (int)expiry, sess);
+#endif
+ return rv;
+}
+
+SSL_SESSION *ssl_scache_retrieve(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig();
+ SSL_SESSION *sess = NULL;
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ sess = ssl_scache_dbm_retrieve(s, id, idlen);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ sess = ssl_scache_shmht_retrieve(s, id, idlen);
+#ifdef SSL_EXPERIMENTAL_SHMCB
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ sess = ssl_scache_shmcb_retrieve(s, id, idlen);
+#endif
+#ifdef SSL_VENDOR
+ else
+ ap_hook_use("ap::mod_ssl::vendor::scache_retrieve",
+ AP_HOOK_SIG4(ptr,ptr,ptr,int), AP_HOOK_ALL,
+ &sess, s, id, idlen);
+#endif
+ return sess;
+}
+
+void ssl_scache_remove(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig();
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ ssl_scache_dbm_remove(s, id, idlen);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ ssl_scache_shmht_remove(s, id, idlen);
+#ifdef SSL_EXPERIMENTAL_SHMCB
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ ssl_scache_shmcb_remove(s, id, idlen);
+#endif
+#ifdef SSL_VENDOR
+ else
+ ap_hook_use("ap::mod_ssl::vendor::scache_remove",
+ AP_HOOK_SIG4(void,ptr,ptr,int), AP_HOOK_ALL, s, id, idlen);
+#endif
+ return;
+}
+
+void ssl_scache_status(server_rec *s, pool *p, void (*func)(char *, void *), void *arg)
+{
+ SSLModConfigRec *mc = myModConfig();
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ ssl_scache_dbm_status(s, p, func, arg);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ ssl_scache_shmht_status(s, p, func, arg);
+#ifdef SSL_EXPERIMENTAL_SHMCB
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ ssl_scache_shmcb_status(s, p, func, arg);
+#endif
+#ifdef SSL_VENDOR
+ else
+ ap_hook_use("ap::mod_ssl::vendor::scache_status",
+ AP_HOOK_SIG5(void,ptr,ptr,ptr,ptr), AP_HOOK_ALL,
+ s, p, func, arg);
+#endif
+ return;
+}
+
+void ssl_scache_expire(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig();
+
+ if (mc->nSessionCacheMode == SSL_SCMODE_DBM)
+ ssl_scache_dbm_expire(s);
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMHT)
+ ssl_scache_shmht_expire(s);
+#ifdef SSL_EXPERIMENTAL_SHMCB
+ else if (mc->nSessionCacheMode == SSL_SCMODE_SHMCB)
+ ssl_scache_shmcb_expire(s);
+#endif
+#ifdef SSL_VENDOR
+ else
+ ap_hook_use("ap::mod_ssl::vendor::scache_expire",
+ AP_HOOK_SIG2(void,ptr), AP_HOOK_ALL, s);
+#endif
+ return;
+}
+
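The file above is purely a dispatch layer: each entry point switches on mc->nSessionCacheMode and forwards to the DBM, shared-hash-table or (experimental) shared-cyclic-buffer backend, with an optional vendor hook. For orientation only, here is a minimal sketch of how such a layer is typically driven from OpenSSL's external session-cache callbacks. It is not the actual mod_ssl wiring; the helpers my_server_rec()/my_server_rec_from_ctx() and the 300-second timeout are assumed placeholders for whatever configuration lookup the real caller uses.

#include <time.h>
#include "mod_ssl.h"   /* for server_rec and the ssl_scache_* prototypes */

/* Hypothetical helpers: recover the server_rec for this connection/context. */
static server_rec *my_server_rec(SSL *ssl);
static server_rec *my_server_rec_from_ctx(SSL_CTX *ctx);

static int my_new_session_cb(SSL *ssl, SSL_SESSION *sess)
{
    server_rec *s = my_server_rec(ssl);      /* hypothetical config lookup */
    time_t expiry = time(NULL) + 300;        /* assumed cache timeout */

    ssl_scache_store(s, sess->session_id, sess->session_id_length,
                     expiry, sess);
    return 0;   /* 0 = we kept no reference of our own to the session */
}

static SSL_SESSION *my_get_session_cb(SSL *ssl, unsigned char *id,
                                      int idlen, int *do_copy)
{
    server_rec *s = my_server_rec(ssl);      /* hypothetical config lookup */

    *do_copy = 0;   /* retrieve already returns a private, deserialized copy */
    return ssl_scache_retrieve(s, id, idlen);
}

static void my_remove_session_cb(SSL_CTX *ctx, SSL_SESSION *sess)
{
    server_rec *s = my_server_rec_from_ctx(ctx);   /* hypothetical lookup */

    ssl_scache_remove(s, sess->session_id, sess->session_id_length);
}

/* Registration, done once per SSL_CTX at startup. */
static void my_register_scache_callbacks(SSL_CTX *ctx)
{
    SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_SERVER);
    SSL_CTX_sess_set_new_cb(ctx, my_new_session_cb);
    SSL_CTX_sess_set_get_cb(ctx, my_get_session_cb);
    SSL_CTX_sess_set_remove_cb(ctx, my_remove_session_cb);
}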
diff --git a/usr.sbin/httpd/src/modules/ssl/ssl_scache_dbm.c b/usr.sbin/httpd/src/modules/ssl/ssl_scache_dbm.c
new file mode 100644
index 00000000000..225eaf14b13
--- /dev/null
+++ b/usr.sbin/httpd/src/modules/ssl/ssl_scache_dbm.c
@@ -0,0 +1,440 @@
+/* _ _
+** _ __ ___ ___ __| | ___ ___| | mod_ssl
+** | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+** | | | | | | (_) | (_| | \__ \__ \ | www.modssl.org
+** |_| |_| |_|\___/ \__,_|___|___/___/_| ftp.modssl.org
+** |_____|
+** ssl_scache_dbm.c
+** Session Cache via DBM
+*/
+
+/* ====================================================================
+ * Copyright (c) 1998-2000 Ralf S. Engelschall. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by
+ * Ralf S. Engelschall <rse@engelschall.com> for use in the
+ * mod_ssl project (http://www.modssl.org/)."
+ *
+ * 4. The names "mod_ssl" must not be used to endorse or promote
+ * products derived from this software without prior written
+ * permission. For written permission, please contact
+ * rse@engelschall.com.
+ *
+ * 5. Products derived from this software may not be called "mod_ssl"
+ * nor may "mod_ssl" appear in their names without prior
+ * written permission of Ralf S. Engelschall.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by
+ * Ralf S. Engelschall <rse@engelschall.com> for use in the
+ * mod_ssl project (http://www.modssl.org/)."
+ *
+ * THIS SOFTWARE IS PROVIDED BY RALF S. ENGELSCHALL ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RALF S. ENGELSCHALL OR
+ * HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ */
+
+#include "mod_ssl.h"
+
+void ssl_scache_dbm_init(server_rec *s, pool *p)
+{
+ SSLModConfigRec *mc = myModConfig();
+ DBM *dbm;
+
+ /* for the DBM we need the data file */
+ if (mc->szSessionCacheDataFile == NULL) {
+ ssl_log(s, SSL_LOG_ERROR, "SSLSessionCache required");
+ ssl_die();
+ }
+
+ /* open it once to create it and to make sure it _can_ be created */
+ ssl_mutex_on(s);
+ if ((dbm = ssl_dbm_open(mc->szSessionCacheDataFile,
+ O_RDWR|O_CREAT, SSL_DBM_FILE_MODE)) == NULL) {
+ ssl_log(s, SSL_LOG_ERROR|SSL_ADD_ERRNO,
+ "Cannot create SSLSessionCache DBM file `%s'",
+ mc->szSessionCacheDataFile);
+ ssl_mutex_off(s);
+ return;
+ }
+ ssl_dbm_close(dbm);
+
+#if !defined(OS2) && !defined(WIN32)
+ /*
+ * We have to make sure the Apache child processes have access to
+ * the DBM file. But because there are brain-dead platforms where we
+ * cannot exactly determine the suffixes, we try all possibilities.
+ */
+ if (geteuid() == 0 /* is superuser */) {
+ chown(mc->szSessionCacheDataFile, ap_user_id, -1 /* no gid change */);
+ if (chown(ap_pstrcat(p, mc->szSessionCacheDataFile, SSL_DBM_FILE_SUFFIX_DIR, NULL),
+ ap_user_id, -1) == -1) {
+ if (chown(ap_pstrcat(p, mc->szSessionCacheDataFile, ".db", NULL),
+ ap_user_id, -1) == -1)
+ chown(ap_pstrcat(p, mc->szSessionCacheDataFile, ".dir", NULL),
+ ap_user_id, -1);
+ }
+ if (chown(ap_pstrcat(p, mc->szSessionCacheDataFile, SSL_DBM_FILE_SUFFIX_PAG, NULL),
+ ap_user_id, -1) == -1) {
+ if (chown(ap_pstrcat(p, mc->szSessionCacheDataFile, ".db", NULL),
+ ap_user_id, -1) == -1)
+ chown(ap_pstrcat(p, mc->szSessionCacheDataFile, ".pag", NULL),
+ ap_user_id, -1);
+ }
+ }
+#endif
+ ssl_mutex_off(s);
+ ssl_scache_dbm_expire(s);
+ return;
+}
+
+void ssl_scache_dbm_kill(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig();
+ pool *p;
+
+ if ((p = ap_make_sub_pool(NULL)) != NULL) {
+ /* the correct way */
+ unlink(ap_pstrcat(p, mc->szSessionCacheDataFile, SSL_DBM_FILE_SUFFIX_DIR, NULL));
+ unlink(ap_pstrcat(p, mc->szSessionCacheDataFile, SSL_DBM_FILE_SUFFIX_PAG, NULL));
+ /* the additional ways to be sure */
+ unlink(ap_pstrcat(p, mc->szSessionCacheDataFile, ".dir", NULL));
+ unlink(ap_pstrcat(p, mc->szSessionCacheDataFile, ".pag", NULL));
+ unlink(ap_pstrcat(p, mc->szSessionCacheDataFile, ".db", NULL));
+ unlink(mc->szSessionCacheDataFile);
+ ap_destroy_pool(p);
+ }
+ return;
+}
+
+BOOL ssl_scache_dbm_store(server_rec *s, UCHAR *id, int idlen, time_t expiry, SSL_SESSION *sess)
+{
+ SSLModConfigRec *mc = myModConfig();
+ DBM *dbm;
+ datum dbmkey;
+ datum dbmval;
+ UCHAR ucaData[SSL_SESSION_MAX_DER];
+ int nData;
+ UCHAR *ucp;
+
+ /* streamline session data */
+ ucp = ucaData;
+ nData = i2d_SSL_SESSION(sess, &ucp);
+
+ /* be careful: do not try to store too many bytes in a DBM file! */
+#ifdef SSL_USE_SDBM
+ if ((idlen + nData) >= PAIRMAX)
+ return FALSE;
+#else
+ if ((idlen + nData) >= 950 /* stay safely below approx. 1KB */)
+ return FALSE;
+#endif
+
+ /* create DBM key */
+ dbmkey.dptr = (char *)id;
+ dbmkey.dsize = idlen;
+
+ /* create DBM value */
+ dbmval.dsize = sizeof(time_t) + nData;
+ dbmval.dptr = (char *)malloc(dbmval.dsize);
+ if (dbmval.dptr == NULL)
+ return FALSE;
+ memcpy((char *)dbmval.dptr, &expiry, sizeof(time_t));
+ memcpy((char *)dbmval.dptr+sizeof(time_t), ucaData, nData);
+
+ /* and store it to the DBM file */
+ ssl_mutex_on(s);
+ if ((dbm = ssl_dbm_open(mc->szSessionCacheDataFile,
+ O_RDWR, SSL_DBM_FILE_MODE)) == NULL) {
+ ssl_log(s, SSL_LOG_ERROR|SSL_ADD_ERRNO,
+ "Cannot open SSLSessionCache DBM file `%s' for writing (store)",
+ mc->szSessionCacheDataFile);
+ ssl_mutex_off(s);
+ free(dbmval.dptr);
+ return FALSE;
+ }
+ if (ssl_dbm_store(dbm, dbmkey, dbmval, DBM_INSERT) < 0) {
+ ssl_log(s, SSL_LOG_ERROR|SSL_ADD_ERRNO,
+ "Cannot store SSL session to DBM file `%s'",
+ mc->szSessionCacheDataFile);
+ ssl_dbm_close(dbm);
+ ssl_mutex_off(s);
+ free(dbmval.dptr);
+ return FALSE;
+ }
+ ssl_dbm_close(dbm);
+ ssl_mutex_off(s);
+
+ /* free temporary buffers */
+ free(dbmval.dptr);
+
+ /* allow the regular expiring to occur */
+ ssl_scache_dbm_expire(s);
+
+ return TRUE;
+}
+
+SSL_SESSION *ssl_scache_dbm_retrieve(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig();
+ DBM *dbm;
+ datum dbmkey;
+ datum dbmval;
+ SSL_SESSION *sess = NULL;
+ UCHAR *ucpData;
+ int nData;
+ time_t expiry;
+ time_t now;
+
+ /* allow the regular expiring to occur */
+ ssl_scache_dbm_expire(s);
+
+ /* create DBM key and values */
+ dbmkey.dptr = (char *)id;
+ dbmkey.dsize = idlen;
+
+ /* and fetch it from the DBM file */
+ ssl_mutex_on(s);
+ if ((dbm = ssl_dbm_open(mc->szSessionCacheDataFile,
+ O_RDONLY, SSL_DBM_FILE_MODE)) == NULL) {
+ ssl_log(s, SSL_LOG_ERROR|SSL_ADD_ERRNO,
+ "Cannot open SSLSessionCache DBM file `%s' for reading (fetch)",
+ mc->szSessionCacheDataFile);
+ ssl_mutex_off(s);
+ return NULL;
+ }
+ dbmval = ssl_dbm_fetch(dbm, dbmkey);
+ ssl_dbm_close(dbm);
+ ssl_mutex_off(s);
+
+ /* immediately return if not found */
+ if (dbmval.dptr == NULL || dbmval.dsize <= sizeof(time_t))
+ return NULL;
+
+ /* parse resulting data */
+ nData = dbmval.dsize-sizeof(time_t);
+ ucpData = (UCHAR *)malloc(nData);
+ if (ucpData == NULL)
+ return NULL;
+ memcpy(ucpData, (char *)dbmval.dptr+sizeof(time_t), nData);
+ memcpy(&expiry, dbmval.dptr, sizeof(time_t));
+
+ /* make sure the stuff is still not expired */
+ now = time(NULL);
+ if (expiry <= now) {
+ ssl_scache_dbm_remove(s, id, idlen);
+ return NULL;
+ }
+
+ /* unstream (deserialize) the SSL_SESSION */
+ sess = d2i_SSL_SESSION(NULL, &ucpData, nData);
+
+ return sess;
+}
+
+void ssl_scache_dbm_remove(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig();
+ DBM *dbm;
+ datum dbmkey;
+
+ /* create DBM key and values */
+ dbmkey.dptr = (char *)id;
+ dbmkey.dsize = idlen;
+
+ /* and delete it from the DBM file */
+ ssl_mutex_on(s);
+ if ((dbm = ssl_dbm_open(mc->szSessionCacheDataFile,
+ O_RDWR, SSL_DBM_FILE_MODE)) == NULL) {
+ ssl_log(s, SSL_LOG_ERROR|SSL_ADD_ERRNO,
+ "Cannot open SSLSessionCache DBM file `%s' for writing (delete)",
+ mc->szSessionCacheDataFile);
+ ssl_mutex_off(s);
+ return;
+ }
+ ssl_dbm_delete(dbm, dbmkey);
+ ssl_dbm_close(dbm);
+ ssl_mutex_off(s);
+
+ return;
+}
+
+void ssl_scache_dbm_expire(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig();
+ SSLSrvConfigRec *sc = mySrvConfig(s);
+ static time_t tLast = 0;
+ DBM *dbm;
+ datum dbmkey;
+ datum dbmval;
+ pool *p;
+ time_t tExpiresAt;
+ int nElements = 0;
+ int nDeleted = 0;
+ int bDelete;
+ datum *keylist;
+ int keyidx;
+ int i;
+ time_t tNow;
+
+ /*
+ * make sure the expiration of not-yet-accessed session
+ * cache entries is done only from time to time
+ */
+ tNow = time(NULL);
+ if (tNow < tLast+sc->nSessionCacheTimeout)
+ return;
+ tLast = tNow;
+
+ /*
+ * Here we have to be very careful: not all DBM libraries are
+ * smart enough to allow one to iterate over the elements and at the
+ * same time delete expired ones. Some of them get totally crazy
+ * while others have no problems. So we have to do it the slower but
+ * safer way: we first iterate over all elements and remember
+ * those which have to be expired. Then in a second pass we delete
+ * all those expired elements. Additionally we reopen the DBM file
+ * between the passes to be really sure of its state.
+ */
+
+#define KEYMAX 1024
+
+ ssl_mutex_on(s);
+ for (;;) {
+ /* allocate the key array in a memory sub pool */
+ if ((p = ap_make_sub_pool(NULL)) == NULL)
+ break;
+ if ((keylist = ap_palloc(p, sizeof(dbmkey)*KEYMAX)) == NULL) {
+ ap_destroy_pool(p);
+ break;
+ }
+
+ /* pass 1: scan DBM database */
+ keyidx = 0;
+ if ((dbm = ssl_dbm_open(mc->szSessionCacheDataFile,
+ O_RDWR, SSL_DBM_FILE_MODE)) == NULL) {
+ ssl_log(s, SSL_LOG_ERROR|SSL_ADD_ERRNO,
+ "Cannot open SSLSessionCache DBM file `%s' for scanning",
+ mc->szSessionCacheDataFile);
+ ap_destroy_pool(p);
+ break;
+ }
+ dbmkey = ssl_dbm_firstkey(dbm);
+ while (dbmkey.dptr != NULL) {
+ nElements++;
+ bDelete = FALSE;
+ dbmval = ssl_dbm_fetch(dbm, dbmkey);
+ if (dbmval.dsize <= sizeof(time_t) || dbmval.dptr == NULL)
+ bDelete = TRUE;
+ else {
+ memcpy(&tExpiresAt, dbmval.dptr, sizeof(time_t));
+ if (tExpiresAt <= tNow)
+ bDelete = TRUE;
+ }
+ if (bDelete) {
+ if ((keylist[keyidx].dptr = ap_palloc(p, dbmkey.dsize)) != NULL) {
+ memcpy(keylist[keyidx].dptr, dbmkey.dptr, dbmkey.dsize);
+ keylist[keyidx].dsize = dbmkey.dsize;
+ keyidx++;
+ if (keyidx == KEYMAX)
+ break;
+ }
+ }
+ dbmkey = ssl_dbm_nextkey(dbm);
+ }
+ ssl_dbm_close(dbm);
+
+ /* pass 2: delete expired elements */
+ if ((dbm = ssl_dbm_open(mc->szSessionCacheDataFile,
+ O_RDWR, SSL_DBM_FILE_MODE)) == NULL) {
+ ssl_log(s, SSL_LOG_ERROR|SSL_ADD_ERRNO,
+ "Cannot re-open SSLSessionCache DBM file `%s' for expiring",
+ mc->szSessionCacheDataFile);
+ ap_destroy_pool(p);
+ break;
+ }
+ for (i = 0; i < keyidx; i++) {
+ ssl_dbm_delete(dbm, keylist[i]);
+ nDeleted++;
+ }
+ ssl_dbm_close(dbm);
+
+ /* destroy temporary pool */
+ ap_destroy_pool(p);
+
+ if (keyidx < KEYMAX)
+ break;
+ }
+ ssl_mutex_off(s);
+
+ ssl_log(s, SSL_LOG_TRACE, "Inter-Process Session Cache (DBM) Expiry: "
+ "old: %d, new: %d, removed: %d", nElements, nElements-nDeleted, nDeleted);
+ return;
+}
+
+void ssl_scache_dbm_status(server_rec *s, pool *p, void (*func)(char *, void *), void *arg)
+{
+ SSLModConfigRec *mc = myModConfig();
+ DBM *dbm;
+ datum dbmkey;
+ datum dbmval;
+ int nElem;
+ int nSize;
+ int nAverage;
+
+ nElem = 0;
+ nSize = 0;
+ ssl_mutex_on(s);
+ if ((dbm = ssl_dbm_open(mc->szSessionCacheDataFile,
+ O_RDONLY, SSL_DBM_FILE_MODE)) == NULL) {
+ ssl_log(s, SSL_LOG_ERROR|SSL_ADD_ERRNO,
+ "Cannot open SSLSessionCache DBM file `%s' for status retrival",
+ mc->szSessionCacheDataFile);
+ ssl_mutex_off(s);
+ return;
+ }
+ dbmkey = ssl_dbm_firstkey(dbm);
+ for ( ; dbmkey.dptr != NULL; dbmkey = ssl_dbm_nextkey(dbm)) {
+ dbmval = ssl_dbm_fetch(dbm, dbmkey);
+ if (dbmval.dptr == NULL)
+ continue;
+ nElem += 1;
+ nSize += dbmval.dsize;
+ }
+ ssl_dbm_close(dbm);
+ ssl_mutex_off(s);
+ if (nSize > 0 && nElem > 0)
+ nAverage = nSize / nElem;
+ else
+ nAverage = 0;
+ func(ap_psprintf(p, "cache type: <b>DBM</b>, maximum size: <b>unlimited</b><br>"), arg);
+ func(ap_psprintf(p, "current sessions: <b>%d</b>, current size: <b>%d</b> bytes<br>", nElem, nSize), arg);
+ func(ap_psprintf(p, "average session size: <b>%d</b> bytes<br>", nAverage), arg);
+ return;
+}
+
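For reference, the DBM record format used above is deliberately simple: the key is the raw session id, and the value is the expiry time immediately followed by the DER-encoded session. Below is a standalone sketch of that packing convention; the function names are illustrative only and plain malloc is used instead of the ssl_dbm_* wrappers.

#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Pack a DBM value: [ time_t expiry ][ DER-encoded session bytes ]. */
static char *pack_scache_value(time_t expiry, const unsigned char *der,
                               int der_len, size_t *out_len)
{
    char *val;

    *out_len = sizeof(time_t) + der_len;
    if ((val = malloc(*out_len)) == NULL)
        return NULL;
    memcpy(val, &expiry, sizeof(time_t));
    memcpy(val + sizeof(time_t), der, der_len);
    return val;
}

/* Unpack it again; returns a pointer into 'val' at the DER bytes,
 * or NULL if the record is too short to be valid. */
static const unsigned char *unpack_scache_value(const char *val, size_t len,
                                                time_t *expiry, int *der_len)
{
    if (len <= sizeof(time_t))
        return NULL;
    memcpy(expiry, val, sizeof(time_t));
    *der_len = len - sizeof(time_t);
    return (const unsigned char *)val + sizeof(time_t);
}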
diff --git a/usr.sbin/httpd/src/modules/ssl/ssl_scache_shmcb.c b/usr.sbin/httpd/src/modules/ssl/ssl_scache_shmcb.c
new file mode 100644
index 00000000000..d9307f4b617
--- /dev/null
+++ b/usr.sbin/httpd/src/modules/ssl/ssl_scache_shmcb.c
@@ -0,0 +1,1347 @@
+/* _ _
+** _ __ ___ ___ __| | ___ ___| | mod_ssl
+** | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+** | | | | | | (_) | (_| | \__ \__ \ | www.modssl.org
+** |_| |_| |_|\___/ \__,_|___|___/___/_| ftp.modssl.org
+** |_____|
+** ssl_scache_shmcb.c
+** Session Cache via Shared Memory (Cyclic Buffer Variant)
+*/
+
+/* ====================================================================
+ * Copyright (c) 2000 Ralf S. Engelschall. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by
+ * Ralf S. Engelschall <rse@engelschall.com> for use in the
+ * mod_ssl project (http://www.modssl.org/)."
+ *
+ * 4. The names "mod_ssl" must not be used to endorse or promote
+ * products derived from this software without prior written
+ * permission. For written permission, please contact
+ * rse@engelschall.com.
+ *
+ * 5. Products derived from this software may not be called "mod_ssl"
+ * nor may "mod_ssl" appear in their names without prior
+ * written permission of Ralf S. Engelschall.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by
+ * Ralf S. Engelschall <rse@engelschall.com> for use in the
+ * mod_ssl project (http://www.modssl.org/)."
+ *
+ * THIS SOFTWARE IS PROVIDED BY RALF S. ENGELSCHALL ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RALF S. ENGELSCHALL OR
+ * HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ */
+
+#include "mod_ssl.h"
+
+/*
+ * This shared memory based SSL session cache implementation was
+ * originally written by Geoff Thorpe <geoff@eu.c2.net> for C2Net Europe
+ * and as a contribution to Ralf Engelschall's mod_ssl project.
+ */
+
+#ifdef SSL_EXPERIMENTAL_SHMCB
+
+/*
+ * The shared-memory segment header can be cast to and from the
+ * SHMCBHeader type, all other structures need to be initialised by
+ * utility functions.
+ *
+ * The "header" looks like this;
+ *
+ * data applying to the overall structure:
+ * - division_offset (unsigned int):
+ * how far into the shared memory segment the first division is.
+ * - division_size (unsigned int):
+ * how many bytes each division occupies.
+ * (NB: This includes the queue and the cache)
+ * - division_mask (unsigned char):
+ * the "mask" in the next line. Add one to this,
+ * and that's the number of divisions.
+ *
+ * data applying to within each division:
+ * - queue_size (unsigned int):
+ * how big each "queue" is. NB: The queue is the first block in each
+ * division and is followed immediately by the cache itself, so
+ * there's no cache_offset value.
+ *
+ * data applying to within each queue:
+ * - index_num (unsigned char):
+ * how many indexes in each cache's queue
+ * - index_offset (unsigned char):
+ * how far into the queue the first index is.
+ * - index_size:
+ * how big each index is.
+ *
+ * data applying to within each cache:
+ * - cache_data_offset (unsigned int):
+ * how far into the cache the session-data array is stored.
+ * - cache_data_size (unsigned int):
+ * how big each cache's data block is.
+ *
+ * statistics data (this will eventually be per-division but right now
+ * there's only one mutex):
+ * - stores (unsigned long):
+ * how many stores have been performed in the cache.
+ * - expiries (unsigned long):
+ * how many sessions have been expired from the cache.
+ * - scrolled (unsigned long):
+ * how many sessions have been scrolled out of a full cache during a
+ * "store" operation. This is different from the "removes" stats:
+ * those are requested by mod_ssl/Apache, whereas these are done because of
+ * cache logistics. (NB: Also, this value should be deducible from
+ * the others if my code has no bugs, but I count it anyway - plus
+ * it helps debugging :-).
+ * - retrieves_hit (unsigned long):
+ * how many session-retrieves have succeeded.
+ * - retrieves_miss (unsigned long):
+ * how many session-retrieves have failed.
+ * - removes_hit (unsigned long):
+ * - removes_miss (unsigned long):
+ *
+ * Following immediately after the header is an array of "divisions".
+ * Each division is simply a "queue" immediately followed by its
+ * corresponding "cache". Each division handles some pre-defined band
+ * of sessions by using the "division_mask" in the header. Eg. if
+ * division_mask=0x1f then there are 32 divisions, the first of which
+ * will store sessions whose least-significant 5 bits are 0, the second
+ * stores sessions whose LS 5 bits equal 1, etc. A queue is an indexing
+ * structure referring to its corresponding cache.
+ *
+ * A "queue" looks like this;
+ *
+ * - first_pos (unsigned int):
+ * the location within the array of indexes where the virtual
+ * "left-hand-edge" of the cyclic buffer is.
+ * - pos_count (unsigned int):
+ * the number of indexes occupied from first_pos onwards.
+ *
+ * ...followed by an array of indexes, each of which can be
+ * memcpy'd to and from an SHMCBIndex, and look like this;
+ *
+ * - expires (time_t):
+ * the time() value at which this session expires.
+ * - offset (unsigned int):
+ * the offset within the cache data block where the corresponding
+ * session is stored.
+ * - s_id2 (unsigned char):
+ * the second byte of the session_id, stored as an optimisation to
+ * reduce the number of d2i_SSL_SESSION calls that are made when doing
+ * a lookup.
+ * - removed (unsigned char):
+ * a byte used to indicate whether a session has been "passively"
+ * removed. Ie. it is still in the cache but is to be disregarded by
+ * any "retrieve" operation.
+ *
+ * A "cache" looks like this;
+ *
+ * - first_pos (unsigned int):
+ * the location within the data block where the virtual
+ * "left-hand-edge" of the cyclic buffer is.
+ * - pos_count (unsigned int):
+ * the number of bytes used in the data block from first_pos onwards.
+ *
+ * ...followed by the data block in which actual DER-encoded SSL
+ * sessions are stored.
+ */
+
+/*
+ * Header - can be memcpy'd to and from the front of the shared
+ * memory segment. NB: The first copy (commented out) has the
+ * elements in a meaningful order, but due to data-alignment
+ * braindeadness, the second (uncommented) copy has the types grouped
+ * so as to decrease "struct-bloat". sigh.
+ */
+typedef struct {
+#if 0
+ unsigned char division_mask;
+ unsigned int division_offset;
+ unsigned int division_size;
+ unsigned int queue_size;
+ unsigned char index_num;
+ unsigned char index_offset;
+ unsigned char index_size;
+ unsigned int cache_data_offset;
+ unsigned int cache_data_size;
+ unsigned long num_stores;
+ unsigned long num_expiries;
+ unsigned long num_scrolled;
+ unsigned long num_retrieves_hit;
+ unsigned long num_retrieves_miss;
+ unsigned long num_removes_hit;
+ unsigned long num_removes_miss;
+#else
+ unsigned long num_stores;
+ unsigned long num_expiries;
+ unsigned long num_scrolled;
+ unsigned long num_retrieves_hit;
+ unsigned long num_retrieves_miss;
+ unsigned long num_removes_hit;
+ unsigned long num_removes_miss;
+ unsigned int division_offset;
+ unsigned int division_size;
+ unsigned int queue_size;
+ unsigned int cache_data_offset;
+ unsigned int cache_data_size;
+ unsigned char division_mask;
+ unsigned char index_num;
+ unsigned char index_offset;
+ unsigned char index_size;
+#endif
+} SHMCBHeader;
+
+/*
+ * Index - can be memcpy'd to and from an index inside each
+ * queue's index array.
+ */
+typedef struct {
+ time_t expires;
+ unsigned int offset;
+ unsigned char s_id2;
+ unsigned char removed;
+} SHMCBIndex;
+
+/*
+ * Queue - must be populated by a call to shmcb_get_division
+ * and the structure's pointers are used for updating (ie.
+ * the structure doesn't need any "set" to update values).
+ */
+typedef struct {
+ SHMCBHeader *header;
+ unsigned int *first_pos;
+ unsigned int *pos_count;
+ SHMCBIndex *indexes;
+} SHMCBQueue;
+
+/*
+ * Cache - same comment as for Queue. 'Queue's are in a 1-1
+ * correspondence with 'Cache's and are usually carried round
+ * in a pair; they are only separated for clarity.
+ */
+typedef struct {
+ SHMCBHeader *header;
+ unsigned int *first_pos;
+ unsigned int *pos_count;
+ unsigned char *data;
+} SHMCBCache;
+
+/*
+ * Forward function prototypes.
+ */
+
+/* Functions for working around data-alignment-picky systems (sparcs,
+ Irix, etc). These use "memcpy" as a way of foxing these systems into
+ treating the composite types as byte-arrays rather than higher-level
+ primitives that they prefer to have 4-(or 8-)byte aligned. I don't
+ envisage this being a performance issue as a couple of 2 or 4 byte
+ memcpys can hardly make a dent on the massive memmove operations this
+ cache technique avoids, nor the overheads of ASN en/decoding. */
+static unsigned int shmcb_get_safe_uint(unsigned int *);
+static void shmcb_set_safe_uint(unsigned int *, unsigned int);
+#if 0 /* Unused so far */
+static unsigned long shmcb_get_safe_ulong(unsigned long *);
+static void shmcb_set_safe_ulong(unsigned long *, unsigned long);
+#endif
+static time_t shmcb_get_safe_time(time_t *);
+static void shmcb_set_safe_time(time_t *, time_t);
+
+/* Underlying functions for session-caching */
+static BOOL shmcb_init_memory(server_rec *, void *, unsigned int);
+static BOOL shmcb_store_session(server_rec *, void *, UCHAR *, int, SSL_SESSION *, time_t);
+static SSL_SESSION *shmcb_retrieve_session(server_rec *, void *, UCHAR *, int);
+static BOOL shmcb_remove_session(server_rec *, void *, UCHAR *, int);
+
+/* Utility functions for manipulating the structures */
+static void shmcb_get_header(void *, SHMCBHeader **);
+static BOOL shmcb_get_division(SHMCBHeader *, SHMCBQueue *, SHMCBCache *, unsigned int);
+static SHMCBIndex *shmcb_get_index(const SHMCBQueue *, unsigned int);
+static unsigned int shmcb_expire_division(server_rec *, SHMCBQueue *, SHMCBCache *);
+static BOOL shmcb_insert_encoded_session(server_rec *, SHMCBQueue *, SHMCBCache *, unsigned char *, unsigned int, unsigned char *, time_t);
+static SSL_SESSION *shmcb_lookup_session_id(server_rec *, SHMCBQueue *, SHMCBCache *, UCHAR *, int);
+static BOOL shmcb_remove_session_id(server_rec *, SHMCBQueue *, SHMCBCache *, UCHAR *, int);
+
+/*
+ * Data-alignment functions (a.k.a. avoidance tactics)
+ *
+ * NB: On HPUX (and possibly others) there is a *very* mischievous little
+ * "optimisation" in the compilers where it will convert the following;
+ * memcpy(dest_ptr, &source, sizeof(unsigned int));
+ * (where dest_ptr is of type (unsigned int *) and source is (unsigned int))
+ * into;
+ * *dest_ptr = source; (or *dest_ptr = *(&source), not sure).
+ * Either way, it completely destroys the whole point of these _safe_
+ * functions, because the assignment operation will fall victim to the
+ * architecture's byte-alignment dictations, whereas the memcpy (as a
+ * byte-by-byte copy) should not. sigh. So, if you're wondering about the
+ * apparently unnecessary conversions to (unsigned char *) in these
+ * functions, you now have an explanation. Don't just revert them back and
+ * say "ooh look, it still works" - if you try it on HPUX (well, 32-bit
+ * HPUX 11.00 at least) you may find it fails with a SIGBUS. :-(
+ */
+
+static unsigned int shmcb_get_safe_uint(unsigned int *ptr)
+{
+ unsigned char *from;
+ unsigned int ret;
+
+ from = (unsigned char *)ptr;
+ memcpy(&ret, from, sizeof(unsigned int));
+ return ret;
+}
+
+static void shmcb_set_safe_uint(unsigned int *ptr, unsigned int val)
+{
+ unsigned char *to, *from;
+
+ to = (unsigned char *)ptr;
+ from = (unsigned char *)(&val);
+ memcpy(to, from, sizeof(unsigned int));
+}
+
+#if 0 /* Unused so far */
+static unsigned long shmcb_get_safe_ulong(unsigned long *ptr)
+{
+ unsigned char *from;
+ unsigned long ret;
+
+ from = (unsigned char *)ptr;
+ memcpy(&ret, from, sizeof(unsigned long));
+ return ret;
+}
+
+static void shmcb_set_safe_ulong(unsigned long *ptr, unsigned long val)
+{
+ unsigned char *to, *from;
+
+ to = (unsigned char *)ptr;
+ from = (unsigned char *)(&val);
+ memcpy(to, from, sizeof(unsigned long));
+}
+#endif
+
+static time_t shmcb_get_safe_time(time_t * ptr)
+{
+ unsigned char *from;
+ time_t ret;
+
+ from = (unsigned char *)ptr;
+ memcpy(&ret, from, sizeof(time_t));
+ return ret;
+}
+
+static void shmcb_set_safe_time(time_t * ptr, time_t val)
+{
+ unsigned char *to, *from;
+
+ to = (unsigned char *)ptr;
+ from = (unsigned char *)(&val);
+ memcpy(to, from, sizeof(time_t));
+}
+
+/*
+**
+** High-Level "handlers" as per ssl_scache.c
+**
+*/
+
+static void *shmcb_malloc(size_t size)
+{
+ SSLModConfigRec *mc = myModConfig();
+ return ap_mm_malloc(mc->pSessionCacheDataMM, size);
+}
+
+void ssl_scache_shmcb_init(server_rec *s, pool *p)
+{
+ SSLModConfigRec *mc = myModConfig();
+ AP_MM *mm;
+ void *shm_segment = NULL;
+ int avail, avail_orig;
+
+ /*
+ * Create shared memory segment
+ */
+ if (mc->szSessionCacheDataFile == NULL) {
+ ssl_log(s, SSL_LOG_ERROR, "SSLSessionCache required");
+ ssl_die();
+ }
+ if ((mm = ap_mm_create(mc->nSessionCacheDataSize,
+ mc->szSessionCacheDataFile)) == NULL) {
+ ssl_log(s, SSL_LOG_ERROR,
+ "Cannot allocate shared memory: %s", ap_mm_error());
+ ssl_die();
+ }
+ mc->pSessionCacheDataMM = mm;
+
+ /*
+ * Make sure the child processes have access to the underlying files
+ */
+ ap_mm_permission(mm, SSL_MM_FILE_MODE, ap_user_id, -1);
+
+ /*
+ * Create cache inside the shared memory segment
+ */
+ avail = avail_orig = ap_mm_available(mm);
+ ssl_log(s, SSL_LOG_TRACE, "Shared-memory segment has %u available",
+ avail);
+
+ /*
+ * For some reason to do with MM's internal management, I can't
+ * allocate the full amount. Implement a reasonable form of trial
+ * and error and output trace information.
+ */
+ while ((shm_segment == NULL) && ((avail_orig - avail) * 100 < avail_orig)) {
+ shm_segment = shmcb_malloc(avail);
+ if (shm_segment == NULL) {
+ ssl_log(s, SSL_LOG_TRACE,
+ "shmcb_malloc attempt for %u bytes failed", avail);
+ avail -= 2;
+ }
+ }
+ if (shm_segment == NULL) {
+ ssl_log(s, SSL_LOG_ERROR,
+ "Cannot allocate memory for the 'shmcb' session cache\n");
+ ssl_die();
+ }
+ ssl_log(s, SSL_LOG_TRACE, "shmcb_init allocated %u bytes of shared "
+ "memory", avail);
+ if (!shmcb_init_memory(s, shm_segment, avail)) {
+ ssl_log(s, SSL_LOG_ERROR,
+ "Failure initialising 'shmcb' shared memory");
+ ssl_die();
+ }
+ ssl_log(s, SSL_LOG_INFO, "Shared memory session cache initialised");
+
+ /*
+ * Success ... we hack the memory block into place by cheating for
+ * now and stealing a member variable the original shared memory
+ * cache was using. :-)
+ */
+ mc->tSessionCacheDataTable = (table_t *) shm_segment;
+ return;
+}
+
+void ssl_scache_shmcb_kill(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig();
+
+ if (mc->pSessionCacheDataMM != NULL) {
+ ap_mm_destroy(mc->pSessionCacheDataMM);
+ mc->pSessionCacheDataMM = NULL;
+ }
+ return;
+}
+
+BOOL ssl_scache_shmcb_store(server_rec *s, UCHAR * id, int idlen,
+ time_t timeout, SSL_SESSION * pSession)
+{
+ SSLModConfigRec *mc = myModConfig();
+ void *shm_segment;
+ BOOL to_return = FALSE;
+
+ /* We've kludged our pointer into the other cache's member variable. */
+ shm_segment = (void *) mc->tSessionCacheDataTable;
+ ssl_mutex_on(s);
+ if (!shmcb_store_session(s, shm_segment, id, idlen, pSession, timeout))
+ /* in this cache engine, "stores" should never fail. */
+ ssl_log(s, SSL_LOG_ERROR, "'shmcb' code was unable to store a "
+ "session in the cache.");
+ else {
+ ssl_log(s, SSL_LOG_TRACE, "shmcb_store successful");
+ to_return = TRUE;
+ }
+ ssl_mutex_off(s);
+ return to_return;
+}
+
+SSL_SESSION *ssl_scache_shmcb_retrieve(server_rec *s, UCHAR * id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig();
+ void *shm_segment;
+ SSL_SESSION *pSession;
+
+ /* We've kludged our pointer into the other cache's member variable. */
+ shm_segment = (void *) mc->tSessionCacheDataTable;
+ ssl_mutex_on(s);
+ pSession = shmcb_retrieve_session(s, shm_segment, id, idlen);
+ ssl_mutex_off(s);
+ if (pSession)
+ ssl_log(s, SSL_LOG_TRACE, "shmcb_retrieve had a hit");
+ else {
+ ssl_log(s, SSL_LOG_TRACE, "shmcb_retrieve had a miss");
+ ssl_log(s, SSL_LOG_INFO, "Client requested a 'session-resume' but "
+ "we have no such session.");
+ }
+ return pSession;
+}
+
+void ssl_scache_shmcb_remove(server_rec *s, UCHAR * id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig();
+ void *shm_segment;
+
+ /* We've kludged our pointer into the other cache's member variable. */
+ shm_segment = (void *) mc->tSessionCacheDataTable;
+ shmcb_remove_session(s, shm_segment, id, idlen);
+}
+
+void ssl_scache_shmcb_expire(server_rec *s)
+{
+ /* NOP */
+ return;
+}
+
+void ssl_scache_shmcb_status(server_rec *s, pool *p,
+ void (*func) (char *, void *), void *arg)
+{
+ SSLModConfigRec *mc = myModConfig();
+ SHMCBHeader *header;
+ SHMCBQueue queue;
+ SHMCBCache cache;
+ SHMCBIndex *idx;
+ void *shm_segment;
+ unsigned int loop, total, cache_total, non_empty_divisions;
+ int index_pct, cache_pct;
+ double expiry_total;
+ time_t average_expiry, now, max_expiry, min_expiry, idxexpiry;
+
+ ssl_log(s, SSL_LOG_TRACE, "inside ssl_scache_shmcb_status");
+
+ /* We've kludged our pointer into the other cache's member variable. */
+ shm_segment = (void *) mc->tSessionCacheDataTable;
+
+ /* Get the header structure. */
+ shmcb_get_header(shm_segment, &header);
+ total = cache_total = non_empty_divisions = 0;
+ average_expiry = max_expiry = min_expiry = 0;
+ expiry_total = 0;
+
+ /* It may seem strange to grab "now" at this point, but in theory
+ * we should never have a negative threshold; grabbing "now" after
+ * the loop (which performs expiries) could allow that chance. */
+ now = time(NULL);
+ for (loop = 0; loop <= header->division_mask; loop++) {
+ if (shmcb_get_division(header, &queue, &cache, loop)) {
+ shmcb_expire_division(s, &queue, &cache);
+ total += shmcb_get_safe_uint(queue.pos_count);
+ cache_total += shmcb_get_safe_uint(cache.pos_count);
+ if (shmcb_get_safe_uint(queue.pos_count) > 0) {
+ idx = shmcb_get_index(&queue,
+ shmcb_get_safe_uint(queue.first_pos));
+ non_empty_divisions++;
+ idxexpiry = shmcb_get_safe_time(&(idx->expires));
+ expiry_total += (double) idxexpiry;
+ max_expiry = (idxexpiry > max_expiry ? idxexpiry :
+ max_expiry);
+ if (min_expiry == 0)
+ min_expiry = idxexpiry;
+ else
+ min_expiry = (idxexpiry < min_expiry ? idxexpiry :
+ min_expiry);
+ }
+ }
+ }
+ index_pct = (100 * total) / (header->index_num * (header->division_mask + 1));
+ cache_pct = (100 * cache_total) / (header->cache_data_size * (header->division_mask + 1));
+ func(ap_psprintf(p, "cache type: <b>SHMCB</b>, shared memory: <b>%d</b> "
+ "bytes, current sessions: <b>%d</b><br>",
+ mc->nSessionCacheDataSize, total), arg);
+ func(ap_psprintf(p, "sub-caches: <b>%d</b>, indexes per sub-cache: "
+ "<b>%d</b><br>", (int) header->division_mask + 1,
+ (int) header->index_num), arg);
+ if (non_empty_divisions != 0) {
+ average_expiry = (time_t)(expiry_total / (double)non_empty_divisions);
+ func(ap_psprintf(p, "time left on oldest entries' SSL sessions: "), arg);
+ if (now < average_expiry)
+ func(ap_psprintf(p, "avg: <b>%d</b> seconds, (range: %d...%d)<br>",
+ (int)(average_expiry - now), (int) (min_expiry - now),
+ (int)(max_expiry - now)), arg);
+ else
+ func(ap_psprintf(p, "expiry threshold: <b>Calculation Error!</b>"
+ "<br>"), arg);
+
+ }
+ func(ap_psprintf(p, "index usage: <b>%d%%</b>, cache usage: <b>%d%%</b>"
+ "<br>", index_pct, cache_pct), arg);
+ func(ap_psprintf(p, "total sessions stored since starting: <b>%lu</b><br>",
+ header->num_stores), arg);
+ func(ap_psprintf(p, "total sessions expired since starting: <b>%lu</b><br>",
+ header->num_expiries), arg);
+ func(ap_psprintf(p, "total (pre-expiry) sessions scrolled out of the "
+ "cache: <b>%lu</b><br>", header->num_scrolled), arg);
+ func(ap_psprintf(p, "total retrieves since starting: <b>%lu</b> hit, "
+ "<b>%lu</b> miss<br>", header->num_retrieves_hit,
+ header->num_retrieves_miss), arg);
+ func(ap_psprintf(p, "total removes since starting: <b>%lu</b> hit, "
+ "<b>%lu</b> miss<br>", header->num_removes_hit,
+ header->num_removes_miss), arg);
+ ssl_log(s, SSL_LOG_TRACE, "leaving shmcb_status");
+ return;
+}
+
+/*
+**
+** Memory manipulation and low-level cache operations
+**
+*/
+
+static BOOL shmcb_init_memory(
+ server_rec *s, void *shm_mem,
+ unsigned int shm_mem_size)
+{
+ SHMCBHeader *header;
+ SHMCBQueue queue;
+ SHMCBCache cache;
+ unsigned int temp, loop, granularity;
+
+ ssl_log(s, SSL_LOG_TRACE, "entered shmcb_init_memory()");
+
+ /* Calculate some sizes... */
+ temp = sizeof(SHMCBHeader);
+
+ /* If the segment is ridiculously too small, bail out */
+ if (shm_mem_size < (2*temp)) {
+ ssl_log(s, SSL_LOG_ERROR, "shared memory segment too small");
+ return FALSE;
+ }
+
+ /* Make temp the amount of memory without the header */
+ temp = shm_mem_size - temp;
+
+ /* Work on the basis that you need 10 bytes of index for each session
+ * (approx 150 bytes), which would mean dividing temp by 160 - and then
+ * make sure we err on having too much index space to burn even when
+ * the cache is full, which is a lot less stupid than having
+ * not enough index space to utilise the whole cache! */
+ temp /= 120;
+ ssl_log(s, SSL_LOG_TRACE, "for %u bytes, recommending %u indexes",
+ shm_mem_size, temp);
+
+ /* We should divide these indexes evenly amongst the queues. Try
+ * to get it so that there are roughly half the number of divisions
+ * as there are indexes in each division. */
+ granularity = 256;
+ while ((temp / granularity) < (2 * granularity))
+ granularity /= 2;
+
+ /* So we have 'granularity' divisions, set 'temp' equal to the
+ * number of indexes in each division. */
+ temp /= granularity;
+
+ /* Too small? Bail ... */
+ if (temp < 5) {
+ ssl_log(s, SSL_LOG_ERROR, "shared memory segment too small");
+ return FALSE;
+ }
+
+ /* OK, we're sorted - from here on in, the return should be TRUE */
+ header = (SHMCBHeader *)shm_mem;
+ header->division_mask = (unsigned char)(granularity - 1);
+ header->division_offset = sizeof(SHMCBHeader);
+ header->index_num = temp;
+ header->index_offset = (2 * sizeof(unsigned int));
+ header->index_size = sizeof(SHMCBIndex);
+ header->queue_size = header->index_offset +
+ (header->index_num * header->index_size);
+
+ /* Now calculate the space for each division */
+ temp = shm_mem_size - header->division_offset;
+ header->division_size = temp / granularity;
+
+ /* Calculate the space left in each division for the cache */
+ temp -= header->queue_size;
+ header->cache_data_offset = (2 * sizeof(unsigned int));
+ header->cache_data_size = header->division_size -
+ header->queue_size - header->cache_data_offset;
+
+ /* Output trace info */
+ ssl_log(s, SSL_LOG_TRACE, "shmcb_init_memory choices follow");
+ ssl_log(s, SSL_LOG_TRACE, "division_mask = 0x%02X", header->division_mask);
+ ssl_log(s, SSL_LOG_TRACE, "division_offset = %u", header->division_offset);
+ ssl_log(s, SSL_LOG_TRACE, "division_size = %u", header->division_size);
+ ssl_log(s, SSL_LOG_TRACE, "queue_size = %u", header->queue_size);
+ ssl_log(s, SSL_LOG_TRACE, "index_num = %u", header->index_num);
+ ssl_log(s, SSL_LOG_TRACE, "index_offset = %u", header->index_offset);
+ ssl_log(s, SSL_LOG_TRACE, "index_size = %u", header->index_size);
+ ssl_log(s, SSL_LOG_TRACE, "cache_data_offset = %u", header->cache_data_offset);
+ ssl_log(s, SSL_LOG_TRACE, "cache_data_size = %u", header->cache_data_size);
+
+ /* The header is done, make the caches empty */
+ for (loop = 0; loop < granularity; loop++) {
+ if (!shmcb_get_division(header, &queue, &cache, loop))
+ ssl_log(s, SSL_LOG_ERROR, "shmcb_init_memory, " "internal error");
+ shmcb_set_safe_uint(cache.first_pos, 0);
+ shmcb_set_safe_uint(cache.pos_count, 0);
+ shmcb_set_safe_uint(queue.first_pos, 0);
+ shmcb_set_safe_uint(queue.pos_count, 0);
+ }
+
+ ssl_log(s, SSL_LOG_TRACE, "leaving shmcb_init_memory()");
+ return TRUE;
+}
+
+static BOOL shmcb_store_session(
+ server_rec *s, void *shm_segment, UCHAR * id,
+ int idlen, SSL_SESSION * pSession,
+ time_t timeout)
+{
+ SHMCBHeader *header;
+ SHMCBQueue queue;
+ SHMCBCache cache;
+ unsigned char masked_index;
+ unsigned char encoded[SSL_SESSION_MAX_DER];
+ unsigned char *ptr_encoded;
+ unsigned int len_encoded;
+ time_t expiry_time;
+
+ ssl_log(s, SSL_LOG_TRACE, "inside shmcb_store_session");
+
+ /* Get the header structure, which division this session will fall into etc. */
+ shmcb_get_header(shm_segment, &header);
+ masked_index = pSession->session_id[0] & header->division_mask;
+ ssl_log(s, SSL_LOG_TRACE, "session_id[0]=%u, masked index=%u",
+ pSession->session_id[0], masked_index);
+ if (!shmcb_get_division(header, &queue, &cache, (unsigned int)masked_index)) {
+ ssl_log(s, SSL_LOG_ERROR, "shmcb_store_session, " "internal error");
+ return FALSE;
+ }
+
+ /* Serialise the session, work out how much we're dealing
+ * with. NB: This check could be removed if we're not paranoid
+ * or we find some assurance that it will never be necessary. */
+ len_encoded = i2d_SSL_SESSION(pSession, NULL);
+ if (len_encoded > SSL_SESSION_MAX_DER) {
+ ssl_log(s, SSL_LOG_ERROR, "session is too big (%u bytes)",
+ len_encoded);
+ return FALSE;
+ }
+ ptr_encoded = encoded;
+ len_encoded = i2d_SSL_SESSION(pSession, &ptr_encoded);
+ expiry_time = timeout;
+ if (!shmcb_insert_encoded_session(s, &queue, &cache, encoded,
+ len_encoded, pSession->session_id,
+ expiry_time)) {
+ ssl_log(s, SSL_LOG_ERROR, "can't store a session!");
+ return FALSE;
+ }
+ ssl_log(s, SSL_LOG_TRACE, "leaving shmcb_store successfully");
+ header->num_stores++;
+ return TRUE;
+}
+
+static SSL_SESSION *shmcb_retrieve_session(
+ server_rec *s, void *shm_segment,
+ UCHAR * id, int idlen)
+{
+ SHMCBHeader *header;
+ SHMCBQueue queue;
+ SHMCBCache cache;
+ unsigned char masked_index;
+ SSL_SESSION *pSession;
+
+ ssl_log(s, SSL_LOG_TRACE, "inside shmcb_retrieve_session");
+ if (idlen < 2) {
+ ssl_log(s, SSL_LOG_ERROR, "unusably short session_id provided "
+ "(%u bytes)", idlen);
+ return FALSE;
+ }
+
+ /* Get the header structure, which division this session lookup
+ * will come from etc. */
+ shmcb_get_header(shm_segment, &header);
+ masked_index = id[0] & header->division_mask;
+ ssl_log(s, SSL_LOG_TRACE, "id[0]=%u, masked index=%u", id[0],
+ masked_index);
+ if (!shmcb_get_division(header, &queue, &cache, (unsigned int) masked_index)) {
+ ssl_log(s, SSL_LOG_ERROR, "shmcb_retrieve_session, " "internal error");
+ header->num_retrieves_miss++;
+ return FALSE;
+ }
+
+ /* Get the session corresponding to the session_id or NULL if it
+ * doesn't exist (or is flagged as "removed"). */
+ pSession = shmcb_lookup_session_id(s, &queue, &cache, id, idlen);
+ if (pSession)
+ header->num_retrieves_hit++;
+ else
+ header->num_retrieves_miss++;
+ ssl_log(s, SSL_LOG_TRACE, "leaving shmcb_retrieve_session");
+ return pSession;
+}
+
+static BOOL shmcb_remove_session(
+ server_rec *s, void *shm_segment,
+ UCHAR * id, int idlen)
+{
+ SHMCBHeader *header;
+ SHMCBQueue queue;
+ SHMCBCache cache;
+ unsigned char masked_index;
+ BOOL res;
+
+ ssl_log(s, SSL_LOG_TRACE, "inside shmcb_remove_session");
+ if (id == NULL) {
+ ssl_log(s, SSL_LOG_ERROR, "remove called with NULL session_id!");
+ return FALSE;
+ }
+
+ /* Get the header structure, which division this session remove
+ * will happen in etc. */
+ shmcb_get_header(shm_segment, &header);
+ masked_index = id[0] & header->division_mask;
+ ssl_log(s, SSL_LOG_TRACE, "id[0]=%u, masked index=%u",
+ id[0], masked_index);
+ if (!shmcb_get_division(header, &queue, &cache, (unsigned int)masked_index)) {
+ ssl_log(s, SSL_LOG_ERROR, "shmcb_remove_session, internal error");
+ header->num_removes_miss++;
+ return FALSE;
+ }
+ res = shmcb_remove_session_id(s, &queue, &cache, id, idlen);
+ if (res)
+ header->num_removes_hit++;
+ else
+ header->num_removes_miss++;
+ ssl_log(s, SSL_LOG_TRACE, "leaving shmcb_remove_session");
+ return res;
+}
+
+
+/*
+**
+** Weirdo cyclic buffer functions
+**
+*/
+
+/* This gets used in the cyclic "index array" (in the 'Queue's) and
+ * in the cyclic 'Cache's too ... you provide the "width" of the
+ * cyclic store, the starting position and how far to move (with
+ * wrapping if necessary). Basically it's addition modulo buf_size. */
+static unsigned int shmcb_cyclic_increment(
+ unsigned int buf_size,
+ unsigned int start_pos,
+ unsigned int to_add)
+{
+ start_pos += to_add;
+ while (start_pos >= buf_size)
+ start_pos -= buf_size;
+ return start_pos;
+}
+
+/* Given two positions in a cyclic buffer, calculate the "distance".
+ * This is to cover the case ("non-trivial") where the 'next' offset
+ * is to the left of the 'start' offset. NB: This calculates the
+ * space inclusive of one end-point but not the other. There is an
+ * ambiguous case (which is why we use the <start_pos,offset>
+ * coordinate system rather than <start_pos,end_pos> one) when 'start'
+ * is the same as 'next'. It could indicate the buffer is full or it
+ * can indicate the buffer is empty ... I choose the latter as it's
+ * easier and usually necessary to check if the buffer is full anyway
+ * before doing incremental logic (which is what this is useful for), but we
+ * definitely need the empty case handled - in fact it's our starting
+ * state!! */
+static unsigned int shmcb_cyclic_space(
+ unsigned int buf_size,
+ unsigned int start_offset,
+ unsigned int next_offset)
+{
+ /* Is it the trivial case? */
+ if (start_offset <= next_offset)
+ return (next_offset - start_offset); /* yes */
+ else
+ return ((buf_size - start_offset) + next_offset); /* no */
+}
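+
+/* Worked example (an added illustration, not part of the original source):
+ * with buf_size=10, shmcb_cyclic_increment(10, 7, 5) yields 2 (7+5 wraps
+ * past the end of the buffer), and shmcb_cyclic_space(10, 7, 2) returns
+ * (10 - 7) + 2 = 5, i.e. five positions are in use from offset 7 up to
+ * (but not including) offset 2. When start_offset == next_offset the
+ * distance is reported as 0 - the "empty" reading chosen above. */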
+
+/* A "normal-to-cyclic" memcpy ... this takes a linear block of
+ * memory and copies it onto a cyclic buffer. The purpose and
+ * function of this is pretty obvious, you need to cover the case
+ * that the destination (cyclic) buffer has to wrap round. */
+static void shmcb_cyclic_ntoc_memcpy(
+ unsigned int buf_size,
+ unsigned char *data,
+ unsigned int dest_offset,
+ unsigned char *src, unsigned int src_len)
+{
+ /* Can it be copied all in one go? */
+ if (dest_offset + src_len < buf_size)
+ /* yes */
+ memcpy(data + dest_offset, src, src_len);
+ else {
+ /* no */
+ memcpy(data + dest_offset, src, buf_size - dest_offset);
+ memcpy(data, src + buf_size - dest_offset,
+ src_len + dest_offset - buf_size);
+ }
+ return;
+}
+
+/* A "cyclic-to-normal" memcpy ... given the last function, this
+ * one's purpose is clear, it copies out of a cyclic buffer handling
+ * wrapping. */
+static void shmcb_cyclic_cton_memcpy(
+ unsigned int buf_size,
+ unsigned char *dest,
+ unsigned char *data,
+ unsigned int src_offset,
+ unsigned int src_len)
+{
+ /* Can it be copied all in one go? */
+ if (src_offset + src_len < buf_size)
+ /* yes */
+ memcpy(dest, data + src_offset, src_len);
+ else {
+ /* no */
+ memcpy(dest, data + src_offset, buf_size - src_offset);
+ memcpy(dest + buf_size - src_offset, data,
+ src_len + src_offset - buf_size);
+ }
+ return;
+}
+
+/* Here's the cool hack that makes it all work ... by simply
+ * making the first collection of bytes *be* our header structure
+ * (casting it into the C structure), we have the perfect way to
+ * maintain state in a shared-memory session cache from one call
+ * (and process) to the next, use the shared memory itself! The
+ * original mod_ssl shared-memory session cache uses variables
+ * inside the context, but we simply use that for storing the
+ * pointer to the shared memory itself. And don't forget, after
+ * Apache's initialisation, this "header" is constant/read-only
+ * so we can read it outside any locking.
+ * <grin> - sometimes I just *love* coding y'know?! */
+static void shmcb_get_header(void *shm_mem, SHMCBHeader **header)
+{
+ *header = (SHMCBHeader *)shm_mem;
+ return;
+}
+
+/* This is what populates our "interesting" structures. Given a
+ * pointer to the header, and an index into the appropriate
+ * division (this must have already been masked using the
+ * division_mask by the caller!), we can populate the provided
+ * SHMCBQueue and SHMCBCache structures with values and
+ * pointers to the underlying shared memory. Upon returning
+ * (if not FALSE), the caller can meddle with the pointer
+ * values and they will map into the shared-memory directly,
+ * as such there's no need to "free" or "set" the Queue or
+ * Cache values, they were themselves references to the *real*
+ * data. */
+static BOOL shmcb_get_division(
+ SHMCBHeader *header, SHMCBQueue *queue,
+ SHMCBCache *cache, unsigned int idx)
+{
+ unsigned char *pQueue;
+ unsigned char *pCache;
+
+ /* bounds check */
+ if (idx > (unsigned int) header->division_mask)
+ return FALSE;
+
+ /* Locate the blocks of memory storing the corresponding data */
+ pQueue = ((unsigned char *) header) + header->division_offset +
+ (idx * header->division_size);
+ pCache = pQueue + header->queue_size;
+
+ /* Populate the structures with appropriate pointers */
+ queue->first_pos = (unsigned int *) pQueue;
+
+ /* Our structures stay packed, no matter what the system's
+ * data-alignment regime is. */
+ queue->pos_count = (unsigned int *) (pQueue + sizeof(unsigned int));
+ queue->indexes = (SHMCBIndex *) (pQueue + (2 * sizeof(unsigned int)));
+ cache->first_pos = (unsigned int *) pCache;
+ cache->pos_count = (unsigned int *) (pCache + sizeof(unsigned int));
+ cache->data = (unsigned char *) (pCache + (2 * sizeof(unsigned int)));
+ queue->header = cache->header = header;
+
+ return TRUE;
+}
+
+/* This returns a pointer to the piece of shared memory containing
+ * a specified 'Index'. SHMCBIndex, like SHMCBHeader, is a fixed
+ * width non-referencing structure of primitive types that can be
+ * cast onto the corresponding block of shared memory. Thus, by
+ * returning a cast pointer to that section of shared memory, the
+ * caller can read and write values to and from the "structure" and
+ * they are actually reading and writing the underlying shared
+ * memory. */
+static SHMCBIndex *shmcb_get_index(
+ const SHMCBQueue *queue, unsigned int idx)
+{
+ /* bounds check (valid positions are 0 .. index_num - 1) */
+ if (idx >= (unsigned int) queue->header->index_num)
+ return NULL;
+
+ /* Return a pointer to the index. NB: I am being horribly pedantic
+ * here so as to avoid any potential data-alignment assumptions being
+ * placed on the pointer arithmetic by the compiler (sigh). */
+ return (SHMCBIndex *)(((unsigned char *) queue->indexes) +
+ (idx * sizeof(SHMCBIndex)));
+}
+
+/* This function rolls expired cache (and index) entries off the front
+ * of the cyclic buffers in a division. The function returns the number
+ * of expired sessions. */
+static unsigned int shmcb_expire_division(
+ server_rec *s, SHMCBQueue *queue, SHMCBCache *cache)
+{
+ SHMCBIndex *idx;
+ time_t now;
+ unsigned int loop, index_num, pos_count, new_pos;
+ SHMCBHeader *header;
+
+ ssl_log(s, SSL_LOG_TRACE, "entering shmcb_expire_division");
+
+ /* We must calculate num and space ourselves based on expiry times. */
+ now = time(NULL);
+ loop = 0;
+ new_pos = shmcb_get_safe_uint(queue->first_pos);
+
+ /* Cache useful values */
+ header = queue->header;
+ index_num = header->index_num;
+ pos_count = shmcb_get_safe_uint(queue->pos_count);
+ while (loop < pos_count) {
+ idx = shmcb_get_index(queue, new_pos);
+ if (shmcb_get_safe_time(&(idx->expires)) > now)
+ /* it hasn't expired yet, we're done iterating */
+ break;
+ /* This one should be expired too. Shift to the next entry. */
+ loop++;
+ new_pos = shmcb_cyclic_increment(index_num, new_pos, 1);
+ }
+
+ /* Find the new_offset and make the expiries happen. */
+ if (loop > 0) {
+ ssl_log(s, SSL_LOG_TRACE, "will be expiring %u sessions", loop);
+ /* We calculate the new_offset by "peeking" (or in the
+ * case it's the last entry, "sneaking" ;-). */
+ if (loop == pos_count) {
+ /* We are expiring everything! This is easy to do... */
+ shmcb_set_safe_uint(queue->pos_count, 0);
+ shmcb_set_safe_uint(cache->pos_count, 0);
+ }
+ else {
+ /* The Queue is easy to adjust */
+ shmcb_set_safe_uint(queue->pos_count,
+ shmcb_get_safe_uint(queue->pos_count) - loop);
+ shmcb_set_safe_uint(queue->first_pos, new_pos);
+ /* peek to the start of the next session */
+ idx = shmcb_get_index(queue, new_pos);
+ /* We can use shmcb_cyclic_space because we've guaranteed
+ * we don't fit the ambiguous full/empty case. */
+ shmcb_set_safe_uint(cache->pos_count,
+ shmcb_get_safe_uint(cache->pos_count) -
+ shmcb_cyclic_space(header->cache_data_size,
+ shmcb_get_safe_uint(cache->first_pos),
+ shmcb_get_safe_uint(&(idx->offset))));
+ shmcb_set_safe_uint(cache->first_pos, shmcb_get_safe_uint(&(idx->offset)));
+ }
+ ssl_log(s, SSL_LOG_TRACE, "we now have %u sessions",
+ shmcb_get_safe_uint(queue->pos_count));
+ }
+ header->num_expiries += loop;
+ return loop;
+}
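shmcb_cyclic_space() is defined earlier in this file and is not shown here; its use in this function only makes sense if it returns the forward distance from one offset to another around the ring, along the lines of this assumed sketch:

    /* assumed semantics: bytes from 'start' forward to 'end' in a ring
     * of buf_size bytes (the real helper lives earlier in this file) */
    static unsigned int ring_distance(unsigned int buf_size,
                                      unsigned int start, unsigned int end)
    {
        return (end >= start) ? end - start : end + buf_size - start;
    }

    /* e.g. ring_distance(16, 12, 4) == 8: the expired entries occupied
     * the 8 bytes between the old first_pos and the next survivor */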
+
+/* Inserts a new encoded session into a queue/cache pair - expiring
+ * (early or otherwise) any leading sessions as necessary to ensure
+ * there is room. An error return (FALSE) should only happen in the
+ * event of surreal values being passed on, or ridiculously small
+ * cache sizes. NB: For tracing purposes, this function is also given
+ * the server_rec to allow "ssl_log()". */
+static BOOL shmcb_insert_encoded_session(
+ server_rec *s, SHMCBQueue * queue,
+ SHMCBCache * cache,
+ unsigned char *encoded,
+ unsigned int encoded_len,
+ unsigned char *session_id,
+ time_t expiry_time)
+{
+ SHMCBHeader *header;
+ SHMCBIndex *idx = NULL;
+ unsigned int gap, new_pos, loop, new_offset;
+ int need;
+
+ ssl_log(s, SSL_LOG_TRACE, "entering shmcb_insert_encoded_session, "
+ "*queue->pos_count = %u", shmcb_get_safe_uint(queue->pos_count));
+
+ /* If there's entries to expire, ditch them first thing. */
+ shmcb_expire_division(s, queue, cache);
+ header = cache->header;
+ gap = header->cache_data_size - shmcb_get_safe_uint(cache->pos_count);
+ if (gap < encoded_len) {
+ new_pos = shmcb_get_safe_uint(queue->first_pos);
+ loop = 0;
+ need = (int) encoded_len - (int) gap;
+ while ((need > 0) && (loop + 1 < shmcb_get_safe_uint(queue->pos_count))) {
+ new_pos = shmcb_cyclic_increment(header->index_num, new_pos, 1);
+ loop += 1;
+ idx = shmcb_get_index(queue, new_pos);
+ need = (int) encoded_len - (int) gap -
+ shmcb_cyclic_space(header->cache_data_size,
+ shmcb_get_safe_uint(cache->first_pos),
+ shmcb_get_safe_uint(&(idx->offset)));
+ }
+ if (loop > 0) {
+ ssl_log(s, SSL_LOG_TRACE, "about to scroll %u sessions from %u",
+ loop, shmcb_get_safe_uint(queue->pos_count));
+ /* We are removing "loop" items from the cache. */
+ shmcb_set_safe_uint(cache->pos_count,
+ shmcb_get_safe_uint(cache->pos_count) -
+ shmcb_cyclic_space(header->cache_data_size,
+ shmcb_get_safe_uint(cache->first_pos),
+ shmcb_get_safe_uint(&(idx->offset))));
+ shmcb_set_safe_uint(cache->first_pos, shmcb_get_safe_uint(&(idx->offset)));
+ shmcb_set_safe_uint(queue->pos_count, shmcb_get_safe_uint(queue->pos_count) - loop);
+ shmcb_set_safe_uint(queue->first_pos, new_pos);
+ ssl_log(s, SSL_LOG_TRACE, "now only have %u sessions",
+ shmcb_get_safe_uint(queue->pos_count));
+ /* Update the stats!!! */
+ header->num_scrolled += loop;
+ }
+ }
+
+ /* probably unnecessary checks, but I'll leave them until this code
+ * is verified. */
+ if (shmcb_get_safe_uint(cache->pos_count) + encoded_len >
+ header->cache_data_size) {
+ ssl_log(s, SSL_LOG_ERROR, "shmcb_insert_encoded_session, "
+ "internal error");
+ return FALSE;
+ }
+ if (shmcb_get_safe_uint(queue->pos_count) == header->index_num) {
+ ssl_log(s, SSL_LOG_ERROR, "shmcb_insert_encoded_session, "
+ "internal error");
+ return FALSE;
+ }
+ ssl_log(s, SSL_LOG_TRACE, "we have %u bytes and %u indexes free - "
+ "enough", header->cache_data_size -
+ shmcb_get_safe_uint(cache->pos_count), header->index_num -
+ shmcb_get_safe_uint(queue->pos_count));
+
+
+ /* HERE WE ASSUME THAT THE NEW SESSION SHOULD GO ON THE END! I'M NOT
+ * CHECKING WHETHER IT SHOULD BE GENUINELY "INSERTED" SOMEWHERE.
+ *
+ * We either fix that, or find out at a "higher" (read "mod_ssl")
+ * level whether it is possible to have distinct session caches for
+ * any attempted tomfoolery to do with different session timeouts.
+ * Knowing in advance that we can have a cache-wide constant timeout
+ * would make this stuff *MUCH* more efficient. Mind you, it's very
+ * efficient right now because I'm ignoring this problem!!!
+ */
+
+ /* Increment to the first unused byte */
+ new_offset = shmcb_cyclic_increment(header->cache_data_size,
+ shmcb_get_safe_uint(cache->first_pos),
+ shmcb_get_safe_uint(cache->pos_count));
+ /* Copy the DER-encoded session into place */
+ shmcb_cyclic_ntoc_memcpy(header->cache_data_size, cache->data,
+ new_offset, encoded, encoded_len);
+ /* Get the new index that this session is stored in. */
+ new_pos = shmcb_cyclic_increment(header->index_num,
+ shmcb_get_safe_uint(queue->first_pos),
+ shmcb_get_safe_uint(queue->pos_count));
+ ssl_log(s, SSL_LOG_TRACE, "storing in index %u, at offset %u", new_pos,
+ new_offset);
+ idx = shmcb_get_index(queue, new_pos);
+ if (idx == NULL) {
+ ssl_log(s, SSL_LOG_ERROR, "shmcb_insert_encoded_session, "
+ "internal error");
+ return FALSE;
+ }
+ memset(idx, 0, sizeof(SHMCBIndex));
+ shmcb_set_safe_time(&(idx->expires), expiry_time);
+ shmcb_set_safe_uint(&(idx->offset), new_offset);
+
+ /* idx->removed = (unsigned char)0; */ /* Not needed given the memset above. */
+ idx->s_id2 = session_id[1];
+ ssl_log(s, SSL_LOG_TRACE, "session_id[0]=%u, idx->s_id2=%u",
+ session_id[0], session_id[1]);
+
+ /* All that remains is to adjust the cache's and queue's "pos_count"s. */
+ shmcb_set_safe_uint(cache->pos_count,
+ shmcb_get_safe_uint(cache->pos_count) + encoded_len);
+ shmcb_set_safe_uint(queue->pos_count,
+ shmcb_get_safe_uint(queue->pos_count) + 1);
+
+ /* And just for good debugging measure ... */
+ ssl_log(s, SSL_LOG_TRACE, "leaving now with %u bytes in the cache and "
+ "%u indexes", shmcb_get_safe_uint(cache->pos_count),
+ shmcb_get_safe_uint(queue->pos_count));
+ ssl_log(s, SSL_LOG_TRACE, "leaving shmcb_insert_encoded_session");
+ return TRUE;
+}
+
+/* Performs a lookup into a queue/cache pair for a
+ * session_id. If found, the session is deserialised
+ * and returned, otherwise NULL. */
+static SSL_SESSION *shmcb_lookup_session_id(
+ server_rec *s, SHMCBQueue *queue,
+ SHMCBCache *cache, UCHAR *id,
+ int idlen)
+{
+ unsigned char tempasn[SSL_SESSION_MAX_DER];
+ SHMCBIndex *idx;
+ SHMCBHeader *header;
+ SSL_SESSION *pSession = NULL;
+ unsigned int curr_pos, loop, count;
+ unsigned char *ptr;
+ time_t now;
+
+ ssl_log(s, SSL_LOG_TRACE, "entering shmcb_lookup_session_id");
+
+ /* If there are entries to expire, ditch them first thing. */
+ shmcb_expire_division(s, queue, cache);
+ now = time(NULL);
+ curr_pos = shmcb_get_safe_uint(queue->first_pos);
+ count = shmcb_get_safe_uint(queue->pos_count);
+ header = queue->header;
+ for (loop = 0; loop < count; loop++) {
+ ssl_log(s, SSL_LOG_TRACE, "loop=%u, count=%u, curr_pos=%u",
+ loop, count, curr_pos);
+ idx = shmcb_get_index(queue, curr_pos);
+ ssl_log(s, SSL_LOG_TRACE, "idx->s_id2=%u, id[1]=%u, offset=%u",
+ idx->s_id2, id[1], shmcb_get_safe_uint(&(idx->offset)));
+ /* Only look into the session further if:
+ * (a) the second byte of the session_id matches,
+ * (b) the "removed" flag isn't set,
+ * (c) the session hasn't expired yet.
+ * We do (c) like this so that it saves us having to
+ * do natural expiries ... naturally expired sessions
+ * scroll off the front anyway when the cache is full and
+ * "rotating", the only real issue that remains is the
+ * removal or disabling of forcibly killed sessions. */
+ if ((idx->s_id2 == id[1]) && !idx->removed &&
+ (shmcb_get_safe_time(&(idx->expires)) > now)) {
+ ssl_log(s, SSL_LOG_TRACE, "at index %u, found possible "
+ "session match", curr_pos);
+ shmcb_cyclic_cton_memcpy(header->cache_data_size,
+ tempasn, cache->data,
+ shmcb_get_safe_uint(&(idx->offset)),
+ SSL_SESSION_MAX_DER);
+ ptr = tempasn;
+ pSession = d2i_SSL_SESSION(NULL, &ptr, SSL_SESSION_MAX_DER);
+ if (pSession == NULL) {
+ ssl_log(s, SSL_LOG_ERROR, "scach2_lookup_"
+ "session_id, internal error");
+ return NULL;
+ }
+ if ((pSession->session_id_length == idlen) &&
+ (memcmp(pSession->session_id, id, idlen) == 0)) {
+ ssl_log(s, SSL_LOG_TRACE, "a match!");
+ return pSession;
+ }
+ ssl_log(s, SSL_LOG_TRACE, "not a match");
+ SSL_SESSION_free(pSession);
+ pSession = NULL;
+ }
+ curr_pos = shmcb_cyclic_increment(header->index_num, curr_pos, 1);
+ }
+ ssl_log(s, SSL_LOG_TRACE, "no matching sessions were found");
+ return NULL;
+}
+
+static BOOL shmcb_remove_session_id(
+ server_rec *s, SHMCBQueue *queue,
+ SHMCBCache *cache, UCHAR *id, int idlen)
+{
+ unsigned char tempasn[SSL_SESSION_MAX_DER];
+ SSL_SESSION *pSession = NULL;
+ SHMCBIndex *idx;
+ SHMCBHeader *header;
+ unsigned int curr_pos, loop, count;
+ unsigned char *ptr;
+ BOOL to_return = FALSE;
+
+ ssl_log(s, SSL_LOG_TRACE, "entering shmcb_remove_session_id");
+
+ /* If there's entries to expire, ditch them first thing. */
+ /* shmcb_expire_division(s, queue, cache); */
+
+ /* Regarding the above ... hmmm ... I know my expiry code is slightly
+ * "faster" than all this remove stuff ... but if the higher level
+ * code calls a "remove" operation (and this *only* seems to happen
+ * when it has spotted an expired session before we had a chance to)
+ * then it should get credit for a remove (stats-wise). Also, in the
+ * off-chance that the server *requests* a renegotiate and wants to
+ * wipe the session clean we should give that priority over our own
+ * routine expiry handling. So I've moved the expiry check to *after*
+ * this general remove stuff. */
+ curr_pos = shmcb_get_safe_uint(queue->first_pos);
+ count = shmcb_get_safe_uint(queue->pos_count);
+ header = cache->header;
+ for (loop = 0; loop < count; loop++) {
+ ssl_log(s, SSL_LOG_TRACE, "loop=%u, count=%u, curr_pos=%u",
+ loop, count, curr_pos);
+ idx = shmcb_get_index(queue, curr_pos);
+ ssl_log(s, SSL_LOG_TRACE, "idx->s_id2=%u, id[1]=%u", idx->s_id2,
+ id[1]);
+ /* Only look into the session further if the second byte of the
+ * session_id matches. */
+ if (idx->s_id2 == id[1]) {
+ ssl_log(s, SSL_LOG_TRACE, "at index %u, found possible "
+ "session match", curr_pos);
+ shmcb_cyclic_cton_memcpy(header->cache_data_size,
+ tempasn, cache->data,
+ shmcb_get_safe_uint(&(idx->offset)),
+ SSL_SESSION_MAX_DER);
+ ptr = tempasn;
+ pSession = d2i_SSL_SESSION(NULL, &ptr, SSL_SESSION_MAX_DER);
+ if (pSession == NULL) {
+ ssl_log(s, SSL_LOG_ERROR, "shmcb_remove_session_id, "
+ "internal error");
+ goto end;
+ }
+ if ((pSession->session_id_length == idlen)
+ && (memcmp(id, pSession->session_id, idlen) == 0)) {
+ ssl_log(s, SSL_LOG_TRACE, "a match!");
+ /* Scrub out this session "quietly" */
+ idx->removed = (unsigned char) 1;
+ SSL_SESSION_free(pSession);
+ to_return = TRUE;
+ goto end;
+ }
+ ssl_log(s, SSL_LOG_TRACE, "not a match");
+ SSL_SESSION_free(pSession);
+ pSession = NULL;
+ }
+ curr_pos = shmcb_cyclic_increment(header->index_num, curr_pos, 1);
+ }
+ ssl_log(s, SSL_LOG_TRACE, "no matching sessions were found");
+
+ /* If there's entries to expire, ditch them now. */
+ shmcb_expire_division(s, queue, cache);
+end:
+ ssl_log(s, SSL_LOG_TRACE, "leaving shmcb_remove_session_id");
+ return to_return;
+}
+
+#endif /* SSL_EXPERIMENTAL_SHMCB */
+
diff --git a/usr.sbin/httpd/src/modules/ssl/ssl_scache_shmht.c b/usr.sbin/httpd/src/modules/ssl/ssl_scache_shmht.c
new file mode 100644
index 00000000000..5307ccfab48
--- /dev/null
+++ b/usr.sbin/httpd/src/modules/ssl/ssl_scache_shmht.c
@@ -0,0 +1,347 @@
+/* _ _
+** _ __ ___ ___ __| | ___ ___| | mod_ssl
+** | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
+** | | | | | | (_) | (_| | \__ \__ \ | www.modssl.org
+** |_| |_| |_|\___/ \__,_|___|___/___/_| ftp.modssl.org
+** |_____|
+** ssl_scache_shmht.c
+** Session Cache via Shared Memory (Hash Table Variant)
+*/
+
+/* ====================================================================
+ * Copyright (c) 1998-2000 Ralf S. Engelschall. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by
+ * Ralf S. Engelschall <rse@engelschall.com> for use in the
+ * mod_ssl project (http://www.modssl.org/)."
+ *
+ * 4. The names "mod_ssl" must not be used to endorse or promote
+ * products derived from this software without prior written
+ * permission. For written permission, please contact
+ * rse@engelschall.com.
+ *
+ * 5. Products derived from this software may not be called "mod_ssl"
+ * nor may "mod_ssl" appear in their names without prior
+ * written permission of Ralf S. Engelschall.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by
+ * Ralf S. Engelschall <rse@engelschall.com> for use in the
+ * mod_ssl project (http://www.modssl.org/)."
+ *
+ * THIS SOFTWARE IS PROVIDED BY RALF S. ENGELSCHALL ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RALF S. ENGELSCHALL OR
+ * HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ */
+
+#include "mod_ssl.h"
+
+/*
+ * Wrapper functions for table library which resemble malloc(3) & Co
+ * but use the variants from the MM shared memory library.
+ */
+
+static void *ssl_scache_shmht_malloc(size_t size)
+{
+ SSLModConfigRec *mc = myModConfig();
+ return ap_mm_malloc(mc->pSessionCacheDataMM, size);
+}
+
+static void *ssl_scache_shmht_calloc(size_t number, size_t size)
+{
+ SSLModConfigRec *mc = myModConfig();
+ return ap_mm_calloc(mc->pSessionCacheDataMM, number, size);
+}
+
+static void *ssl_scache_shmht_realloc(void *ptr, size_t size)
+{
+ SSLModConfigRec *mc = myModConfig();
+ return ap_mm_realloc(mc->pSessionCacheDataMM, ptr, size);
+}
+
+static void ssl_scache_shmht_free(void *ptr)
+{
+ SSLModConfigRec *mc = myModConfig();
+ ap_mm_free(mc->pSessionCacheDataMM, ptr);
+ return;
+}
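The table library consumes these four wrappers so that every allocation it performs lands inside the MM shared memory segment rather than on the process heap. A generic sketch of that callback-allocator pattern (illustrative names, not the real table_alloc() API):

    #include <stdlib.h>

    typedef void *(*demo_malloc_fn)(size_t);
    typedef void  (*demo_free_fn)(void *);

    struct demo_table {
        demo_malloc_fn do_malloc;
        demo_free_fn   do_free;
        /* ... table state, all allocated via do_malloc ... */
    };

    struct demo_table *demo_table_alloc(demo_malloc_fn m, demo_free_fn f)
    {
        struct demo_table *t = m(sizeof(*t));
        if (t != NULL) {
            t->do_malloc = m;
            t->do_free = f;
        }
        return t;
    }

    /* demo_table_alloc(ssl_scache_shmht_malloc, ssl_scache_shmht_free)
     * would place the table, and everything it allocates later, in the
     * shared segment, which is what makes it usable across processes. */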
+
+/*
+ * Now the actual session cache implementation
+ * based on a hash table inside a shared memory segment.
+ */
+
+void ssl_scache_shmht_init(server_rec *s, pool *p)
+{
+ SSLModConfigRec *mc = myModConfig();
+ AP_MM *mm;
+ table_t *ta;
+ int ta_errno;
+ int avail;
+ int n;
+
+ /*
+ * Create shared memory segment
+ */
+ if (mc->szSessionCacheDataFile == NULL) {
+ ssl_log(s, SSL_LOG_ERROR, "SSLSessionCache required");
+ ssl_die();
+ }
+ if ((mm = ap_mm_create(mc->nSessionCacheDataSize,
+ mc->szSessionCacheDataFile)) == NULL) {
+ ssl_log(s, SSL_LOG_ERROR,
+ "Cannot allocate shared memory: %s", ap_mm_error());
+ ssl_die();
+ }
+ mc->pSessionCacheDataMM = mm;
+
+ /*
+ * Make sure the children have access to the underlying files
+ */
+ ap_mm_permission(mm, SSL_MM_FILE_MODE, ap_user_id, -1);
+
+ /*
+ * Create hash table in shared memory segment
+ */
+ avail = ap_mm_available(mm);
+ n = (avail/2) / 1024;
+ n = n < 10 ? 10 : n;
+ if ((ta = table_alloc(n, &ta_errno,
+ ssl_scache_shmht_malloc,
+ ssl_scache_shmht_calloc,
+ ssl_scache_shmht_realloc,
+ ssl_scache_shmht_free )) == NULL) {
+ ssl_log(s, SSL_LOG_ERROR,
+ "Cannot allocate hash table in shared memory: %s",
+ table_strerror(ta_errno));
+ ssl_die();
+ }
+ table_attr(ta, TABLE_FLAG_AUTO_ADJUST|TABLE_FLAG_ADJUST_DOWN);
+ table_set_data_alignment(ta, sizeof(char *));
+ table_clear(ta);
+ mc->tSessionCacheDataTable = ta;
+
+ /*
+ * Log what was set up
+ */
+ ssl_log(s, SSL_LOG_INFO,
+ "Init: Created hash-table (%d buckets) "
+ "in shared memory (%d bytes) for SSL session cache", n, avail);
+ return;
+}
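For a sense of scale: with a 512 KB cache file, ap_mm_available() returns a little under 524288 bytes, so n = (avail/2)/1024 works out to roughly 256 hash buckets; the floor of 10 only matters for very small caches (under about 20 KB).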
+
+void ssl_scache_shmht_kill(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig();
+
+ if (mc->pSessionCacheDataMM != NULL) {
+ ap_mm_destroy(mc->pSessionCacheDataMM);
+ mc->pSessionCacheDataMM = NULL;
+ }
+ return;
+}
+
+BOOL ssl_scache_shmht_store(server_rec *s, UCHAR *id, int idlen, time_t expiry, SSL_SESSION *sess)
+{
+ SSLModConfigRec *mc = myModConfig();
+ void *vp;
+ UCHAR ucaData[SSL_SESSION_MAX_DER];
+ int nData;
+ UCHAR *ucp;
+
+ /* serialize (DER-encode) the session data */
+ ucp = ucaData;
+ nData = i2d_SSL_SESSION(sess, &ucp);
+
+ ssl_mutex_on(s);
+ if (table_insert_kd(mc->tSessionCacheDataTable,
+ id, idlen, NULL, sizeof(time_t)+nData,
+ NULL, &vp, 1) != TABLE_ERROR_NONE) {
+ ssl_mutex_off(s);
+ return FALSE;
+ }
+ memcpy(vp, &expiry, sizeof(time_t));
+ memcpy((char *)vp+sizeof(time_t), ucaData, nData);
+ ssl_mutex_off(s);
+
+ /* allow the regular expiring to occur */
+ ssl_scache_shmht_expire(s);
+
+ return TRUE;
+}
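Each value stored above is a flat record, which is exactly how the retrieve function below unpacks it:

    table value (sizeof(time_t) + nData bytes):
      +0               expiry     (time_t, copied with memcpy)
      +sizeof(time_t)  ucaData[]  (nData bytes of DER-encoded SSL_SESSION)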
+
+SSL_SESSION *ssl_scache_shmht_retrieve(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig();
+ void *vp;
+ SSL_SESSION *sess = NULL;
+ UCHAR *ucpData;
+ UCHAR *ucp;
+ int nData;
+ time_t expiry;
+ time_t now;
+ int n;
+
+ /* allow the regular expiring to occur */
+ ssl_scache_shmht_expire(s);
+
+ /* lookup key in table */
+ ssl_mutex_on(s);
+ if (table_retrieve(mc->tSessionCacheDataTable,
+ id, idlen, &vp, &n) != TABLE_ERROR_NONE) {
+ ssl_mutex_off(s);
+ return NULL;
+ }
+
+ /* copy the expiry time and DER data out of the table entry */
+ nData = n-sizeof(time_t);
+ ucpData = (UCHAR *)malloc(nData);
+ if (ucpData == NULL) {
+ ssl_mutex_off(s);
+ return NULL;
+ }
+ memcpy(&expiry, vp, sizeof(time_t));
+ memcpy(ucpData, (char *)vp+sizeof(time_t), nData);
+ ssl_mutex_off(s);
+
+ /* make sure the stuff is still not expired */
+ now = time(NULL);
+ if (expiry <= now) {
+ ssl_scache_shmht_remove(s, id, idlen);
+ return NULL;
+ }
+
+ /* deserialize the SSL_SESSION (d2i_SSL_SESSION advances the pointer it
+ * is given, so keep the original pointer around to free the buffer) */
+ ucp = ucpData;
+ sess = d2i_SSL_SESSION(NULL, &ucp, nData);
+ free(ucpData);
+
+ return sess;
+}
+
+void ssl_scache_shmht_remove(server_rec *s, UCHAR *id, int idlen)
+{
+ SSLModConfigRec *mc = myModConfig();
+
+ /* remove value under key in table */
+ ssl_mutex_on(s);
+ table_delete(mc->tSessionCacheDataTable, id, idlen, NULL, NULL);
+ ssl_mutex_off(s);
+ return;
+}
+
+void ssl_scache_shmht_expire(server_rec *s)
+{
+ SSLModConfigRec *mc = myModConfig();
+ SSLSrvConfigRec *sc = mySrvConfig(s);
+ static time_t tLast = 0;
+ table_linear_t iterator;
+ time_t tExpiresAt;
+ void *vpKey;
+ void *vpKeyThis;
+ void *vpData;
+ int nKey;
+ int nKeyThis;
+ int nData;
+ int nElements = 0;
+ int nDeleted = 0;
+ int bDelete;
+ int rc;
+ time_t tNow;
+
+ /*
+ * make sure the expiration of not-yet-accessed session
+ * cache entries is only done from time to time
+ */
+ tNow = time(NULL);
+ if (tNow < tLast+sc->nSessionCacheTimeout)
+ return;
+ tLast = tNow;
+
+ ssl_mutex_on(s);
+ if (table_first_r(mc->tSessionCacheDataTable, &iterator,
+ &vpKey, &nKey, &vpData, &nData) == TABLE_ERROR_NONE) {
+ do {
+ bDelete = FALSE;
+ nElements++;
+ if (nData < sizeof(time_t) || vpData == NULL)
+ bDelete = TRUE;
+ else {
+ memcpy(&tExpiresAt, vpData, sizeof(time_t));
+ if (tExpiresAt <= tNow)
+ bDelete = TRUE;
+ }
+ vpKeyThis = vpKey;
+ nKeyThis = nKey;
+ rc = table_next_r(mc->tSessionCacheDataTable, &iterator,
+ &vpKey, &nKey, &vpData, &nData);
+ if (bDelete) {
+ table_delete(mc->tSessionCacheDataTable,
+ vpKeyThis, nKeyThis, NULL, NULL);
+ nDeleted++;
+ }
+ } while (rc == TABLE_ERROR_NONE);
+ }
+ ssl_mutex_off(s);
+ ssl_log(s, SSL_LOG_TRACE, "Inter-Process Session Cache (SHMHT) Expiry: "
+ "old: %d, new: %d, removed: %d", nElements, nElements-nDeleted, nDeleted);
+ return;
+}
+
+void ssl_scache_shmht_status(server_rec *s, pool *p, void (*func)(char *, void *), void *arg)
+{
+ SSLModConfigRec *mc = myModConfig();
+ void *vpKey;
+ void *vpData;
+ int nKey;
+ int nData;
+ int nElem;
+ int nSize;
+ int nAverage;
+
+ nElem = 0;
+ nSize = 0;
+ ssl_mutex_on(s);
+ if (table_first(mc->tSessionCacheDataTable,
+ &vpKey, &nKey, &vpData, &nData) == TABLE_ERROR_NONE) {
+ do {
+ if (vpKey == NULL || vpData == NULL)
+ continue;
+ nElem += 1;
+ nSize += nData;
+ } while (table_next(mc->tSessionCacheDataTable,
+ &vpKey, &nKey, &vpData, &nData) == TABLE_ERROR_NONE);
+ }
+ ssl_mutex_off(s);
+ if (nSize > 0 && nElem > 0)
+ nAverage = nSize / nElem;
+ else
+ nAverage = 0;
+ func(ap_psprintf(p, "cache type: <b>SHMHT</b>, maximum size: <b>%d</b> bytes<br>", mc->nSessionCacheDataSize), arg);
+ func(ap_psprintf(p, "current sessions: <b>%d</b>, current size: <b>%d</b> bytes<br>", nElem, nSize), arg);
+ func(ap_psprintf(p, "average session size: <b>%d</b> bytes<br>", nAverage), arg);
+ return;
+}
+