summaryrefslogtreecommitdiff
path: root/sys/kern/sysv_shm.c
diff options
context:
space:
mode:
authorTodd C. Miller <millert@cvs.openbsd.org>2002-12-17 23:11:33 +0000
committerTodd C. Miller <millert@cvs.openbsd.org>2002-12-17 23:11:33 +0000
commit25ea8c8a69576ca14e0d418bcc7d03b82db24db2 (patch)
treed9af0722d8761b2c04ddf3def6874033dfd44219 /sys/kern/sysv_shm.c
parenta6dcb6be4019c0f02edd62f13fbb5737a42598aa (diff)
Make SysV-style shared memory and semaphore limits sysctl'able.
Instead of allocating a static amount of memory for the data structures via valloc() in allocsys(), allocate things dynamically using pool(9) when possible and malloc(9) when not. The various members of struct seminfo and struct shminfo are in kern.seminfo and kern.shminfo respectively (not all members of kern.seminfo are changeable). The data structures used still leave something to be desired but things are not made worse in that respect by this commit.
Diffstat (limited to 'sys/kern/sysv_shm.c')
-rw-r--r--sys/kern/sysv_shm.c443
1 file changed, 264 insertions, 179 deletions
diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c
index 4dce0048b20..e016dc33df7 100644
--- a/sys/kern/sysv_shm.c
+++ b/sys/kern/sysv_shm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sysv_shm.c,v 1.29 2002/11/06 00:17:28 art Exp $ */
+/* $OpenBSD: sysv_shm.c,v 1.30 2002/12/17 23:11:31 millert Exp $ */
/* $NetBSD: sysv_shm.c,v 1.50 1998/10/21 22:24:29 tron Exp $ */
/*
@@ -40,7 +40,9 @@
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
+#include <sys/pool.h>
#include <sys/systm.h>
+#include <sys/sysctl.h>
#include <sys/stat.h>
#include <sys/mount.h>
@@ -49,7 +51,9 @@
#include <uvm/uvm_extern.h>
struct shminfo shminfo;
-struct shmid_ds *shmsegs;
+struct shmid_ds **shmsegs; /* linear mapping of shmid -> shmseg */
+struct pool shm_pool;
+unsigned short *shmseqs; /* array of shm sequence numbers */
struct shmid_ds *shm_find_segment_by_shmid(int);
@@ -62,14 +66,11 @@ struct shmid_ds *shm_find_segment_by_shmid(int);
* shmsys(arg1, arg2, arg3, arg4); shm{at,ctl,dt,get}(arg2, arg3, arg4)
*
* Structures:
- * shmsegs (an array of 'struct shmid_ds')
- * per proc array of 'struct shmmap_state'
+ * shmsegs (an array of 'struct shmid_ds *')
+ * per proc 'struct shmmap_head' with an array of 'struct shmmap_state'
*/
-#define SHMSEG_FREE 0x0200
-#define SHMSEG_REMOVED 0x0400
-#define SHMSEG_ALLOCATED 0x0800
-#define SHMSEG_WANTED 0x1000
+#define SHMSEG_REMOVED 0x0200 /* can't overlap ACCESSPERMS */
int shm_last_free, shm_nused, shm_committed;
@@ -82,6 +83,11 @@ struct shmmap_state {
int shmid;
};
+struct shmmap_head {
+ int shmseg;
+ struct shmmap_state state[1];
+};
+
int shm_find_segment_by_key(key_t);
void shm_deallocate_segment(struct shmid_ds *);
int shm_delete_mapping(struct vmspace *, struct shmmap_state *);
@@ -91,39 +97,37 @@ int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
int, register_t *);
int
-shm_find_segment_by_key(key)
- key_t key;
+shm_find_segment_by_key(key_t key)
{
+ struct shmid_ds *shmseg;
int i;
- for (i = 0; i < shminfo.shmmni; i++)
- if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
- shmsegs[i].shm_perm.key == key)
- return i;
- return -1;
+ for (i = 0; i < shminfo.shmmni; i++) {
+ shmseg = shmsegs[i];
+ if (shmseg != NULL && shmseg->shm_perm.key == key)
+ return (i);
+ }
+ return (-1);
}
struct shmid_ds *
-shm_find_segment_by_shmid(shmid)
- int shmid;
+shm_find_segment_by_shmid(int shmid)
{
int segnum;
struct shmid_ds *shmseg;
segnum = IPCID_TO_IX(shmid);
- if (segnum < 0 || segnum >= shminfo.shmmni)
- return NULL;
- shmseg = &shmsegs[segnum];
- if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
- != SHMSEG_ALLOCATED ||
+ if (segnum < 0 || segnum >= shminfo.shmmni ||
+ (shmseg = shmsegs[segnum]) == NULL)
+ return (NULL);
+ if ((shmseg->shm_perm.mode & SHMSEG_REMOVED) ||
shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
- return NULL;
- return shmseg;
+ return (NULL);
+ return (shmseg);
}
void
-shm_deallocate_segment(shmseg)
- struct shmid_ds *shmseg;
+shm_deallocate_segment(struct shmid_ds *shmseg)
{
struct shm_handle *shm_handle;
size_t size;
@@ -131,24 +135,22 @@ shm_deallocate_segment(shmseg)
shm_handle = shmseg->shm_internal;
size = round_page(shmseg->shm_segsz);
uao_detach(shm_handle->shm_object);
- free((caddr_t)shm_handle, M_SHM);
- shmseg->shm_internal = NULL;
+ pool_put(&shm_pool, shmseg);
shm_committed -= btoc(size);
- shmseg->shm_perm.mode = SHMSEG_FREE;
shm_nused--;
}
int
-shm_delete_mapping(vm, shmmap_s)
- struct vmspace *vm;
- struct shmmap_state *shmmap_s;
+shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
struct shmid_ds *shmseg;
int segnum;
size_t size;
segnum = IPCID_TO_IX(shmmap_s->shmid);
- shmseg = &shmsegs[segnum];
+ if (segnum < 0 || segnum >= shminfo.shmmni ||
+ (shmseg = shmsegs[segnum]) == NULL)
+ return (EINVAL);
size = round_page(shmseg->shm_segsz);
uvm_deallocate(&vm->vm_map, shmmap_s->va, size);
shmmap_s->shmid = -1;
@@ -157,40 +159,37 @@ shm_delete_mapping(vm, shmmap_s)
(shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
shm_deallocate_segment(shmseg);
shm_last_free = segnum;
+ shmsegs[shm_last_free] = NULL;
}
- return 0;
+ return (0);
}
int
-sys_shmdt(p, v, retval)
- struct proc *p;
- void *v;
- register_t *retval;
+sys_shmdt(struct proc *p, void *v, register_t *retval)
{
struct sys_shmdt_args /* {
syscallarg(const void *) shmaddr;
} */ *uap = v;
+ struct shmmap_head *shmmap_h;
struct shmmap_state *shmmap_s;
int i;
- shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
- if (shmmap_s == NULL)
- return EINVAL;
+ shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
+ if (shmmap_h == NULL)
+ return (EINVAL);
- for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
+ for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
+ i++, shmmap_s++)
if (shmmap_s->shmid != -1 &&
shmmap_s->va == (vaddr_t)SCARG(uap, shmaddr))
break;
- if (i == shminfo.shmseg)
- return EINVAL;
- return shm_delete_mapping(p->p_vmspace, shmmap_s);
+ if (i == shmmap_h->shmseg)
+ return (EINVAL);
+ return (shm_delete_mapping(p->p_vmspace, shmmap_s));
}
int
-sys_shmat(p, v, retval)
- struct proc *p;
- void *v;
- register_t *retval;
+sys_shmat(struct proc *p, void *v, register_t *retval)
{
struct sys_shmat_args /* {
syscallarg(int) shmid;
@@ -200,34 +199,38 @@ sys_shmat(p, v, retval)
int error, i, flags;
struct ucred *cred = p->p_ucred;
struct shmid_ds *shmseg;
- struct shmmap_state *shmmap_s = NULL;
+ struct shmmap_head *shmmap_h;
+ struct shmmap_state *shmmap_s;
struct shm_handle *shm_handle;
vaddr_t attach_va;
vm_prot_t prot;
vsize_t size;
- shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
- if (shmmap_s == NULL) {
- size = shminfo.shmseg * sizeof(struct shmmap_state);
- shmmap_s = malloc(size, M_SHM, M_WAITOK);
- for (i = 0; i < shminfo.shmseg; i++)
- shmmap_s[i].shmid = -1;
- p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
+ shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
+ if (shmmap_h == NULL) {
+ size = sizeof(int) +
+ shminfo.shmseg * sizeof(struct shmmap_state);
+ shmmap_h = malloc(size, M_SHM, M_WAITOK);
+ shmmap_h->shmseg = shminfo.shmseg;
+ for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
+ i++, shmmap_s++)
+ shmmap_s->shmid = -1;
+ p->p_vmspace->vm_shm = (caddr_t)shmmap_h;
}
shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
if (shmseg == NULL)
- return EINVAL;
+ return (EINVAL);
error = ipcperm(cred, &shmseg->shm_perm,
(SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
if (error)
- return error;
- for (i = 0; i < shminfo.shmseg; i++) {
+ return (error);
+ for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg; i++) {
if (shmmap_s->shmid == -1)
break;
shmmap_s++;
}
- if (i >= shminfo.shmseg)
- return EMFILE;
+ if (i >= shmmap_h->shmseg)
+ return (EMFILE);
size = round_page(shmseg->shm_segsz);
prot = VM_PROT_READ;
if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
@@ -241,7 +244,7 @@ sys_shmat(p, v, retval)
else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
attach_va = (vaddr_t)SCARG(uap, shmaddr);
else
- return EINVAL;
+ return (EINVAL);
} else {
/* This is just a hint to uvm_map() about where to put it. */
attach_va = round_page((vaddr_t)p->p_vmspace->vm_taddr +
@@ -261,14 +264,11 @@ sys_shmat(p, v, retval)
shmseg->shm_atime = time.tv_sec;
shmseg->shm_nattch++;
*retval = attach_va;
- return 0;
+ return (0);
}
int
-sys_shmctl(p, v, retval)
- struct proc *p;
- void *v;
- register_t *retval;
+sys_shmctl(struct proc *p, void *v, register_t *retval)
{
struct sys_shmctl_args /* {
syscallarg(int) shmid;
@@ -282,23 +282,23 @@ sys_shmctl(p, v, retval)
shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
if (shmseg == NULL)
- return EINVAL;
+ return (EINVAL);
switch (SCARG(uap, cmd)) {
case IPC_STAT:
if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
- return error;
+ return (error);
error = copyout((caddr_t)shmseg, SCARG(uap, buf),
sizeof(inbuf));
if (error)
- return error;
+ return (error);
break;
case IPC_SET:
if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
- return error;
+ return (error);
error = copyin(SCARG(uap, buf), (caddr_t)&inbuf,
sizeof(inbuf));
if (error)
- return error;
+ return (error);
shmseg->shm_perm.uid = inbuf.shm_perm.uid;
shmseg->shm_perm.gid = inbuf.shm_perm.gid;
shmseg->shm_perm.mode =
@@ -308,74 +308,59 @@ sys_shmctl(p, v, retval)
break;
case IPC_RMID:
if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
- return error;
+ return (error);
shmseg->shm_perm.key = IPC_PRIVATE;
shmseg->shm_perm.mode |= SHMSEG_REMOVED;
if (shmseg->shm_nattch <= 0) {
shm_deallocate_segment(shmseg);
shm_last_free = IPCID_TO_IX(SCARG(uap, shmid));
+ shmsegs[shm_last_free] = NULL;
}
break;
case SHM_LOCK:
case SHM_UNLOCK:
default:
- return EINVAL;
+ return (EINVAL);
}
- return 0;
+ return (0);
}
int
-shmget_existing(p, uap, mode, segnum, retval)
- struct proc *p;
+shmget_existing(struct proc *p,
struct sys_shmget_args /* {
syscallarg(key_t) key;
syscallarg(size_t) size;
syscallarg(int) shmflg;
- } */ *uap;
- int mode;
- int segnum;
- register_t *retval;
+ } */ *uap,
+ int mode, int segnum, register_t *retval)
{
struct shmid_ds *shmseg;
struct ucred *cred = p->p_ucred;
int error;
- shmseg = &shmsegs[segnum];
- if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
- /*
- * This segment is in the process of being allocated. Wait
- * until it's done, and look the key up again (in case the
- * allocation failed or it was freed).
- */
- shmseg->shm_perm.mode |= SHMSEG_WANTED;
- error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
- if (error)
- return error;
- return EAGAIN;
- }
+ shmseg = shmsegs[segnum]; /* We assume the segnum is valid */
if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
- return error;
+ return (error);
if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
- return EINVAL;
+ return (EINVAL);
if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
(IPC_CREAT | IPC_EXCL))
- return EEXIST;
+ return (EEXIST);
*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
- return 0;
+ return (0);
}
int
-shmget_allocate_segment(p, uap, mode, retval)
- struct proc *p;
+shmget_allocate_segment(struct proc *p,
struct sys_shmget_args /* {
syscallarg(key_t) key;
syscallarg(size_t) size;
syscallarg(int) shmflg;
- } */ *uap;
- int mode;
- register_t *retval;
+ } */ *uap,
+ int mode, register_t *retval)
{
- int i, segnum, shmid, size;
+ key_t key;
+ int segnum, size;
struct ucred *cred = p->p_ucred;
struct shmid_ds *shmseg;
struct shm_handle *shm_handle;
@@ -383,68 +368,67 @@ shmget_allocate_segment(p, uap, mode, retval)
if (SCARG(uap, size) < shminfo.shmmin ||
SCARG(uap, size) > shminfo.shmmax)
- return EINVAL;
+ return (EINVAL);
if (shm_nused >= shminfo.shmmni) /* any shmids left? */
- return ENOSPC;
+ return (ENOSPC);
size = round_page(SCARG(uap, size));
if (shm_committed + btoc(size) > shminfo.shmall)
- return ENOMEM;
+ return (ENOMEM);
+ shm_nused++;
+ shm_committed += btoc(size);
+
+ /*
+ * If a key has been specified and we had to wait for memory
+ * to be freed up we need to verify that no one has allocated
+ * the key we want in the meantime. Yes, this is ugly.
+ */
+ key = SCARG(uap, key);
+ shmseg = pool_get(&shm_pool, key == IPC_PRIVATE ? PR_WAITOK : 0);
+ if (shmseg == NULL) {
+ shmseg = pool_get(&shm_pool, PR_WAITOK);
+ if (shm_find_segment_by_key(key) != -1) {
+ pool_put(&shm_pool, shmseg);
+ shm_nused--;
+ shm_committed -= btoc(size);
+ return (EAGAIN);
+ }
+ }
+
+ /* XXX - hash shmids instead */
if (shm_last_free < 0) {
- for (i = 0; i < shminfo.shmmni; i++)
- if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
- break;
- if (i == shminfo.shmmni)
+ for (segnum = 0; segnum < shminfo.shmmni && shmsegs[segnum];
+ segnum++)
+ ;
+ if (segnum == shminfo.shmmni)
panic("shmseg free count inconsistent");
- segnum = i;
- } else {
+ } else {
segnum = shm_last_free;
- shm_last_free = -1;
+ if (++shm_last_free >= shminfo.shmmni || shmsegs[shm_last_free])
+ shm_last_free = -1;
}
- shmseg = &shmsegs[segnum];
- /*
- * In case we sleep in malloc(), mark the segment present but deleted
- * so that noone else tries to create the same key.
- */
- shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
- shmseg->shm_perm.key = SCARG(uap, key);
- shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
- shm_handle = (struct shm_handle *)
- malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
- shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
-
+ shmsegs[segnum] = shmseg;
+ shm_handle = (struct shm_handle *)((caddr_t)shmseg + sizeof(*shmseg));
shm_handle->shm_object = uao_create(size, 0);
- shmseg->shm_internal = shm_handle;
shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
- shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
- (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
+ shmseg->shm_perm.mode = (mode & ACCESSPERMS);
+ shmseg->shm_perm.seq = shmseqs[segnum] = (shmseqs[segnum] + 1) & 0x7fff;
+ shmseg->shm_perm.key = key;
shmseg->shm_segsz = SCARG(uap, size);
shmseg->shm_cpid = p->p_pid;
shmseg->shm_lpid = shmseg->shm_nattch = 0;
shmseg->shm_atime = shmseg->shm_dtime = 0;
shmseg->shm_ctime = time.tv_sec;
- shm_committed += btoc(size);
- shm_nused++;
+ shmseg->shm_internal = shm_handle;
- *retval = shmid;
- if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
- /*
- * Somebody else wanted this key while we were asleep. Wake
- * them up now.
- */
- shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
- wakeup((caddr_t)shmseg);
- }
- return error;
+ *retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
+ return (error);
}
int
-sys_shmget(p, v, retval)
- struct proc *p;
- void *v;
- register_t *retval;
+sys_shmget(struct proc *p, void *v, register_t *retval)
{
struct sys_shmget_args /* {
syscallarg(key_t) key;
@@ -457,23 +441,23 @@ sys_shmget(p, v, retval)
if (SCARG(uap, key) != IPC_PRIVATE) {
again:
segnum = shm_find_segment_by_key(SCARG(uap, key));
- if (segnum >= 0) {
- error = shmget_existing(p, uap, mode, segnum, retval);
- if (error == EAGAIN)
- goto again;
- return error;
- }
+ if (segnum >= 0)
+ return (shmget_existing(p, uap, mode, segnum, retval));
if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
- return ENOENT;
+ return (ENOENT);
}
- return shmget_allocate_segment(p, uap, mode, retval);
+ error = shmget_allocate_segment(p, uap, mode, retval);
+ if (error == EAGAIN)
+ goto again;
+ return (error);
}
void
-shmfork(vm1, vm2)
- struct vmspace *vm1, *vm2;
+shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
+ struct shmmap_head *shmmap_h;
struct shmmap_state *shmmap_s;
+ struct shmid_ds *shmseg;
size_t size;
int i;
@@ -482,26 +466,30 @@ shmfork(vm1, vm2)
return;
}
- size = shminfo.shmseg * sizeof(struct shmmap_state);
- shmmap_s = malloc(size, M_SHM, M_WAITOK);
- bcopy(vm1->vm_shm, shmmap_s, size);
- vm2->vm_shm = (caddr_t)shmmap_s;
- for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
- if (shmmap_s->shmid != -1)
- shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
+ shmmap_h = (struct shmmap_head *)vm1->vm_shm;
+ size = sizeof(int) + shmmap_h->shmseg * sizeof(struct shmmap_state);
+ vm2->vm_shm = malloc(size, M_SHM, M_WAITOK);
+ bcopy(vm1->vm_shm, vm2->vm_shm, size);
+ for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
+ i++, shmmap_s++) {
+ if (shmmap_s->shmid != -1 &&
+ (shmseg = shmsegs[IPCID_TO_IX(shmmap_s->shmid)]) != NULL)
+ shmseg->shm_nattch++;
+ }
}
void
-shmexit(vm)
- struct vmspace *vm;
+shmexit(struct vmspace *vm)
{
+ struct shmmap_head *shmmap_h;
struct shmmap_state *shmmap_s;
int i;
- shmmap_s = (struct shmmap_state *)vm->vm_shm;
- if (shmmap_s == NULL)
+ shmmap_h = (struct shmmap_head *)vm->vm_shm;
+ if (shmmap_h == NULL)
return;
- for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
+ for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
+ i++, shmmap_s++)
if (shmmap_s->shmid != -1)
shm_delete_mapping(vm, shmmap_s);
free(vm->vm_shm, M_SHM);
@@ -509,26 +497,29 @@ shmexit(vm)
}
void
-shminit()
+shminit(void)
{
- int i;
- shminfo.shmmax *= PAGE_SIZE;
-
- for (i = 0; i < shminfo.shmmni; i++) {
- shmsegs[i].shm_perm.mode = SHMSEG_FREE;
- shmsegs[i].shm_perm.seq = 0;
- }
+ pool_init(&shm_pool, sizeof(struct shmid_ds) +
+ sizeof(struct shm_handle), 0, 0, 0, "shmpl",
+ &pool_allocator_nointr);
+ shmsegs = malloc(shminfo.shmmni * sizeof(struct shmid_ds *),
+ M_SHM, M_WAITOK);
+ bzero(shmsegs, shminfo.shmmni * sizeof(struct shmid_ds *));
+ shmseqs = malloc(shminfo.shmmni * sizeof(unsigned short),
+ M_SHM, M_WAITOK);
+ bzero(shmseqs, shminfo.shmmni * sizeof(unsigned short));
+
+ shminfo.shmmax *= PAGE_SIZE; /* actually in pages */
shm_last_free = 0;
shm_nused = 0;
shm_committed = 0;
}
void
-shmid_n2o(n, o)
- struct shmid_ds *n;
- struct oshmid_ds *o;
+shmid_n2o(struct shmid_ds *n, struct oshmid_ds *o)
{
+
o->shm_segsz = n->shm_segsz;
o->shm_lpid = n->shm_lpid;
o->shm_cpid = n->shm_cpid;
@@ -539,3 +530,97 @@ shmid_n2o(n, o)
o->shm_internal = n->shm_internal;
ipc_n2o(&n->shm_perm, &o->shm_perm);
}
+
+/*
+ * Userland access to struct shminfo.
+ */
+int
+sysctl_sysvshm(int *name, u_int namelen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int error, val;
+ struct shmid_ds **newsegs;
+ unsigned short *newseqs;
+
+ if (namelen != 2) {
+ switch (name[0]) {
+ case KERN_SHMINFO_SHMMAX:
+ case KERN_SHMINFO_SHMMIN:
+ case KERN_SHMINFO_SHMMNI:
+ case KERN_SHMINFO_SHMSEG:
+ case KERN_SHMINFO_SHMALL:
+ break;
+ default:
+ return (ENOTDIR); /* overloaded */
+ }
+ }
+
+ switch (name[0]) {
+ case KERN_SHMINFO_SHMMAX:
+ if ((error = sysctl_int(oldp, oldlenp, newp, newlen,
+ &shminfo.shmmax)) || newp == NULL)
+ return (error);
+
+ /* If new shmmax > shmall, crank shmall */
+ if (btoc(round_page(shminfo.shmmax)) > shminfo.shmall)
+ shminfo.shmall = btoc(round_page(shminfo.shmmax));
+ return (0);
+ case KERN_SHMINFO_SHMMIN:
+ val = shminfo.shmmin;
+ if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
+ val == shminfo.shmmin)
+ return (error);
+ if (val <= 0)
+ return (EINVAL); /* shmmin must be >= 1 */
+ shminfo.shmmin = val;
+ return (0);
+ case KERN_SHMINFO_SHMMNI:
+ val = shminfo.shmmni;
+ if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
+ val == shminfo.shmmni)
+ return (error);
+
+ if (val < shminfo.shmmni)
+ return (EINVAL); /* can't decrease shmmni */
+
+ /* Expand shmsegs and shmseqs arrays */
+ newsegs = malloc(val * sizeof(struct shmid_ds *),
+ M_SHM, M_WAITOK);
+ bcopy(shmsegs, newsegs,
+ shminfo.shmmni * sizeof(struct shmid_ds *));
+ bzero(newsegs + shminfo.shmmni,
+ (val - shminfo.shmmni) * sizeof(struct shmid_ds *));
+ newseqs = malloc(val * sizeof(unsigned short), M_SHM, M_WAITOK);
+ bcopy(shmseqs, newseqs,
+ shminfo.shmmni * sizeof(unsigned short));
+ bzero(newseqs + shminfo.shmmni,
+ (val - shminfo.shmmni) * sizeof(unsigned short));
+ free(shmsegs, M_SHM);
+ free(shmseqs, M_SHM);
+ shmsegs = newsegs;
+ shmseqs = newseqs;
+ shminfo.shmmni = val;
+ return (0);
+ case KERN_SHMINFO_SHMSEG:
+ val = shminfo.shmseg;
+ if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
+ val == shminfo.shmseg)
+ return (error);
+ if (val <= 0)
+ return (EINVAL); /* shmseg must be >= 1 */
+ shminfo.shmseg = val;
+ return (0);
+ case KERN_SHMINFO_SHMALL:
+ val = shminfo.shmall;
+ if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
+ val == shminfo.shmall)
+ return (error);
+ if (val < shminfo.shmall)
+ return (EINVAL); /* can't decrease shmall */
+ shminfo.shmall = val;
+ return (0);
+ default:
+ return (EOPNOTSUPP);
+ }
+ /* NOTREACHED */
+}