/*	$OpenBSD: sysv_shm.c,v 1.66 2015/03/14 03:38:50 jsg Exp $	*/
/*	$NetBSD: sysv_shm.c,v 1.50 1998/10/21 22:24:29 tron Exp $	*/

/*
 * Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * Sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F39502-99-1-0512.
 */
/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/pool.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

extern struct shminfo shminfo;
struct shmid_ds **shmsegs;	/* linear mapping of shmid -> shmseg */
struct pool shm_pool;
unsigned short *shmseqs;	/* array of shm sequence numbers */

struct shmid_ds *shm_find_segment_by_shmid(int);

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);				initialization
 * shmexit(struct vmspace *)			cleanup
 * shmfork(struct vmspace *, struct vmspace *)	fork handling
 * shmsys(arg1, arg2, arg3, arg4);	shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds *')
 * per proc 'struct shmmap_head' with an array of 'struct shmmap_state'
 */
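
/*
 * Illustrative only (not part of this file): a minimal userland sketch of
 * the shm{get,at,dt,ctl} interface these handlers implement.  The key,
 * size, and mode values are arbitrary example choices.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	shmaddr == NULL: kernel picks the va
 *	p[0] = 'x';			visible to every other attacher
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);	destroy once nattch drops to zero
 */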
#define	SHMSEG_REMOVED	0x0200		/* can't overlap ACCESSPERMS */

int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	struct uvm_object *shm_object;
};

struct shmmap_state {
	vaddr_t va;
	int shmid;
};

struct shmmap_head {
	int shmseg;
	struct shmmap_state state[1];
};

int shm_find_segment_by_key(key_t);
void shm_deallocate_segment(struct shmid_ds *);
int shm_delete_mapping(struct vmspace *, struct shmmap_state *);
int shmget_existing(struct proc *, struct sys_shmget_args *,
    int, int, register_t *);
int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
    int, register_t *);

int
shm_find_segment_by_key(key_t key)
{
	struct shmid_ds *shmseg;
	int i;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmseg = shmsegs[i];
		if (shmseg != NULL && shmseg->shm_perm.key == key)
			return (i);
	}
	return (-1);
}

struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	return (shmseg);
}
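
/*
 * Sketch of the id <-> (index, sequence) mapping the lookup above relies
 * on, assuming the usual <sys/ipc.h> macro definitions: the low 16 bits
 * of a shmid select the shmsegs[] slot and the high bits carry the slot's
 * sequence number, so a stale id whose slot has been recycled fails the
 * seq comparison and lookup returns NULL.
 *
 *	shmid = IXSEQ_TO_IPCID(ix, perm)  ==  (perm.seq << 16) | ix
 *	IPCID_TO_IX(shmid)                ==  shmid & 0xffff
 *	IPCID_TO_SEQ(shmid)               ==  (shmid >> 16) & 0xffff
 */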
void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = round_page(shmseg->shm_segsz);
	uao_detach(shm_handle->shm_object);
	pool_put(&shm_pool, shmseg);
	shm_committed -= atop(size);
	shm_nused--;
}

int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL)
		return (EINVAL);
	size = round_page(shmseg->shm_segsz);
	uvm_deallocate(&vm->vm_map, shmmap_s->va, size);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
		shmsegs[shm_last_free] = NULL;
	}
	return (0);
}

int
sys_shmdt(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL)
		return (EINVAL);

	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vaddr_t)SCARG(uap, shmaddr))
			break;
	if (i == shmmap_h->shmseg)
		return (EINVAL);
	return (shm_delete_mapping(p->p_vmspace, shmmap_s));
}

int
sys_shmat(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags = 0;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shm_handle *shm_handle;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL) {
		size = sizeof(int) +
		    shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_h = malloc(size, M_SHM, M_WAITOK);
		shmmap_h->shmseg = shminfo.shmseg;
		for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
		    i++, shmmap_s++)
			shmmap_s->shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_h;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return (EINVAL);
	error = ipcperm(cred, &shmseg->shm_perm,
	    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return (error);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shmmap_h->shmseg)
		return (EMFILE);
	size = round_page(shmseg->shm_segsz);
	prot = PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= PROT_WRITE;
	if (SCARG(uap, shmaddr)) {
		flags |= UVM_FLAG_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return (EINVAL);
	} else
		attach_va = 0;
	shm_handle = shmseg->shm_internal;
	uao_reference(shm_handle->shm_object);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0, 0, UVM_MAPFLAG(prot, prot,
	    MAP_INHERIT_SHARE, MADV_RANDOM, flags));
	if (error) {
		uao_detach(shm_handle->shm_object);
		return (error);
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_p->ps_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return (0);
}
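
/*
 * Worked example of the SHM_RND arithmetic above, assuming SHMLBA is the
 * page size (it equals PAGE_SIZE on many ports; some architectures use a
 * larger cache-alignment value).  With SHMLBA = 0x1000:
 *
 *	shmaddr = 0x20001234, SHM_RND set:
 *	    attach_va = 0x20001234 & ~0xfff = 0x20001000   (rounded down)
 *	shmaddr = 0x20001234, SHM_RND clear:
 *	    0x20001234 & 0xfff != 0  ->  EINVAL
 */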
int
sys_shmctl(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;

	return (shmctl1(p, SCARG(uap, shmid), SCARG(uap, cmd),
	    (caddr_t)SCARG(uap, buf), copyin, copyout));
}

int
shmctl1(struct proc *p, int shmid, int cmd, caddr_t buf,
    int (*ds_copyin)(const void *, void *, size_t),
    int (*ds_copyout)(const void *, void *, size_t))
{
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf, *shmseg;
	int error;

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL)
		return (EINVAL);
	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return (error);
		error = ds_copyout(shmseg, buf, sizeof(inbuf));
		if (error)
			return (error);
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		error = ds_copyin(buf, &inbuf, sizeof(inbuf));
		if (error)
			return (error);
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
			shmsegs[shm_last_free] = NULL;
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return (EINVAL);
	}
	return (0);
}
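
/*
 * Note the deferred-destroy semantics implemented above: IPC_RMID only
 * marks the segment SHMSEG_REMOVED and makes its key private (so new
 * shmget() calls can no longer find it); the memory itself persists
 * until the last attacher detaches.  An illustrative userland sequence:
 *
 *	int id = shmget(key, len, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	segment marked, p still valid
 *	p[0] = 'x';			still mapped, still shared
 *	shmdt(p);			nattch -> 0: segment deallocated
 */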
int
shmget_existing(struct proc *p,
    struct sys_shmget_args /* {
	syscallarg(key_t) key;
	syscallarg(size_t) size;
	syscallarg(int) shmflg;
    } */ *uap,
    int mode, int segnum, register_t *retval)
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = shmsegs[segnum];	/* We assume the segnum is valid */
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return (error);
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return (EINVAL);
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (0);
}

int
shmget_allocate_segment(struct proc *p,
    struct sys_shmget_args /* {
	syscallarg(key_t) key;
	syscallarg(size_t) size;
	syscallarg(int) shmflg;
    } */ *uap,
    int mode, register_t *retval)
{
	size_t size;
	key_t key;
	int segnum;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni)	/* any shmids left? */
		return (ENOSPC);
	size = round_page(SCARG(uap, size));
	if (shm_committed + atop(size) > shminfo.shmall)
		return (ENOMEM);
	shm_nused++;
	shm_committed += atop(size);

	/*
	 * If a key has been specified and we had to wait for memory
	 * to be freed up we need to verify that no one has allocated
	 * the key we want in the meantime.  Yes, this is ugly.
	 */
	key = SCARG(uap, key);
	shmseg = pool_get(&shm_pool, key == IPC_PRIVATE ? PR_WAITOK :
	    PR_NOWAIT);
	if (shmseg == NULL) {
		shmseg = pool_get(&shm_pool, PR_WAITOK);
		if (shm_find_segment_by_key(key) != -1) {
			pool_put(&shm_pool, shmseg);
			shm_nused--;
			shm_committed -= atop(size);
			return (EAGAIN);
		}
	}

	/* XXX - hash shmids instead */
	if (shm_last_free < 0) {
		for (segnum = 0; segnum < shminfo.shmmni && shmsegs[segnum];
		    segnum++)
			;
		if (segnum == shminfo.shmmni)
			panic("shmseg free count inconsistent");
	} else {
		segnum = shm_last_free;
		if (++shm_last_free >= shminfo.shmmni || shmsegs[shm_last_free])
			shm_last_free = -1;
	}
	shmsegs[segnum] = shmseg;

	shm_handle = (struct shm_handle *)((caddr_t)shmseg + sizeof(*shmseg));
	shm_handle->shm_object = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (mode & ACCESSPERMS);
	shmseg->shm_perm.seq = shmseqs[segnum] = (shmseqs[segnum] + 1) & 0x7fff;
	shmseg->shm_perm.key = key;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_p->ps_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shmseg->shm_internal = shm_handle;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (error);
}
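
/*
 * The seq update above is what keeps recycled ids distinguishable: each
 * time a shmsegs[] slot is reused, its sequence number increments (mod
 * 0x8000), so successive occupants of the same slot hand out different
 * ids.  For example, with segnum = 3 and the id encoding sketched near
 * shm_find_segment_by_shmid():
 *
 *	first allocation:    seq = 1  ->  shmid = (1 << 16) | 3 = 0x10003
 *	after free + reuse:  seq = 2  ->  shmid = (2 << 16) | 3 = 0x20003
 *
 * A later shmat() with the stale 0x10003 then fails the IPCID_TO_SEQ()
 * check in shm_find_segment_by_shmid().
 */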
int
sys_shmget(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0)
			return (shmget_existing(p, uap, mode, segnum, retval));
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return (ENOENT);
	}
	error = shmget_allocate_segment(p, uap, mode, retval);
	if (error == EAGAIN)
		goto again;
	return (error);
}

void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shmid_ds *shmseg;
	size_t size;
	int i;

	if (vm1->vm_shm == NULL) {
		vm2->vm_shm = NULL;
		return;
	}

	shmmap_h = (struct shmmap_head *)vm1->vm_shm;
	size = sizeof(int) + shmmap_h->shmseg * sizeof(struct shmmap_state);
	vm2->vm_shm = malloc(size, M_SHM, M_WAITOK);
	memcpy(vm2->vm_shm, vm1->vm_shm, size);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    (shmseg = shmsegs[IPCID_TO_IX(shmmap_s->shmid)]) != NULL)
			shmseg->shm_nattch++;
	}
}

void
shmexit(struct vmspace *vm)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)vm->vm_shm;
	if (shmmap_h == NULL)
		return;
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(vm, shmmap_s);
	free(vm->vm_shm, M_SHM, 0);
	vm->vm_shm = NULL;
}

void
shminit(void)
{
	pool_init(&shm_pool, sizeof(struct shmid_ds) +
	    sizeof(struct shm_handle), 0, 0, PR_WAITOK, "shmpl", NULL);
	shmsegs = mallocarray(shminfo.shmmni, sizeof(struct shmid_ds *),
	    M_SHM, M_WAITOK|M_ZERO);
	shmseqs = mallocarray(shminfo.shmmni, sizeof(unsigned short),
	    M_SHM, M_WAITOK|M_ZERO);

	shminfo.shmmax *= PAGE_SIZE;	/* actually in pages */
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}

/*
 * Userland access to struct shminfo.
 */
int
sysctl_sysvshm(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int error, val;
	struct shmid_ds **newsegs;
	unsigned short *newseqs;

	if (namelen != 2) {
		switch (name[0]) {
		case KERN_SHMINFO_SHMMAX:
		case KERN_SHMINFO_SHMMIN:
		case KERN_SHMINFO_SHMMNI:
		case KERN_SHMINFO_SHMSEG:
		case KERN_SHMINFO_SHMALL:
			break;
		default:
			return (ENOTDIR);	/* overloaded */
		}
	}

	switch (name[0]) {
	case KERN_SHMINFO_SHMMAX:
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &shminfo.shmmax)) || newp == NULL)
			return (error);

		/* If new shmmax > shmall, crank shmall */
		if (atop(round_page(shminfo.shmmax)) > shminfo.shmall)
			shminfo.shmall = atop(round_page(shminfo.shmmax));
		return (0);
	case KERN_SHMINFO_SHMMIN:
		val = shminfo.shmmin;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmmin)
			return (error);
		if (val <= 0)
			return (EINVAL);	/* shmmin must be >= 1 */
		shminfo.shmmin = val;
		return (0);
	case KERN_SHMINFO_SHMMNI:
		val = shminfo.shmmni;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmmni)
			return (error);

		if (val < shminfo.shmmni || val > 0xffff)
			return (EINVAL);

		/* Expand shmsegs and shmseqs arrays */
		newsegs = mallocarray(val, sizeof(struct shmid_ds *),
		    M_SHM, M_WAITOK|M_ZERO);
		memcpy(newsegs, shmsegs,
		    shminfo.shmmni * sizeof(struct shmid_ds *));
		free(shmsegs, M_SHM, 0);
		shmsegs = newsegs;
		newseqs = mallocarray(val, sizeof(unsigned short), M_SHM,
		    M_WAITOK|M_ZERO);
		memcpy(newseqs, shmseqs,
		    shminfo.shmmni * sizeof(unsigned short));
		free(shmseqs, M_SHM, 0);
		shmseqs = newseqs;
		shminfo.shmmni = val;
		return (0);
	case KERN_SHMINFO_SHMSEG:
		val = shminfo.shmseg;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmseg)
			return (error);
		if (val <= 0)
			return (EINVAL);	/* shmseg must be >= 1 */
		shminfo.shmseg = val;
		return (0);
	case KERN_SHMINFO_SHMALL:
		val = shminfo.shmall;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmall)
			return (error);
		if (val < shminfo.shmall)
			return (EINVAL);	/* can't decrease shmall */
		shminfo.shmall = val;
		return (0);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
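
/*
 * Illustrative only: these limits are normally tuned with sysctl(8),
 * e.g. "sysctl kern.shminfo.shmmax=67108864".  A hedged sketch of the
 * equivalent sysctl(2) call from C, assuming the usual three-level mib
 * (CTL_KERN, then the kern.shminfo node, then the KERN_SHMINFO_* leaf
 * this handler switches on):
 *
 *	int mib[3] = { CTL_KERN, KERN_SHMINFO, KERN_SHMINFO_SHMMAX };
 *	int newmax = 64 * 1024 * 1024;
 *	sysctl(mib, 3, NULL, NULL, &newmax, sizeof(newmax));
 *
 * Note that shminfo.shmmax is in bytes here, since shminit() has already
 * scaled the boot-time page count by PAGE_SIZE.
 */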