/*	$OpenBSD: kern_fork.c,v 1.181 2015/07/19 02:35:35 deraadt Exp $	*/
/*	$NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/pool.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/atomic.h>
#include <sys/unistd.h>

#include <sys/syscallargs.h>

#include "systrace.h"
#include <dev/systrace.h>

#include <uvm/uvm.h>

#ifdef __HAVE_MD_TCB
# include <machine/tcb.h>
#endif

int	nprocesses = 1;		/* process 0 */
int	nthreads = 1;		/* proc 0 */
int	randompid;		/* when set to 1, pid's go random */

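/* fork/vfork/tfork/kthread counters, exported through the kern.forkstat sysctl */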
struct	forkstat forkstat;

void	fork_return(void *);
void	tfork_child_return(void *);
int	pidtaken(pid_t);

void	process_new(struct proc *, struct process *, int);

void
fork_return(void *arg)
{
        struct proc *p = (struct proc *)arg;

        if (p->p_p->ps_flags & PS_TRACED)
                psignal(p, SIGTRAP);

        child_return(p);
}

int
sys_fork(struct proc *p, void *v, register_t *retval)
{
        int flags;

        flags = FORK_FORK;
        if (p->p_p->ps_ptmask & PTRACE_FORK)
                flags |= FORK_PTRACE;
        return (fork1(p, flags, NULL, 0, fork_return, NULL, retval, NULL));
}

int
sys_vfork(struct proc *p, void *v, register_t *retval)
{
        return (fork1(p, FORK_VFORK|FORK_PPWAIT, NULL, 0, NULL,
            NULL, retval, NULL));
}

int
sys___tfork(struct proc *p, void *v, register_t *retval)
{
        struct sys___tfork_args /* {
                syscallarg(const struct __tfork) *param;
                syscallarg(size_t) psize;
        } */ *uap = v;
        size_t psize = SCARG(uap, psize);
        struct __tfork param = { 0 };
        int flags;
        int error;

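        /*
         * Accept a parameter block no larger than the current struct;
         * a shorter block leaves the remaining fields zero-initialized.
         */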
        if (psize == 0 || psize > sizeof(param))
                return (EINVAL);
        if ((error = copyin(SCARG(uap, param), &param, psize)))
                return (error);
#ifdef KTRACE
        if (KTRPOINT(p, KTR_STRUCT))
                ktrstruct(p, "tfork", &param, sizeof(param));
#endif

        flags = FORK_TFORK | FORK_THREAD | FORK_SIGHAND | FORK_SHAREVM
            | FORK_SHAREFILES;

        return (fork1(p, flags, param.tf_stack, param.tf_tid,
            tfork_child_return, param.tf_tcb, retval, NULL));
}

void
tfork_child_return(void *arg)
{
        struct proc *p = curproc;

        TCB_SET(p, arg);
        child_return(p);
}

/*
 * Initialize common bits of a process structure, given the initial thread.
 */
void
process_initialize(struct process *pr, struct proc *p)
{
        /* initialize the thread links */
        pr->ps_mainproc = p;
        TAILQ_INIT(&pr->ps_threads);
        TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
        pr->ps_refcnt = 1;
        p->p_p = pr;

        /* give the process the same creds as the initial thread */
        pr->ps_ucred = p->p_ucred;
        crhold(pr->ps_ucred);
        KASSERT(p->p_ucred->cr_ref >= 2);	/* new thread and new process */

        LIST_INIT(&pr->ps_children);

        timeout_set(&pr->ps_realit_to, realitexpire, pr);
}

/*
 * Allocate and initialize a new process.
 */
void
process_new(struct proc *p, struct process *parent, int flags)
{
        struct process *pr;

        pr = pool_get(&process_pool, PR_WAITOK);

        /*
         * Make a process structure for the new process.
         * Start by zeroing the section of proc that is zero-initialized,
         * then copy the section that is copied directly from the parent.
         */
        memset(&pr->ps_startzero, 0,
            (caddr_t)&pr->ps_endzero - (caddr_t)&pr->ps_startzero);
        memcpy(&pr->ps_startcopy, &parent->ps_startcopy,
            (caddr_t)&pr->ps_endcopy - (caddr_t)&pr->ps_startcopy);

        process_initialize(pr, p);

        /* post-copy fixups */
        pr->ps_pptr = parent;
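        /* the limit structure was copied from the parent above; take our own reference */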
        pr->ps_limit->p_refcnt++;

        /* bump references to the text vnode (for sysctl) */
        pr->ps_textvp = parent->ps_textvp;
        if (pr->ps_textvp)
                vref(pr->ps_textvp);

        pr->ps_flags = parent->ps_flags & (PS_SUGID | PS_SUGIDEXEC | PS_TAMED);
        if (parent->ps_session->s_ttyvp != NULL)
                pr->ps_flags |= parent->ps_flags & PS_CONTROLT;

        /*
         * Duplicate sub-structures as needed.
         * Increase reference counts on shared objects.
         */
        if (flags & FORK_SHAREFILES)
                pr->ps_fd = fdshare(parent);
        else
                pr->ps_fd = fdcopy(parent);
        if (flags & FORK_SIGHAND)
                pr->ps_sigacts = sigactsshare(parent);
        else
                pr->ps_sigacts = sigactsinit(parent);
        if (flags & FORK_SHAREVM)
                pr->ps_vmspace = uvmspace_share(parent);
        else
                pr->ps_vmspace = uvmspace_fork(parent);

        if (parent->ps_flags & PS_PROFIL)
                startprofclock(pr);
        if (flags & FORK_PTRACE)
                pr->ps_flags |= parent->ps_flags & PS_TRACED;
        if (flags & FORK_NOZOMBIE)
                pr->ps_flags |= PS_NOZOMBIE;
        if (flags & FORK_SYSTEM)
                pr->ps_flags |= PS_SYSTEM;

        /* mark as embryo to protect against others */
        pr->ps_flags |= PS_EMBRYO;

        /* Force visibility of all of the above changes */
        membar_producer();

        /* it's sufficiently inited to be globally visible */
        LIST_INSERT_HEAD(&allprocess, pr, ps_list);
}

/* print the 'table full' message once per 10 seconds */
struct timeval fork_tfmrate = { 10, 0 };

int
fork1(struct proc *curp, int flags, void *stack, pid_t *tidptr,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
        struct process *curpr = curp->p_p;
        struct process *pr;
        struct proc *p;
        uid_t uid;
        struct vmspace *vm;
        int count;
        vaddr_t uaddr;
        int s;
        struct ptrace_state *newptstat = NULL;
#if NSYSTRACE > 0
        void *newstrp = NULL;
#endif

        /* sanity check some flag combinations */
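        /*
         * A new thread must share its process's file table and signal
         * handlers and cannot be a system (kernel) thread; shared signal
         * state in turn requires a shared address space.
         */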
        if (flags & FORK_THREAD) {
                if ((flags & FORK_SHAREFILES) == 0 ||
                    (flags & FORK_SIGHAND) == 0 ||
                    (flags & FORK_SYSTEM) != 0)
                        return (EINVAL);
        }
        if (flags & FORK_SIGHAND && (flags & FORK_SHAREVM) == 0)
                return (EINVAL);

        /*
         * Although process entries are dynamically created, we still keep
         * a global limit on the maximum number we will create.  We reserve
         * the last 5 processes to root.  The variable nprocesses is the
         * current number of processes, maxprocess is the limit.  Similar
         * rules for threads (struct proc): we reserve the last 5 to root;
         * the variable nthreads is the current number of procs, maxthread is
         * the limit.
         */
        uid = curp->p_ucred->cr_ruid;
        if ((nthreads >= maxthread - 5 && uid != 0) || nthreads >= maxthread) {
                static struct timeval lasttfm;

                if (ratecheck(&lasttfm, &fork_tfmrate))
                        tablefull("proc");
                return (EAGAIN);
        }
        nthreads++;

        if ((flags & FORK_THREAD) == 0) {
                if ((nprocesses >= maxprocess - 5 && uid != 0) ||
                    nprocesses >= maxprocess) {
                        static struct timeval lasttfm;

                        if (ratecheck(&lasttfm, &fork_tfmrate))
                                tablefull("process");
                        nthreads--;
                        return (EAGAIN);
                }
                nprocesses++;

                /*
                 * Increment the count of processes running with
                 * this uid.  Don't allow a nonprivileged user to
                 * exceed their current limit.
                 */
                count = chgproccnt(uid, 1);
                if (uid != 0 && count > curp->p_rlimit[RLIMIT_NPROC].rlim_cur) {
                        (void)chgproccnt(uid, -1);
                        nprocesses--;
                        nthreads--;
                        return (EAGAIN);
                }
        }

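        /* allocate the u-area (kernel stack and PCB) for the new thread */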
        uaddr = uvm_uarea_alloc();
        if (uaddr == 0) {
                if ((flags & FORK_THREAD) == 0) {
                        (void)chgproccnt(uid, -1);
                        nprocesses--;
                }
                nthreads--;
                return (ENOMEM);
        }

        /*
         * From now on, we're committed to the fork and cannot fail.
         */

        /* Allocate new proc. */
        p = pool_get(&proc_pool, PR_WAITOK);

        p->p_stat = SIDL;			/* protect against others */
        p->p_flag = 0;

        /*
         * Make a proc table entry for the new process.
         * Start by zeroing the section of proc that is zero-initialized,
         * then copy the section that is copied directly from the parent.
         */
        memset(&p->p_startzero, 0,
            (caddr_t)&p->p_endzero - (caddr_t)&p->p_startzero);
        memcpy(&p->p_startcopy, &curp->p_startcopy,
            (caddr_t)&p->p_endcopy - (caddr_t)&p->p_startcopy);
        crhold(p->p_ucred);

        /*
         * Initialize the timeouts.
         */
        timeout_set(&p->p_sleep_to, endtsleep, p);

        if (flags & FORK_THREAD) {
                atomic_setbits_int(&p->p_flag, P_THREAD);
                p->p_p = pr = curpr;
                pr->ps_refcnt++;
        } else {
                process_new(p, curpr, flags);
                pr = p->p_p;
        }
        p->p_fd = pr->ps_fd;
        p->p_vmspace = pr->ps_vmspace;
        if (pr->ps_flags & PS_SYSTEM)
                atomic_setbits_int(&p->p_flag, P_SYSTEM);

        if (flags & FORK_PPWAIT) {
                atomic_setbits_int(&pr->ps_flags, PS_PPWAIT);
                atomic_setbits_int(&curpr->ps_flags, PS_ISPWAIT);
        }

#ifdef KTRACE
        /*
         * Copy traceflag and tracefile if enabled.
         * If not inherited, these were zeroed above.
         */
        if ((flags & FORK_THREAD) == 0 && curpr->ps_traceflag & KTRFAC_INHERIT)
                ktrsettrace(pr, curpr->ps_traceflag, curpr->ps_tracevp,
                    curpr->ps_tracecred);
#endif

        /*
         * set priority of child to be that of parent
         * XXX should move p_estcpu into the region of struct proc which gets
         * copied.
         */
        scheduler_fork_hook(curp, p);

        if (flags & FORK_THREAD)
                sigstkinit(&p->p_sigstk);

        /*
         * If emulation has thread fork hook, call it now.
         */
        if (pr->ps_emul->e_proc_fork)
                (*pr->ps_emul->e_proc_fork)(p, curp);

        p->p_addr = (struct user *)uaddr;

        /*
         * Finish creating the child thread.  cpu_fork() will copy
         * and update the pcb and make the child ready to run.  If
         * this is a normal user fork, the child will exit directly
         * to user mode via child_return() on its first time slice
         * and will not return here.  If this is a kernel thread,
         * the specified entry point will be executed.
         */
        cpu_fork(curp, p, stack, 0, func ? func : child_return, arg ? arg : p);

        vm = pr->ps_vmspace;

        if (flags & FORK_FORK) {
                forkstat.cntfork++;
                forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
        } else if (flags & FORK_VFORK) {
                forkstat.cntvfork++;
                forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
        } else if (flags & FORK_TFORK) {
                forkstat.cnttfork++;
        } else {
                forkstat.cntkthread++;
                forkstat.sizkthread += vm->vm_dsize + vm->vm_ssize;
        }

        if (pr->ps_flags & PS_TRACED && flags & FORK_FORK)
                newptstat = malloc(sizeof(*newptstat), M_SUBPROC, M_WAITOK);
#if NSYSTRACE > 0
        if (ISSET(curp->p_flag, P_SYSTRACE))
                newstrp = systrace_getproc();
#endif

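        /* assign a pid and enter the new thread in the global list and pid hash */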
        p->p_pid = allocpid();

        LIST_INSERT_HEAD(&allproc, p, p_list);
        LIST_INSERT_HEAD(PIDHASH(p->p_pid), p, p_hash);
        if ((flags & FORK_THREAD) == 0) {
                LIST_INSERT_AFTER(curpr, pr, ps_pglist);
                LIST_INSERT_HEAD(&curpr->ps_children, pr, ps_sibling);

                if (pr->ps_flags & PS_TRACED) {
                        pr->ps_oppid = curpr->ps_pid;
                        if (pr->ps_pptr != curpr->ps_pptr)
                                proc_reparent(pr, curpr->ps_pptr);

                        /*
                         * Set ptrace status.
                         */
                        if (flags & FORK_FORK) {
                                pr->ps_ptstat = newptstat;
                                newptstat = NULL;
                                curpr->ps_ptstat->pe_report_event = PTRACE_FORK;
                                pr->ps_ptstat->pe_report_event = PTRACE_FORK;
                                curpr->ps_ptstat->pe_other_pid = pr->ps_pid;
                                pr->ps_ptstat->pe_other_pid = curpr->ps_pid;
                        }
                }
        } else {
                TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
                /*
                 * if somebody else wants to take us to single threaded mode,
                 * count ourselves in.
                 */
                if (pr->ps_single) {
                        curpr->ps_singlecount++;
                        atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
                }
        }

#if NSYSTRACE > 0
        if (newstrp)
                systrace_fork(curp, p, newstrp);
#endif

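        /* if the caller supplied a tid pointer, write the new thread's tid there */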
        if (tidptr != NULL) {
                pid_t pid = p->p_pid + THREAD_PID_OFFSET;

                if (copyout(&pid, tidptr, sizeof(pid)))
                        psignal(curp, SIGSEGV);
        }

        /*
         * For new processes, set accounting bits and mark as complete.
         */
        if ((flags & FORK_THREAD) == 0) {
                getnanotime(&pr->ps_start);
                pr->ps_acflag = AFORK;
                atomic_clearbits_int(&pr->ps_flags, PS_EMBRYO);
        }

        /*
         * Make child runnable and add to run queue.
         */
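        /* (FORK_IDLE threads are not queued; the caller's "arg" is their cpu) */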
        if ((flags & FORK_IDLE) == 0) {
                SCHED_LOCK(s);
                p->p_stat = SRUN;
                p->p_cpu = sched_choosecpu_fork(curp, flags);
                setrunqueue(p);
                SCHED_UNLOCK(s);
        } else
                p->p_cpu = arg;

        if (newptstat)
                free(newptstat, M_SUBPROC, sizeof(*newptstat));

        /*
         * Notify any interested parties about the new process.
         */
        if ((flags & FORK_THREAD) == 0)
                KNOTE(&curpr->ps_klist, NOTE_FORK | p->p_pid);

        /*
         * Update stats now that we know the fork was successful.
         */
        uvmexp.forks++;
        if (flags & FORK_PPWAIT)
                uvmexp.forks_ppwait++;
        if (flags & FORK_SHAREVM)
                uvmexp.forks_sharevm++;

        /*
         * Pass a pointer to the new process to the caller.
         */
        if (rnewprocp != NULL)
                *rnewprocp = p;

        /*
         * Preserve synchronization semantics of vfork.  If waiting for
         * child to exec or exit, set PS_PPWAIT on child and PS_ISPWAIT
         * on ourselves, and sleep on our process for the latter flag
         * to go away.
         * XXX Need to stop other rthreads in the parent
         */
        if (flags & FORK_PPWAIT)
                while (curpr->ps_flags & PS_ISPWAIT)
                        tsleep(curpr, PWAIT, "ppwait", 0);

        /*
         * If we're tracing the child, alert the parent too.
         */
        if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
                psignal(curp, SIGTRAP);

        /*
         * Return child pid to parent process,
         * marking us as parent via retval[1].
         */
        if (retval != NULL) {
                retval[0] = p->p_pid +
                    (flags & FORK_THREAD ? THREAD_PID_OFFSET : 0);
                retval[1] = 0;
        }
        return (0);
}

/*
 * Checks for current use of a pid, either as a pid or pgid.
 */
pid_t oldpids[100];
int
ispidtaken(pid_t pid)
{
        uint32_t i;
        struct process *pr;

        for (i = 0; i < nitems(oldpids); i++)
                if (pid == oldpids[i])
                        return (1);
        if (pfind(pid) != NULL)
                return (1);
        if (pgfind(pid) != NULL)
                return (1);
        LIST_FOREACH(pr, &zombprocess, ps_list) {
                if (pr->ps_pid == pid ||
                    (pr->ps_pgrp && pr->ps_pgrp->pg_id == pid))
                        return (1);
        }
        return (0);
}

/* Find an unused pid satisfying 1 <= lastpid <= PID_MAX */
pid_t
allocpid(void)
{
        static pid_t lastpid;
        pid_t pid;

        if (!randompid) {
                /* only used early on for system processes */
                pid = ++lastpid;
        } else {
                do {
                        pid = 1 + arc4random_uniform(PID_MAX);
                } while (ispidtaken(pid));
        }

        return pid;
}

void
freepid(pid_t pid)
{
        static uint32_t idx;

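        /* record the pid in the oldpids ring so allocpid() avoids reusing it soon */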
        oldpids[idx++ % nitems(oldpids)] = pid;
}

#if defined(MULTIPROCESSOR)
/*
 * XXX This is a slight hack to get newly-formed processes to
 * XXX acquire the kernel lock as soon as they run.
 */
void
proc_trampoline_mp(void)
{
        struct proc *p;

        p = curproc;

        SCHED_ASSERT_LOCKED();
        __mp_unlock(&sched_lock);
        spl0();
        SCHED_ASSERT_UNLOCKED();
        KERNEL_ASSERT_UNLOCKED();

        KERNEL_LOCK();
}
#endif