/* $OpenBSD: kern_sched.c,v 1.36 2015/03/14 03:38:50 jsg Exp $ */
/*
 * Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/param.h>
#include <sys/sched.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/mutex.h>

#include <uvm/uvm_extern.h>

void sched_kthreads_create(void *);

int sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p);
struct proc *sched_steal_proc(struct cpu_info *);
/*
 * To help choose which cpu should run which process we keep track
 * of cpus which are currently idle and which cpus have processes
 * queued.
 */
struct cpuset sched_idle_cpus;
struct cpuset sched_queued_cpus;
struct cpuset sched_all_cpus;

/*
 * Some general scheduler counters.
 */
uint64_t sched_nmigrations;	/* Cpu migration counter */
uint64_t sched_nomigrations;	/* Cpu no migration counter */
uint64_t sched_noidle;		/* Times we didn't pick the idle task */
uint64_t sched_stolen;		/* Times we stole proc from other cpus */
uint64_t sched_choose;		/* Times we chose a cpu */
uint64_t sched_wasidle;		/* Times we came out of idle */
/*
 * A few notes about cpu_switchto that is implemented in MD code.
 *
 * cpu_switchto takes two arguments, the old proc and the proc
 * it should switch to. The new proc will never be NULL, so we always have
 * a saved state that we need to switch to. The old proc however can
 * be NULL if the process is exiting. NULL for the old proc simply
 * means "don't bother saving old state".
 *
 * cpu_switchto is supposed to atomically load the new state of the process
 * including the pcb, pmap and setting curproc, the p_cpu pointer in the
 * proc and p_stat to SONPROC. This is atomic only with respect to
 * interrupts; other cpus in the system must not depend on this state
 * being consistent. Therefore no locking is necessary in cpu_switchto
 * other than blocking interrupts during the context switch.
 */
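
/*
 * For illustration, the two shapes the call takes:
 *
 *	cpu_switchto(p, nextproc);	ordinary switch, save p's state
 *	cpu_switchto(NULL, idle);	old proc is exiting, nothing to
 *					save (sched_exit() below does this)
 */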

/*
 * sched_init_cpu is called from main() for the boot cpu, then it's the
 * responsibility of the MD code to call it for all other cpus.
 */
void
sched_init_cpu(struct cpu_info *ci)
{
        struct schedstate_percpu *spc = &ci->ci_schedstate;
        int i;

        for (i = 0; i < SCHED_NQS; i++)
                TAILQ_INIT(&spc->spc_qs[i]);

        spc->spc_idleproc = NULL;

        kthread_create_deferred(sched_kthreads_create, ci);

        LIST_INIT(&spc->spc_deadproc);

        /*
         * Slight hack here until the cpuset code handles cpu_info
         * structures.
         */
        cpuset_init_cpu(ci);
        cpuset_add(&sched_all_cpus, ci);
}

void
sched_kthreads_create(void *v)
{
        struct cpu_info *ci = v;
        struct schedstate_percpu *spc = &ci->ci_schedstate;
        static int num;

        if (fork1(&proc0, FORK_SHAREVM|FORK_SHAREFILES|FORK_NOZOMBIE|
            FORK_SYSTEM|FORK_SIGHAND|FORK_IDLE, NULL, 0, sched_idle, ci, NULL,
            &spc->spc_idleproc))
                panic("fork idle");

        /* Name it as specified. */
        snprintf(spc->spc_idleproc->p_comm, sizeof(spc->spc_idleproc->p_comm),
            "idle%d", num);

        num++;
}

void
sched_idle(void *v)
{
        struct schedstate_percpu *spc;
        struct proc *p = curproc;
        struct cpu_info *ci = v;
        int s;

        KERNEL_UNLOCK();

        spc = &ci->ci_schedstate;

        /*
         * First time we enter here, we're not supposed to idle,
         * just go away for a while.
         */
        SCHED_LOCK(s);
        cpuset_add(&sched_idle_cpus, ci);
        p->p_stat = SSLEEP;
        p->p_cpu = ci;
        atomic_setbits_int(&p->p_flag, P_CPUPEG);
        mi_switch();
        cpuset_del(&sched_idle_cpus, ci);
        SCHED_UNLOCK(s);

        KASSERT(ci == curcpu());
        KASSERT(curproc == spc->spc_idleproc);

        while (1) {
                while (!curcpu_is_idle()) {
                        struct proc *dead;

                        SCHED_LOCK(s);
                        p->p_stat = SSLEEP;
                        mi_switch();
                        SCHED_UNLOCK(s);

                        while ((dead = LIST_FIRST(&spc->spc_deadproc))) {
                                LIST_REMOVE(dead, p_hash);
                                exit2(dead);
                        }
                }

                splassert(IPL_NONE);

                cpuset_add(&sched_idle_cpus, ci);
                cpu_idle_enter();
                while (spc->spc_whichqs == 0) {
#ifdef MULTIPROCESSOR
                        if (spc->spc_schedflags & SPCF_SHOULDHALT &&
                            (spc->spc_schedflags & SPCF_HALTED) == 0) {
                                cpuset_del(&sched_idle_cpus, ci);
                                SCHED_LOCK(s);
                                atomic_setbits_int(&spc->spc_schedflags,
                                    spc->spc_whichqs ? 0 : SPCF_HALTED);
                                SCHED_UNLOCK(s);
                                wakeup(spc);
                        }
#endif
                        cpu_idle_cycle();
                }
                cpu_idle_leave();
                cpuset_del(&sched_idle_cpus, ci);
        }
}

/*
 * To free our address space we have to jump through a few hoops.
 * The freeing is done by the reaper, but until we have one reaper
 * per cpu, we have no way of putting this proc on the deadproc list
 * and waking up the reaper without risking having our address space and
 * stack torn from under us before we manage to switch to another proc.
 * Therefore we have a per-cpu list of dead processes where we put this
 * proc and have idle clean up that list and move it to the reaper list.
 * All this will be unnecessary once we can bind the reaper to this cpu
 * and not risk having it switch to another in case it sleeps.
 */
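/*
 * In short: sched_exit() below links the dying proc onto this cpu's
 * spc_deadproc list and switches straight to the idle proc; the idle
 * loop above then unlinks it and hands it to exit2() once it is safely
 * off this stack.
 */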
void
sched_exit(struct proc *p)
{
        struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
        struct timespec ts;
        struct proc *idle;
        int s;

        nanouptime(&ts);
        timespecsub(&ts, &spc->spc_runtime, &ts);
        timespecadd(&p->p_rtime, &ts, &p->p_rtime);

        LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);

        /* This process no longer needs to hold the kernel lock. */
        KERNEL_UNLOCK();

        SCHED_LOCK(s);
        idle = spc->spc_idleproc;
        idle->p_stat = SRUN;
        cpu_switchto(NULL, idle);
        panic("cpu_switchto returned");
}

/*
 * Run queue management.
 */
void
sched_init_runqueues(void)
{
}
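
/*
 * Layout of the per-cpu run queues (a sketch, from the code below):
 * each cpu has SCHED_NQS TAILQs in spc_qs[], indexed by p_priority >> 2,
 * so four adjacent priorities share one queue.  spc_whichqs has one bit
 * per queue; ffs() on it in sched_chooseproc() finds the lowest-numbered
 * (i.e. best priority) non-empty queue, and sched_queued_cpus tracks
 * which cpus have anything queued at all.
 */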
void
setrunqueue(struct proc *p)
{
        struct schedstate_percpu *spc;
        int queue = p->p_priority >> 2;

        SCHED_ASSERT_LOCKED();
        spc = &p->p_cpu->ci_schedstate;
        spc->spc_nrun++;

        TAILQ_INSERT_TAIL(&spc->spc_qs[queue], p, p_runq);
        spc->spc_whichqs |= (1 << queue);
        cpuset_add(&sched_queued_cpus, p->p_cpu);

        if (cpuset_isset(&sched_idle_cpus, p->p_cpu))
                cpu_unidle(p->p_cpu);
}

void
remrunqueue(struct proc *p)
{
        struct schedstate_percpu *spc;
        int queue = p->p_priority >> 2;

        SCHED_ASSERT_LOCKED();
        spc = &p->p_cpu->ci_schedstate;
        spc->spc_nrun--;

        TAILQ_REMOVE(&spc->spc_qs[queue], p, p_runq);
        if (TAILQ_EMPTY(&spc->spc_qs[queue])) {
                spc->spc_whichqs &= ~(1 << queue);
                if (spc->spc_whichqs == 0)
                        cpuset_del(&sched_queued_cpus, p->p_cpu);
        }
}
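
/*
 * sched_chooseproc() picks what runs next on this cpu, in this order:
 * if the cpu is being halted, push everything we still have queued onto
 * other cpus and return our idle proc; otherwise take the head of our
 * best non-empty queue; failing that, try to steal from another cpu;
 * and as a last resort run the idle proc (or spin briefly if it doesn't
 * exist yet during early boot).
 */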
struct proc *
sched_chooseproc(void)
{
        struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
        struct proc *p;
        int queue;

        SCHED_ASSERT_LOCKED();

#ifdef MULTIPROCESSOR
        if (spc->spc_schedflags & SPCF_SHOULDHALT) {
                if (spc->spc_whichqs) {
                        for (queue = 0; queue < SCHED_NQS; queue++) {
                                while ((p = TAILQ_FIRST(&spc->spc_qs[queue]))) {
                                        remrunqueue(p);
                                        p->p_cpu = sched_choosecpu(p);
                                        KASSERT(p->p_cpu != curcpu());
                                        setrunqueue(p);
                                }
                        }
                }
                p = spc->spc_idleproc;
                KASSERT(p);
                KASSERT(p->p_wchan == NULL);
                p->p_stat = SRUN;
                return (p);
        }
#endif

again:
        if (spc->spc_whichqs) {
                queue = ffs(spc->spc_whichqs) - 1;
                p = TAILQ_FIRST(&spc->spc_qs[queue]);
                remrunqueue(p);
                sched_noidle++;
                KASSERT(p->p_stat == SRUN);
        } else if ((p = sched_steal_proc(curcpu())) == NULL) {
                p = spc->spc_idleproc;
                if (p == NULL) {
                        int s;
                        /*
                         * We get here if someone decides to switch during
                         * boot before forking kthreads, bleh.
                         * This is kind of like a stupid idle loop.
                         */
#ifdef MULTIPROCESSOR
                        __mp_unlock(&sched_lock);
#endif
                        spl0();
                        delay(10);
                        SCHED_LOCK(s);
                        goto again;
                }
                KASSERT(p);
                p->p_stat = SRUN;
        }

        KASSERT(p->p_wchan == NULL);
        return (p);
}

struct cpu_info *
sched_choosecpu_fork(struct proc *parent, int flags)
{
#ifdef MULTIPROCESSOR
        struct cpu_info *choice = NULL;
        fixpt_t load, best_load = ~0;
        int run, best_run = INT_MAX;
        struct cpu_info *ci;
        struct cpuset set;

#if 0
        /*
         * XXX
         * Don't do this until we have a painless way to move the cpu in exec.
         * Preferably when nuking the old pmap and getting a new one on a
         * new cpu.
         */
        /*
         * PPWAIT forks are simple. We know that the parent will not
         * run until we exec and choose another cpu, so we just steal its
         * cpu.
         */
        if (flags & FORK_PPWAIT)
                return (parent->p_cpu);
#endif

        /*
         * Look at all cpus that are currently idle and have nothing queued.
         * If there are none, pick the one with least queued procs first,
         * then the one with lowest load average.
         */
        cpuset_complement(&set, &sched_queued_cpus, &sched_idle_cpus);
        cpuset_intersection(&set, &set, &sched_all_cpus);
        if (cpuset_first(&set) == NULL)
                cpuset_copy(&set, &sched_all_cpus);

        while ((ci = cpuset_first(&set)) != NULL) {
                cpuset_del(&set, ci);

                load = ci->ci_schedstate.spc_ldavg;
                run = ci->ci_schedstate.spc_nrun;

                if (choice == NULL || run < best_run ||
                    (run == best_run && load < best_load)) {
                        choice = ci;
                        best_load = load;
                        best_run = run;
                }
        }

        return (choice);
#else
        return (curcpu());
#endif
}

struct cpu_info *
sched_choosecpu(struct proc *p)
{
#ifdef MULTIPROCESSOR
        struct cpu_info *choice = NULL;
        int last_cost = INT_MAX;
        struct cpu_info *ci;
        struct cpuset set;

        /*
         * If pegged to a cpu, don't allow it to move.
         */
        if (p->p_flag & P_CPUPEG)
                return (p->p_cpu);

        sched_choose++;

        /*
         * Look at all cpus that are currently idle and have nothing queued.
         * If there are none, fall back to all cpus and pick the cheapest one.
         * (idle + queued could mean that the cpu is handling an interrupt
         * at this moment and hasn't had time to leave idle yet).
         */
        cpuset_complement(&set, &sched_queued_cpus, &sched_idle_cpus);
        cpuset_intersection(&set, &set, &sched_all_cpus);

        /*
         * First, just check if our current cpu is in that set, if it is,
         * this is simple.
         * Also, our cpu might not be idle, but if it's the current cpu
         * and it has nothing else queued and we're curproc, take it.
         */
        if (cpuset_isset(&set, p->p_cpu) ||
            (p->p_cpu == curcpu() && p->p_cpu->ci_schedstate.spc_nrun == 0 &&
            (p->p_cpu->ci_schedstate.spc_schedflags & SPCF_SHOULDHALT) == 0 &&
            curproc == p)) {
                sched_wasidle++;
                return (p->p_cpu);
        }

        if (cpuset_first(&set) == NULL)
                cpuset_copy(&set, &sched_all_cpus);

        while ((ci = cpuset_first(&set)) != NULL) {
                int cost = sched_proc_to_cpu_cost(ci, p);

                if (choice == NULL || cost < last_cost) {
                        choice = ci;
                        last_cost = cost;
                }
                cpuset_del(&set, ci);
        }

        if (p->p_cpu != choice)
                sched_nmigrations++;
        else
                sched_nomigrations++;

        return (choice);
#else
        return (curcpu());
#endif
}

/*
 * Attempt to steal a proc from some cpu.
 */
struct proc *
sched_steal_proc(struct cpu_info *self)
{
        struct proc *best = NULL;
#ifdef MULTIPROCESSOR
        struct schedstate_percpu *spc;
        int bestcost = INT_MAX;
        struct cpu_info *ci;
        struct cpuset set;

        KASSERT((self->ci_schedstate.spc_schedflags & SPCF_SHOULDHALT) == 0);

        cpuset_copy(&set, &sched_queued_cpus);

        while ((ci = cpuset_first(&set)) != NULL) {
                struct proc *p;
                int queue;
                int cost;

                cpuset_del(&set, ci);

                spc = &ci->ci_schedstate;

                queue = ffs(spc->spc_whichqs) - 1;
                TAILQ_FOREACH(p, &spc->spc_qs[queue], p_runq) {
                        if (p->p_flag & P_CPUPEG)
                                continue;

                        cost = sched_proc_to_cpu_cost(self, p);

                        if (best == NULL || cost < bestcost) {
                                best = p;
                                bestcost = cost;
                        }
                }
        }
        if (best == NULL)
                return (NULL);

        spc = &best->p_cpu->ci_schedstate;
        remrunqueue(best);
        best->p_cpu = self;

        sched_stolen++;
#endif
        return (best);
}
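
/*
 * Note that stealing is only attempted from sched_chooseproc() above,
 * when this cpu's own queues are empty and the alternative is running
 * the idle proc.  Only cpus in sched_queued_cpus are scanned, only the
 * best queue on each of them is considered, and pegged procs are never
 * taken.
 */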

#ifdef MULTIPROCESSOR
/*
 * Base 2 logarithm of an int. Returns 0 for 0 (yes, yes, I know).
 */
static int
log2(unsigned int i)
{
        int ret = 0;

        while (i >>= 1)
                ret++;

        return (ret);
}

/*
 * Calculate the cost of moving the proc to this cpu.
 *
 * What we want is some guesstimate of how much "performance" it will
 * cost us to move the proc here. Not just for caches and TLBs and NUMA
 * memory, but also for the proc itself. A highly loaded cpu might not
 * be the best candidate for this proc since it won't get run.
 *
 * Just total guesstimates for now.
 */

int sched_cost_load = 1;
int sched_cost_priority = 1;
int sched_cost_runnable = 3;
int sched_cost_resident = 1;
#endif
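
/*
 * A worked example with the default weights above (the numbers are
 * made up, purely illustrative): suppose ci is neither idle nor empty,
 * its spc_curpriority is 20, spc_nrun is 2, spc_ldavg is roughly 1.0 in
 * FSCALE fixed point, and p->p_priority is 50.  Then
 *
 *	cost = (50 - 20) * sched_cost_priority	  30, priority delta
 *	     + sched_cost_runnable		+  3, cpu isn't idle
 *	     + 2 * sched_cost_runnable		+  6, procs already queued
 *	     + (1 * spc_ldavg) >> FSHIFT	+  1, load average
 *	     = 40
 *
 * and if p last ran on ci and hasn't slept since, log2() of its resident
 * page count (say 4096 pages, giving 12) is subtracted, leaving 28.
 */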
int
sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p)
{
        int cost = 0;
#ifdef MULTIPROCESSOR
        struct schedstate_percpu *spc;
        int l2resident = 0;

        spc = &ci->ci_schedstate;

        /*
         * First, account for the priority of the proc we want to move.
         * The lower the priority of the destination and the higher the
         * priority of the proc, the more willing we are to move it.
         */
        if (!cpuset_isset(&sched_idle_cpus, ci)) {
                cost += (p->p_priority - spc->spc_curpriority) *
                    sched_cost_priority;
                cost += sched_cost_runnable;
        }
        if (cpuset_isset(&sched_queued_cpus, ci))
                cost += spc->spc_nrun * sched_cost_runnable;

        /*
         * Higher load on the destination means we don't want to go there.
         */
        cost += ((sched_cost_load * spc->spc_ldavg) >> FSHIFT);

        /*
         * If the proc is on this cpu already, lower the cost by how much
         * it has been running and an estimate of its footprint.
         */
        if (p->p_cpu == ci && p->p_slptime == 0) {
                l2resident =
                    log2(pmap_resident_count(p->p_vmspace->vm_map.pmap));
                cost -= l2resident * sched_cost_resident;
        }
#endif
        return (cost);
}

/*
 * Peg a proc to a cpu.
 */
void
sched_peg_curproc(struct cpu_info *ci)
{
        struct proc *p = curproc;
        int s;

        SCHED_LOCK(s);
        p->p_priority = p->p_usrpri;
        p->p_stat = SRUN;
        p->p_cpu = ci;
        atomic_setbits_int(&p->p_flag, P_CPUPEG);
        setrunqueue(p);
        p->p_ru.ru_nvcsw++;
        mi_switch();
        SCHED_UNLOCK(s);
}
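
/*
 * Once P_CPUPEG is set the peg is honoured throughout this file:
 * sched_choosecpu() returns p_cpu without looking any further and
 * sched_steal_proc() skips pegged procs, so the proc stays on the
 * chosen cpu until the flag is cleared again.
 */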

#ifdef MULTIPROCESSOR
void
sched_start_secondary_cpus(void)
{
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;

        CPU_INFO_FOREACH(cii, ci) {
                struct schedstate_percpu *spc = &ci->ci_schedstate;

                if (CPU_IS_PRIMARY(ci))
                        continue;
                cpuset_add(&sched_all_cpus, ci);
                atomic_clearbits_int(&spc->spc_schedflags,
                    SPCF_SHOULDHALT | SPCF_HALTED);
        }
}
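
/*
 * Halting works as a small handshake with the idle procs: the loop
 * below first drops every secondary cpu from sched_all_cpus and sets
 * SPCF_SHOULDHALT, then sleeps on the per-cpu schedstate.  On the other
 * side, sched_chooseproc() pushes any queued work away and returns the
 * idle proc, and the idle loop acknowledges with SPCF_HALTED plus a
 * wakeup(spc), which is what the sleep below waits for.
 */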
void
sched_stop_secondary_cpus(void)
{
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;

        /*
         * Make sure we stop the secondary CPUs.
         */
        CPU_INFO_FOREACH(cii, ci) {
                struct schedstate_percpu *spc = &ci->ci_schedstate;

                if (CPU_IS_PRIMARY(ci))
                        continue;
                cpuset_del(&sched_all_cpus, ci);
                atomic_setbits_int(&spc->spc_schedflags, SPCF_SHOULDHALT);
        }
        CPU_INFO_FOREACH(cii, ci) {
                struct schedstate_percpu *spc = &ci->ci_schedstate;
                struct sleep_state sls;

                if (CPU_IS_PRIMARY(ci))
                        continue;
                while ((spc->spc_schedflags & SPCF_HALTED) == 0) {
                        sleep_setup(&sls, spc, PZERO, "schedstate");
                        sleep_finish(&sls,
                            (spc->spc_schedflags & SPCF_HALTED) == 0);
                }
        }
}
#endif

/*
 * Functions to manipulate cpu sets.
 */
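/*
 * A cpuset is just an array of 32-bit words, one bit per cpu, indexed
 * by CPU_INFO_UNIT(ci): word num/32, bit num%32.  For example, a cpu
 * with unit number 37 (a made-up number) lives in cs_set[1], bit 5.
 * cpuset_infos[] maps unit numbers back to cpu_info pointers so that
 * cpuset_first() can turn a bit back into a cpu.
 */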
struct cpu_info *cpuset_infos[MAXCPUS];
static struct cpuset cpuset_all;

void
cpuset_init_cpu(struct cpu_info *ci)
{
        cpuset_add(&cpuset_all, ci);
        cpuset_infos[CPU_INFO_UNIT(ci)] = ci;
}

void
cpuset_clear(struct cpuset *cs)
{
        memset(cs, 0, sizeof(*cs));
}

void
cpuset_add(struct cpuset *cs, struct cpu_info *ci)
{
        unsigned int num = CPU_INFO_UNIT(ci);
        atomic_setbits_int(&cs->cs_set[num/32], (1 << (num % 32)));
}

void
cpuset_del(struct cpuset *cs, struct cpu_info *ci)
{
        unsigned int num = CPU_INFO_UNIT(ci);
        atomic_clearbits_int(&cs->cs_set[num/32], (1 << (num % 32)));
}

int
cpuset_isset(struct cpuset *cs, struct cpu_info *ci)
{
        unsigned int num = CPU_INFO_UNIT(ci);
        return (cs->cs_set[num/32] & (1 << (num % 32)));
}

void
cpuset_add_all(struct cpuset *cs)
{
        cpuset_copy(cs, &cpuset_all);
}

void
cpuset_copy(struct cpuset *to, struct cpuset *from)
{
        memcpy(to, from, sizeof(*to));
}

struct cpu_info *
cpuset_first(struct cpuset *cs)
{
        int i;

        for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
                if (cs->cs_set[i])
                        return (cpuset_infos[i * 32 + ffs(cs->cs_set[i]) - 1]);

        return (NULL);
}

void
cpuset_union(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
        int i;

        for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
                to->cs_set[i] = a->cs_set[i] | b->cs_set[i];
}

void
cpuset_intersection(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
        int i;

        for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
                to->cs_set[i] = a->cs_set[i] & b->cs_set[i];
}

void
cpuset_complement(struct cpuset *to, struct cpuset *a, struct cpuset *b)
{
        int i;

        for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
                to->cs_set[i] = b->cs_set[i] & ~a->cs_set[i];
}