hw_breakpoint.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/bug.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */
struct bp_cpuinfo {
	/* Number of pinned cpu breakpoints in a cpu */
	unsigned int	cpu_pinned;
	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
	unsigned int	*tsk_pinned;
	/* Number of non-pinned cpu/task breakpoints in a cpu */
	unsigned int	flexible; /* XXX: placeholder, see fetch_this_slot() */
};
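
/*
 * Illustration of the tsk_pinned histogram (example values, not from the
 * original source): with nr_slots == 4, if two tasks each hold one
 * breakpoint of a given type on a cpu and a third task holds three, then
 * for that cpu and type tsk_pinned[0] == 2 and tsk_pinned[2] == 1, and all
 * other entries are zero.
 */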
static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
static int nr_slots[TYPE_MAX];

static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{
	return per_cpu_ptr(bp_cpuinfo + type, cpu);
}

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}

static inline enum bp_type_idx find_slot_idx(u64 bp_type)
{
	if (bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * can have on this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int i;

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}
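
/*
 * Worked example (illustrative, not from the original source): for a cpu
 * where tsk_pinned[0] == 2 and tsk_pinned[2] == 1 (two tasks with one
 * breakpoint, one task with three), the downward scan above hits
 * tsk_pinned[2] first and reports 3, the largest number of same-type
 * pinned breakpoints any single task holds on that cpu.
 */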
/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.target;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.target == tsk &&
		    find_slot_idx(iter->attr.bp_type) == type &&
		    (iter->cpu < 0 || cpu == iter->cpu))
			count += hw_breakpoint_weight(iter);
	}

	return count;
}

static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
	if (bp->cpu >= 0)
		return cpumask_of(bp->cpu);
	return cpu_possible_mask;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu >= 0) or in all of them (cpu == -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	for_each_cpu(cpu, cpumask) {
		struct bp_cpuinfo *info = get_bp_info(cpu, type);
		int nr;

		nr = info->cpu_pinned;
		if (!bp->hw.target)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = info->flexible;
		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int old_idx, new_idx;

	old_idx = task_bp_pinned(cpu, bp, type) - 1;
	new_idx = old_idx + weight;

	if (old_idx >= 0)
		tsk_pinned[old_idx]--;
	if (new_idx >= 0)
		tsk_pinned[new_idx]++;
}
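
/*
 * Worked example (illustrative, not from the original source): a task that
 * already holds two pinned breakpoints of this type is counted in
 * tsk_pinned[1].  Adding one more (weight 1) gives old_idx == 1 and
 * new_idx == 2, so tsk_pinned[1] is decremented and tsk_pinned[2]
 * incremented: the task moves from the "two breakpoints" bucket to the
 * "three breakpoints" bucket of the histogram.
 */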
/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	if (!enable)
		weight = -weight;

	/* Pinned counter cpu profiling */
	if (!bp->hw.target) {
		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
		return;
	}

	/* Pinned counter task profiling */
	for_each_cpu(cpu, cpumask)
		toggle_bp_task_slot(bp, cpu, type, weight);

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
	else
		list_del(&bp->hw.bp_list);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}
/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
 *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
 *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
 *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the info->flexible, if any, must
 *          keep at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
 *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
 */
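
/*
 * Worked example of the pinned check (illustrative, not from the original
 * source), assuming 4 debug registers (HBP_NUM == 4) on the target cpu:
 * with cpu_pinned == 1, max(tsk_pinned) == 2 and one flexible counter,
 * reserving a new pinned breakpoint of weight 1 gives
 * slots.pinned == 1 + 2 + 1 == 4, and 4 + (!!slots.flexible) == 5 > 4,
 * so __reserve_bp_slot() below returns -ENOSPC.
 */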
static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp_type == HW_BREAKPOINT_EMPTY ||
	    bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp, bp->attr.bp_type);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp, bp->attr.bp_type);

	mutex_unlock(&nr_bp_mutex);
}

static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	int err;

	__release_bp_slot(bp, old_type);

	err = __reserve_bp_slot(bp, new_type);
	if (err) {
		/*
		 * Reserve the old_type slot back in case
		 * there's no space for the new type.
		 *
		 * This must succeed, because we just released
		 * the old_type slot in the __release_bp_slot
		 * call above. If not, something is broken.
		 */
		WARN_ON(__reserve_bp_slot(bp, old_type));
	}

	return err;
}

static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	int ret;

	mutex_lock(&nr_bp_mutex);
	ret = __modify_bp_slot(bp, old_type, new_type);
	mutex_unlock(&nr_bp_mutex);
	return ret;
}
/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp, bp->attr.bp_type);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp, bp->attr.bp_type);

	return 0;
}
static int hw_breakpoint_parse(struct perf_event *bp,
			       const struct perf_event_attr *attr,
			       struct arch_hw_breakpoint *hw)
{
	int err;

	err = hw_breakpoint_arch_parse(bp, attr, hw);
	if (err)
		return err;

	if (arch_check_bp_in_kernelspace(hw)) {
		if (attr->exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = reserve_bp_slot(bp);
	if (err)
		return err;

	err = hw_breakpoint_parse(bp, &bp->attr, &hw);
	if (err) {
		release_bp_slot(bp);
		return err;
	}

	bp->hw.info = hw;

	return 0;
}
/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the overflow handler
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
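
/*
 * Example usage (illustrative sketch, not part of the original file): a
 * kernel caller watching for writes to a word in a traced task's address
 * space.  The names watch_addr, my_bp_handler and target_task are
 * hypothetical; hw_breakpoint_init() and the HW_BREAKPOINT_* constants
 * come from <linux/hw_breakpoint.h>.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = watch_addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_bp_handler, NULL,
 *					 target_task);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */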
static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
				    struct perf_event_attr *from)
{
	to->bp_addr = from->bp_addr;
	to->bp_type = from->bp_type;
	to->bp_len = from->bp_len;
	to->disabled = from->disabled;
}

int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
				bool check)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = hw_breakpoint_parse(bp, attr, &hw);
	if (err)
		return err;

	if (check) {
		struct perf_event_attr old_attr;

		old_attr = bp->attr;
		hw_breakpoint_copy_attr(&old_attr, attr);
		if (memcmp(&old_attr, attr, sizeof(*attr)))
			return -EINVAL;
	}

	if (bp->attr.bp_type != attr->bp_type) {
		err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
		if (err)
			return err;
	}

	hw_breakpoint_copy_attr(&bp->attr, attr);
	bp->hw.info = hw;

	return 0;
}

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	int err;

	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		perf_event_disable_local(bp);
	else
		perf_event_disable(bp);

	err = modify_user_hw_breakpoint_check(bp, attr, false);

	if (!bp->attr.disabled)
		perf_event_enable(bp);

	return err;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
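
/*
 * Example usage (illustrative sketch, not part of the original file):
 * retargeting an existing breakpoint at a new address.  The names bp and
 * new_addr are hypothetical; only the fields copied by
 * hw_breakpoint_copy_attr() above are honoured.
 *
 *	struct perf_event_attr attr = bp->attr;
 *	int err;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */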
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the overflow handler
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, *bp;
	long err = 0;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			break;
		}

		per_cpu(*cpu_events, cpu) = bp;
	}
	put_online_cpus();

	if (likely(!err))
		return cpu_events;

	unregister_wide_hw_breakpoint(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
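
/*
 * Example usage (illustrative sketch, not part of the original file),
 * modelled on samples/hw_breakpoint/data_breakpoint.c: watching a kernel
 * symbol for read/write access on every cpu.  The names wide_hbp and
 * wide_hbp_handler are hypothetical.
 *
 *	struct perf_event * __percpu *wide_hbp;
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("pid_max");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	wide_hbp = register_wide_hw_breakpoint(&attr, wide_hbp_handler, NULL);
 *	if (IS_ERR((void __force *)wide_hbp))
 *		return PTR_ERR((void __force *)wide_hbp);
 */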
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;

	for_each_possible_cpu(cpu)
		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	if (is_sampling_event(bp)) {
		bp->hw.last_period = bp->hw.sample_period;
		perf_swevent_set_period(bp);
	}

	return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}

static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
};

int __init init_hw_breakpoint(void)
{
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			struct bp_cpuinfo *info = get_bp_info(cpu, i);

			info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int),
						   GFP_KERNEL);
			if (!info->tsk_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			kfree(get_bp_info(err_cpu, i)->tsk_pinned);
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}