internal.h
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order */
#endif
	int				nr_pages;	/* nr of data pages */
	int				overwrite;	/* can overwrite itself */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position */
	local_t				nest;		/* nested writers */
	local_t				events;		/* event limit */
	local_t				wakeup;		/* wakeup stamp */
	local_t				lost;		/* nr records lost */

	long				watermark;	/* wakeup watermark */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
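
/*
 * Note: data_pages[] is a zero-length (flexible) array, so rb_alloc()
 * must allocate the struct with room for nr_pages pointers behind it.
 * A minimal sizing sketch (illustrative, not the kernel's exact
 * allocation code):
 *
 *	size_t size = offsetof(struct ring_buffer, data_pages) +
 *		      nr_pages * sizeof(void *);
 *	struct ring_buffer *rb = kzalloc(size, GFP_KERNEL);
 */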

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}
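
/*
 * Lifetime sketch (illustrative): ring_buffer_get() takes a reference
 * on the buffer currently attached to an event; each successful get
 * must be paired with ring_buffer_put():
 *
 *	struct ring_buffer *rb = ring_buffer_get(event);
 *	if (rb) {
 *		if (rb_has_aux(rb))
 *			... an AUX buffer is allocated ...
 *		ring_buffer_put(rb);
 *	}
 */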

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}
#else
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
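
/*
 * Worked example (assuming PAGE_SHIFT == 12, i.e. 4 KiB pages): with
 * nr_pages == 8 and page_order() == 0, perf_data_size() returns
 * 8 << 12 == 32768 bytes.  In the CONFIG_PERF_USE_VMALLOC case the
 * page_order() term widens each data_pages[] entry to 2^order pages.
 */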

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
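
/*
 * Contract: memcpy_func returns the number of bytes it could NOT copy
 * (0 on full success), so "written = size - written" above yields the
 * bytes actually written, and a short copy terminates the loop.  The
 * page-stepping mask also assumes rb->nr_pages is a power of two.
 * Illustrative usage sketch, using __output_copy_user (defined below):
 *
 *	unsigned long left = __output_copy_user(handle, uptr, len);
 *	if (left)
 *		... only len - left bytes made it into the buffer ...
 */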

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;	/* everything copied */
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;	/* copy nothing, just advance the handle */
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	/*
	 * Output can be written from NMI/IRQ context, where page
	 * faults must not be serviced; a faulting access simply
	 * leaves part of the range uncopied.
	 */
	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;	/* bytes NOT copied */
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
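
/*
 * How the three generated variants pair up in practice (illustrative):
 *
 *	__output_copy(handle, buf, len);	 kernel-space source
 *	__output_copy_user(handle, ubuf, len);	 user-space source,
 *						 safe against faults
 *	__output_skip(handle, NULL, len);	 write nothing, just
 *						 advance past len bytes
 */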

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	/* One recursion counter per context: NMI, hardirq, softirq, task. */
	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	/* Already active in this context: refuse to recurse. */
	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
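
/*
 * Typical guard pattern (illustrative sketch; the real caller lives in
 * callchain.c and uses a per-cpu array -- the names below are made up):
 *
 *	static DEFINE_PER_CPU(int, my_recursion[4]);
 *
 *	int rctx = get_recursion_context(this_cpu_ptr(my_recursion));
 *	if (rctx < 0)
 *		return;		- already active in this context
 *	... do the non-reentrant work ...
 *	put_recursion_context(this_cpu_ptr(my_recursion), rctx);
 */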

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */