/**
 * @file arch/alpha/oprofile/common.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Richard Henderson <rth@twiddle.net>
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>

#include <asm/ptrace.h>
#include <asm/special_insns.h>

#include "op_impl.h"
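
/*
 * One op_model_* object per CPU family.  Normally only the files matching
 * the configured CPU are built, so the references are weak to let this
 * file link when the others are absent.
 */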
extern struct op_axp_model op_model_ev4 __attribute__((weak));
extern struct op_axp_model op_model_ev5 __attribute__((weak));
extern struct op_axp_model op_model_pca56 __attribute__((weak));
extern struct op_axp_model op_model_ev6 __attribute__((weak));
extern struct op_axp_model op_model_ev67 __attribute__((weak));

static struct op_axp_model *model;

extern void (*perf_irq)(unsigned long, struct pt_regs *);
static void (*save_perf_irq)(unsigned long, struct pt_regs *);
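
/*
 * Per-counter settings written through oprofilefs, the system-wide mode
 * bits, and the precomputed register image that is pushed to every CPU.
 */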
static struct op_counter_config ctr[20];
static struct op_system_config sys;
static struct op_register_config reg;

/* Called from do_entInt to handle the performance monitor interrupt.  */

static void
op_handle_interrupt(unsigned long which, struct pt_regs *regs)
{
	model->handle_interrupt(which, regs, ctr);

	/* If the user has selected an interrupt frequency that is
	   not exactly the width of the counter, write a new value
	   into the counter such that it'll overflow after N more
	   events.  */
	if ((reg.need_reset >> which) & 1)
		model->reset_ctr(&reg, which);
}
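
/*
 * Called by the oprofile core when profiling is configured: hook the
 * performance monitor interrupt, compute the enabled-counter mask, and
 * program the counter hardware on every CPU.
 */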
static int
op_axp_setup(void)
{
	unsigned long i, e;

	/* Install our interrupt handler into the existing hook.  */
	save_perf_irq = perf_irq;
	perf_irq = op_handle_interrupt;

	/* Compute the mask of enabled counters.  */
	for (i = e = 0; i < model->num_counters; ++i)
		if (ctr[i].enabled)
			e |= 1 << i;
	reg.enable = e;

	/* Pre-compute the values to stuff in the hardware registers.  */
	model->reg_setup(&reg, ctr, &sys);

	/* Configure the registers on all cpus.  */
	(void)smp_call_function(model->cpu_setup, &reg, 1);
	model->cpu_setup(&reg);
	return 0;
}
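
/* Undo op_axp_setup: restore the interrupt handler we displaced. */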
static void
op_axp_shutdown(void)
{
	/* Remove our interrupt handler.  We may be removing this module.  */
	perf_irq = save_perf_irq;
}
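
/*
 * Enable the counters selected in reg.enable on the current CPU.  The
 * start method runs this on every CPU: once by cross-call and once
 * locally, since smp_call_function skips the calling CPU.
 */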
static void
op_axp_cpu_start(void *dummy)
{
	wrperfmon(1, reg.enable);
}

static int
op_axp_start(void)
{
	(void)smp_call_function(op_axp_cpu_start, NULL, 1);
	op_axp_cpu_start(NULL);
	return 0;
}
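
/* The stop path mirrors start: disable counting on every CPU. */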
static inline void
op_axp_cpu_stop(void *dummy)
{
	/* Disable performance monitoring for all counters.  */
	wrperfmon(0, -1);
}

static void
op_axp_stop(void)
{
	(void)smp_call_function(op_axp_cpu_stop, NULL, 1);
	op_axp_cpu_stop(NULL);
}
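
/*
 * Build the oprofilefs tree: one numbered directory per hardware counter,
 * plus the PAL/kernel/user mode switches when the model supports setting
 * the processor mode.
 */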
static int
op_axp_create_files(struct dentry *root)
{
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		snprintf(buf, sizeof buf, "%d", i);
		dir = oprofilefs_mkdir(root, buf);

		oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
		oprofilefs_create_ulong(dir, "event", &ctr[i].event);
		oprofilefs_create_ulong(dir, "count", &ctr[i].count);
		/* Dummies.  */
		oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
		oprofilefs_create_ulong(dir, "user", &ctr[i].user);
		oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
	}

	if (model->can_set_proc_mode) {
		oprofilefs_create_ulong(root, "enable_pal",
					&sys.enable_pal);
		oprofilefs_create_ulong(root, "enable_kernel",
					&sys.enable_kernel);
		oprofilefs_create_ulong(root, "enable_user",
					&sys.enable_user);
	}

	return 0;
}
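
/*
 * Pick the performance monitor model from the IMPLVER and AMASK values of
 * the boot CPU and plug the callbacks above into the oprofile core.
 */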
int __init
oprofile_arch_init(struct oprofile_operations *ops)
{
	struct op_axp_model *lmodel = NULL;

	switch (implver()) {
	case IMPLVER_EV4:
		lmodel = &op_model_ev4;
		break;
	case IMPLVER_EV5:
		/* 21164PC has a slightly different set of events.
		   Recognize the chip by the presence of the MAX insns.  */
		if (!amask(AMASK_MAX))
			lmodel = &op_model_pca56;
		else
			lmodel = &op_model_ev5;
		break;
	case IMPLVER_EV6:
		/* 21264A supports ProfileMe.
		   Recognize the chip by the presence of the CIX insns.  */
		if (!amask(AMASK_CIX))
			lmodel = &op_model_ev67;
		else
			lmodel = &op_model_ev6;
		break;
	}

	if (!lmodel)
		return -ENODEV;
	model = lmodel;

	ops->create_files = op_axp_create_files;
	ops->setup = op_axp_setup;
	ops->shutdown = op_axp_shutdown;
	ops->start = op_axp_start;
	ops->stop = op_axp_stop;
	ops->cpu_type = lmodel->cpu_type;

	printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
	       lmodel->cpu_type);

	return 0;
}
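
/* Nothing to release here; op_axp_shutdown restores perf_irq. */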
void
oprofile_arch_exit(void)
{
}