/* stat.c */

#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/irqnr.h>
#include <asm/cputime.h>
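
/*
 * Architectures may provide their own IRQ and idle-time accounting hooks;
 * when they do not, these fall back to zero so the sums below are simply
 * left unaffected.
 */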
#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
#ifndef arch_idle_time
#define arch_idle_time(cpu) 0
#endif
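
/*
 * show_stat() produces the whole of /proc/stat: an aggregate "cpu" line,
 * one "cpuN" line per online CPU, the interrupt and softirq counters, and
 * a handful of scheduler statistics. Illustrative output (the values are
 * examples only):
 *
 *   cpu  2255 34 2290 22625563 6290 127 456 0 0 0
 *   cpu0 1132 34 1441 11311718 3675 127 438 0 0 0
 *   intr 114930548 113199788 3 0 5 263 0 4 [...]
 *   ctxt 1990473
 *   btime 1062191376
 *   processes 2915
 *   procs_running 1
 *   procs_blocked 0
 *   softirq 183433 0 21755 12 39 1137 231 21459 2263
 */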
static int show_stat(struct seq_file *p, void *v)
{
        int i, j;
        unsigned long jif;
        cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
        cputime64_t guest, guest_nice;
        u64 sum = 0;
        u64 sum_softirq = 0;
        unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
        struct timespec boottime;

        user = nice = system = idle = iowait =
                irq = softirq = steal = cputime64_zero;
        guest = guest_nice = cputime64_zero;
        getboottime(&boottime);
        jif = boottime.tv_sec;
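
        /*
         * Accumulate every per-CPU counter across all possible CPUs, so the
         * aggregate "cpu" line also covers CPUs that are currently offline.
         */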
        for_each_possible_cpu(i) {
                user = cputime64_add(user, kstat_cpu(i).cpustat.user);
                nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
                system = cputime64_add(system, kstat_cpu(i).cpustat.system);
                idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
                idle = cputime64_add(idle, arch_idle_time(i));
                iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
                irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
                softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
                steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
                guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
                guest_nice = cputime64_add(guest_nice,
                        kstat_cpu(i).cpustat.guest_nice);
                sum += kstat_cpu_irqs_sum(i);
                sum += arch_irq_stat_cpu(i);

                for (j = 0; j < NR_SOFTIRQS; j++) {
                        unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

                        per_softirq_sums[j] += softirq_stat;
                        sum_softirq += softirq_stat;
                }
        }
        sum += arch_irq_stat();

        seq_printf(p, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu %llu "
                "%llu\n",
                (unsigned long long)cputime64_to_clock_t(user),
                (unsigned long long)cputime64_to_clock_t(nice),
                (unsigned long long)cputime64_to_clock_t(system),
                (unsigned long long)cputime64_to_clock_t(idle),
                (unsigned long long)cputime64_to_clock_t(iowait),
                (unsigned long long)cputime64_to_clock_t(irq),
                (unsigned long long)cputime64_to_clock_t(softirq),
                (unsigned long long)cputime64_to_clock_t(steal),
                (unsigned long long)cputime64_to_clock_t(guest),
                (unsigned long long)cputime64_to_clock_t(guest_nice));
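
        /* Per-CPU lines; only CPUs that are currently online are listed. */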
        for_each_online_cpu(i) {
                /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
                user = kstat_cpu(i).cpustat.user;
                nice = kstat_cpu(i).cpustat.nice;
                system = kstat_cpu(i).cpustat.system;
                idle = kstat_cpu(i).cpustat.idle;
                idle = cputime64_add(idle, arch_idle_time(i));
                iowait = kstat_cpu(i).cpustat.iowait;
                irq = kstat_cpu(i).cpustat.irq;
                softirq = kstat_cpu(i).cpustat.softirq;
                steal = kstat_cpu(i).cpustat.steal;
                guest = kstat_cpu(i).cpustat.guest;
                guest_nice = kstat_cpu(i).cpustat.guest_nice;
                seq_printf(p,
                        "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
                        "%llu\n",
                        i,
                        (unsigned long long)cputime64_to_clock_t(user),
                        (unsigned long long)cputime64_to_clock_t(nice),
                        (unsigned long long)cputime64_to_clock_t(system),
                        (unsigned long long)cputime64_to_clock_t(idle),
                        (unsigned long long)cputime64_to_clock_t(iowait),
                        (unsigned long long)cputime64_to_clock_t(irq),
                        (unsigned long long)cputime64_to_clock_t(softirq),
                        (unsigned long long)cputime64_to_clock_t(steal),
                        (unsigned long long)cputime64_to_clock_t(guest),
                        (unsigned long long)cputime64_to_clock_t(guest_nice));
        }
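
        /* Total interrupt count, followed by the per-IRQ counts. */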
        seq_printf(p, "intr %llu", (unsigned long long)sum);

        /*
         * The per-IRQ counters are read again here, so they can differ
         * from "sum", which was computed above and may be stale by now.
         */
        for_each_irq_nr(j)
                seq_printf(p, " %u", kstat_irqs(j));

        seq_printf(p,
                "\nctxt %llu\n"
                "btime %lu\n"
                "processes %lu\n"
                "procs_running %lu\n"
                "procs_blocked %lu\n",
                nr_context_switches(),
                (unsigned long)jif,
                total_forks,
                nr_running(),
                nr_iowait());

        seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);

        for (i = 0; i < NR_SOFTIRQS; i++)
                seq_printf(p, " %u", per_softirq_sums[i]);
        seq_putc(p, '\n');

        return 0;
}
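
/*
 * stat_open() replaces the default single_open() buffer with a larger one
 * sized to the number of possible CPUs (roughly one extra page per 32 CPUs,
 * capped at KMALLOC_MAX_SIZE), so large machines do not bounce through
 * seq_file's grow-and-retry path on every read.
 */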
static int stat_open(struct inode *inode, struct file *file)
{
        unsigned size = 4096 * (1 + num_possible_cpus() / 32);
        char *buf;
        struct seq_file *m;
        int res;

        /* don't ask for more than the kmalloc() max size */
        if (size > KMALLOC_MAX_SIZE)
                size = KMALLOC_MAX_SIZE;
        buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        res = single_open(file, show_stat, NULL);
        if (!res) {
                m = file->private_data;
                m->buf = buf;
                m->size = size;
        } else
                kfree(buf);
        return res;
}

static const struct file_operations proc_stat_operations = {
        .open           = stat_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
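
/* Register the "stat" entry under /proc at boot. */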
static int __init proc_stat_init(void)
{
        proc_create("stat", 0, NULL, &proc_stat_operations);
        return 0;
}
module_init(proc_stat_init);