sparc-us2e-cpufreq.c

/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
 *
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 *
 * Many thanks to Dominik Brodowski for fixing up the cpufreq
 * infrastructure in order to make this driver easier to implement.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/asi.h>
#include <asm/timer.h>

static struct cpufreq_driver *cpufreq_us2e_driver;

struct us2e_freq_percpu_info {
	struct cpufreq_frequency_table table[6];
};

/* Indexed by cpu number. */
static struct us2e_freq_percpu_info *us2e_freq_table;

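/* Physical addresses of the Hummingbird (UltraSPARC-IIe) memory control
 * and E-Star mode control registers.
 */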
#define HBIRD_MEM_CNTL0_ADDR	0x1fe0000f010UL
#define HBIRD_ESTAR_MODE_ADDR	0x1fe0000f080UL

/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled
 * in the ESTAR mode control register.
 */
#define ESTAR_MODE_DIV_1	0x0000000000000000UL
#define ESTAR_MODE_DIV_2	0x0000000000000001UL
#define ESTAR_MODE_DIV_4	0x0000000000000003UL
#define ESTAR_MODE_DIV_6	0x0000000000000002UL
#define ESTAR_MODE_DIV_8	0x0000000000000004UL
#define ESTAR_MODE_DIV_MASK	0x0000000000000007UL

#define MCTRL0_SREFRESH_ENAB	0x0000000000010000UL
#define MCTRL0_REFR_COUNT_MASK	0x0000000000007f00UL
#define MCTRL0_REFR_COUNT_SHIFT	8
#define MCTRL0_REFR_INTERVAL	7800
#define MCTRL0_REFR_CLKS_P_CNT	64

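/* The Hummingbird registers sit in physical address space and are
 * accessed with ldxa/stxa through the ASI_PHYS_BYPASS_EC_E ASI.
 */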
static unsigned long read_hbreg(unsigned long addr)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
	return ret;
}

static void write_hbreg(unsigned long addr, unsigned long val)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");
	if (addr == HBIRD_ESTAR_MODE_ADDR) {
		/* Need to wait 16 clock cycles for the PLL to lock. */
		udelay(1);
	}
}

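/* Enable or disable DRAM self-refresh in memory control register zero.
 * The read back ensures the write has posted before we continue.
 */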
static void self_refresh_ctl(int enable)
{
	unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);

	if (enable)
		mctrl |= MCTRL0_SREFRESH_ENAB;
	else
		mctrl &= ~MCTRL0_SREFRESH_ENAB;

	write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
	(void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
}

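/* Adjust the memory refresh count so the DRAM refresh interval stays
 * roughly constant at the new clock divisor.  When the cpu is slowing
 * down and self-refresh is not enabled, also wait long enough for both
 * the old and the new refresh counts to expire.
 */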
static void frob_mem_refresh(int cpu_slowing_down,
			     unsigned long clock_tick,
			     unsigned long old_divisor, unsigned long divisor)
{
	unsigned long old_refr_count, refr_count, mctrl;

	refr_count  = (clock_tick * MCTRL0_REFR_INTERVAL);
	refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);

	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
	old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
		>> MCTRL0_REFR_COUNT_SHIFT;

	mctrl &= ~MCTRL0_REFR_COUNT_MASK;
	mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
	write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);

	if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
		unsigned long usecs;

		/* We have to wait for both refresh counts (old
		 * and new) to go to zero.
		 */
		usecs = (MCTRL0_REFR_CLKS_P_CNT *
			 (refr_count + old_refr_count) *
			 1000000UL *
			 old_divisor) / clock_tick;
		udelay(usecs + 1UL);
	}
}

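/* Perform one E-Star divisor change with interrupts disabled, ordering
 * the memory refresh and self-refresh updates around the mode register
 * write.  Changes between divisor 1 and divisors above 2 are done in
 * two steps through divisor 2.
 */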
static void us2e_transition(unsigned long estar, unsigned long new_bits,
			    unsigned long clock_tick,
			    unsigned long old_divisor, unsigned long divisor)
{
	unsigned long flags;

	local_irq_save(flags);

	estar &= ~ESTAR_MODE_DIV_MASK;

	/* This is based upon the state transition diagram in the IIe manual. */
	if (old_divisor == 2 && divisor == 1) {
		self_refresh_ctl(0);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
	} else if (old_divisor == 1 && divisor == 2) {
		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		self_refresh_ctl(1);
	} else if (old_divisor == 1 && divisor > 2) {
		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
				1, 2);
		us2e_transition(estar, new_bits, clock_tick,
				2, divisor);
	} else if (old_divisor > 2 && divisor == 1) {
		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
				old_divisor, 2);
		us2e_transition(estar, new_bits, clock_tick,
				2, divisor);
	} else if (old_divisor < divisor) {
		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
	} else if (old_divisor > divisor) {
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
	} else {
		BUG();
	}

	local_irq_restore(flags);
}

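/* Translate between cpufreq table indices, E-Star mode register
 * settings and clock divisors.
 */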
static unsigned long index_to_estar_mode(unsigned int index)
{
	switch (index) {
	case 0:
		return ESTAR_MODE_DIV_1;
	case 1:
		return ESTAR_MODE_DIV_2;
	case 2:
		return ESTAR_MODE_DIV_4;
	case 3:
		return ESTAR_MODE_DIV_6;
	case 4:
		return ESTAR_MODE_DIV_8;
	default:
		BUG();
	}
}

static unsigned long index_to_divisor(unsigned int index)
{
	switch (index) {
	case 0:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 6;
	case 4:
		return 8;
	default:
		BUG();
	}
}

static unsigned long estar_to_divisor(unsigned long estar)
{
	unsigned long ret;

	switch (estar & ESTAR_MODE_DIV_MASK) {
	case ESTAR_MODE_DIV_1:
		ret = 1;
		break;
	case ESTAR_MODE_DIV_2:
		ret = 2;
		break;
	case ESTAR_MODE_DIV_4:
		ret = 4;
		break;
	case ESTAR_MODE_DIV_6:
		ret = 6;
		break;
	case ESTAR_MODE_DIV_8:
		ret = 8;
		break;
	default:
		BUG();
	}

	return ret;
}

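/* cpufreq ->get() hook: run on the target cpu, read its E-Star mode
 * register and report the resulting frequency in kHz.
 */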
static unsigned int us2e_freq_get(unsigned int cpu)
{
	cpumask_t cpus_allowed;
	unsigned long clock_tick, estar;

	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(cpu));

	clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);

	set_cpus_allowed_ptr(current, &cpus_allowed);

	return clock_tick / estar_to_divisor(estar);
}

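/* cpufreq ->target_index() hook: switch policy->cpu to the divisor
 * selected by @index, performing the register writes on that cpu.
 */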
static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
{
	unsigned int cpu = policy->cpu;
	unsigned long new_bits, new_freq;
	unsigned long clock_tick, divisor, old_divisor, estar;
	cpumask_t cpus_allowed;

	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(cpu));

	new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	new_bits = index_to_estar_mode(index);
	divisor = index_to_divisor(index);
	new_freq /= divisor;

	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);

	old_divisor = estar_to_divisor(estar);

	if (old_divisor != divisor)
		us2e_transition(estar, new_bits, clock_tick * 1000,
				old_divisor, divisor);

	set_cpus_allowed_ptr(current, &cpus_allowed);

	return 0;
}

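/* cpufreq ->init() hook: populate the per-cpu frequency table with the
 * five supported divisors of this cpu's full clock rate.
 */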
static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	struct cpufreq_frequency_table *table =
		&us2e_freq_table[cpu].table[0];

	table[0].driver_data = 0;
	table[0].frequency = clock_tick / 1;
	table[1].driver_data = 1;
	table[1].frequency = clock_tick / 2;
	table[2].driver_data = 2;
	table[2].frequency = clock_tick / 4;
	table[3].driver_data = 3;
	table[3].frequency = clock_tick / 6;
	table[4].driver_data = 4;
	table[4].frequency = clock_tick / 8;
	table[5].driver_data = 5;
	table[5].frequency = CPUFREQ_TABLE_END;

	policy->cpuinfo.transition_latency = 0;
	policy->cur = clock_tick;

	return cpufreq_table_validate_and_show(policy, table);
}

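/* cpufreq ->exit() hook: return the cpu to full speed (divisor 1). */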
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
	if (cpufreq_us2e_driver)
		us2e_freq_target(policy, 0);

	return 0;
}

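/* Check the %ver register for an UltraSPARC-IIe (manufacturer 0x17,
 * implementation 0x13) before allocating and registering the driver.
 */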
static int __init us2e_freq_init(void)
{
	unsigned long manuf, impl, ver;
	int ret;

	if (tlb_type != spitfire)
		return -ENODEV;

	__asm__("rdpr %%ver, %0" : "=r" (ver));
	manuf = ((ver >> 48) & 0xffff);
	impl  = ((ver >> 32) & 0xffff);

	if (manuf == 0x17 && impl == 0x13) {
		struct cpufreq_driver *driver;

		ret = -ENOMEM;
		driver = kzalloc(sizeof(*driver), GFP_KERNEL);
		if (!driver)
			goto err_out;

		us2e_freq_table = kzalloc((NR_CPUS * sizeof(*us2e_freq_table)),
					  GFP_KERNEL);
		if (!us2e_freq_table)
			goto err_out;

		driver->init = us2e_freq_cpu_init;
		driver->verify = cpufreq_generic_frequency_table_verify;
		driver->target_index = us2e_freq_target;
		driver->get = us2e_freq_get;
		driver->exit = us2e_freq_cpu_exit;
		strcpy(driver->name, "UltraSPARC-IIe");

		cpufreq_us2e_driver = driver;
		ret = cpufreq_register_driver(driver);
		if (ret)
			goto err_out;

		return 0;

err_out:
		if (driver) {
			kfree(driver);
			cpufreq_us2e_driver = NULL;
		}
		kfree(us2e_freq_table);
		us2e_freq_table = NULL;
		return ret;
	}

	return -ENODEV;
}

static void __exit us2e_freq_exit(void)
{
	if (cpufreq_us2e_driver) {
		cpufreq_unregister_driver(cpufreq_us2e_driver);
		kfree(cpufreq_us2e_driver);
		cpufreq_us2e_driver = NULL;
		kfree(us2e_freq_table);
		us2e_freq_table = NULL;
	}
}

MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
MODULE_LICENSE("GPL");

module_init(us2e_freq_init);
module_exit(us2e_freq_exit);