clk-kona.c
/*
 * Copyright (C) 2013 Broadcom Corporation
 * Copyright 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "clk-kona.h"

#include <linux/delay.h>
#include <linux/kernel.h>

/*
 * "Policies" affect the frequencies of bus clocks provided by a
 * CCU. (I believe these policies are named "Deep Sleep", "Economy",
 * "Normal", and "Turbo".) A lower policy number has lower power
 * consumption, and policy 2 is the default.
 */
#define CCU_POLICY_COUNT 4

#define CCU_ACCESS_PASSWORD 0xA5A500
#define CLK_GATE_DELAY_LOOP 2000

/* Bitfield operations */

/* Produces a mask of set bits covering a range of a 32-bit value */
static inline u32 bitfield_mask(u32 shift, u32 width)
{
        return ((1 << width) - 1) << shift;
}

/* Extract the value of a bitfield found within a given register value */
static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
{
        return (reg_val & bitfield_mask(shift, width)) >> shift;
}

/* Replace the value of a bitfield found within a given register value */
static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
{
        u32 mask = bitfield_mask(shift, width);

        return (reg_val & ~mask) | (val << shift);
}
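
/*
 * Worked example (editor's illustration, not from the original source):
 * bitfield_mask(4, 3) is 0x70, so bitfield_extract(0x12345678, 4, 3)
 * yields 0x7, and bitfield_replace(0x12345678, 4, 3, 0x5) yields
 * 0x12345658.
 */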

/* Divider and scaling helpers */

/* Convert a divider into the scaled divisor value it represents. */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
        return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}

/*
 * Build a scaled divider value as close as possible to the
 * given whole part (div_value) and fractional part (expressed
 * in billionths).
 */
u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
{
        u64 combined;

        BUG_ON(!div_value);
        BUG_ON(billionths >= BILLION);

        combined = (u64)div_value * BILLION + billionths;
        combined <<= div->u.s.frac_width;

        return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
}
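
/*
 * Worked example (editor's illustration): with frac_width == 3, a
 * divisor of 1.5 is requested as scaled_div_build(div, 1, 500000000).
 * combined becomes 1500000000 << 3 == 12000000000, and dividing by
 * one billion gives the scaled divisor 12 (i.e. 1.5 * 2^3).
 */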

/* The scaled minimum divisor representable by a divider */
static inline u64 scaled_div_min(struct bcm_clk_div *div)
{
        if (divider_is_fixed(div))
                return (u64)div->u.fixed;

        return scaled_div_value(div, 0);
}

/* The scaled maximum divisor representable by a divider */
u64 scaled_div_max(struct bcm_clk_div *div)
{
        u32 reg_div;

        if (divider_is_fixed(div))
                return (u64)div->u.fixed;

        reg_div = ((u32)1 << div->u.s.width) - 1;

        return scaled_div_value(div, reg_div);
}

/*
 * Convert a scaled divisor into its divider representation as
 * stored in a divider register field.
 */
static inline u32 divider(struct bcm_clk_div *div, u64 scaled_div)
{
        BUG_ON(scaled_div < scaled_div_min(div));
        BUG_ON(scaled_div > scaled_div_max(div));

        return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
}

/* Return a rate scaled for use when dividing by a scaled divisor. */
static inline u64 scale_rate(struct bcm_clk_div *div, u32 rate)
{
        if (divider_is_fixed(div))
                return (u64)rate;

        return (u64)rate << div->u.s.frac_width;
}
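
/*
 * Worked example (editor's illustration): with frac_width == 3, a
 * 100000000 Hz parent rate is scaled to 800000000 by scale_rate().
 * Dividing that by the scaled divisor 12 from the example above
 * gives 66666667 Hz, i.e. 100 MHz / 1.5.
 */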

/* CCU access */

/* Read a 32-bit register value from a CCU's address space. */
static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
{
        return readl(ccu->base + reg_offset);
}

/* Write a 32-bit register value into a CCU's address space. */
static inline void
__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
{
        writel(reg_val, ccu->base + reg_offset);
}

static inline unsigned long ccu_lock(struct ccu_data *ccu)
{
        unsigned long flags;

        spin_lock_irqsave(&ccu->lock, flags);

        return flags;
}

static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
{
        spin_unlock_irqrestore(&ccu->lock, flags);
}

/*
 * Enable/disable write access to CCU protected registers. The
 * WR_ACCESS register for all CCUs is at offset 0.
 */
static inline void __ccu_write_enable(struct ccu_data *ccu)
{
        if (ccu->write_enabled) {
                pr_err("%s: access already enabled for %s\n", __func__,
                        ccu->name);
                return;
        }
        ccu->write_enabled = true;
        __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
}

static inline void __ccu_write_disable(struct ccu_data *ccu)
{
        if (!ccu->write_enabled) {
                pr_err("%s: access wasn't enabled for %s\n", __func__,
                        ccu->name);
                return;
        }

        __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
        ccu->write_enabled = false;
}
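
/*
 * Note (editor's): given CCU_ACCESS_PASSWORD of 0xA5A500, the writes
 * above store 0xA5A501 (password plus enable bit) to unlock the CCU's
 * protected registers, and 0xA5A500 to lock them again.
 */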

/*
 * Poll a register in a CCU's address space, returning when the
 * specified bit in that register's value is set (or clear). Delay
 * a microsecond after each read of the register. Returns true if
 * successful, or false if we gave up trying.
 *
 * Caller must ensure the CCU lock is held.
 */
static inline bool
__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
{
        unsigned int tries;
        u32 bit_mask = 1 << bit;

        for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
                u32 val;
                bool bit_val;

                val = __ccu_read(ccu, reg_offset);
                bit_val = (val & bit_mask) != 0;
                if (bit_val == want)
                        return true;
                udelay(1);
        }
        pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__,
                ccu->name, reg_offset, bit, want ? "set" : "clear");

        return false;
}
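
/*
 * Note (editor's): with CLK_GATE_DELAY_LOOP of 2000 iterations and a
 * one-microsecond delay per read, __ccu_wait_bit() gives up after
 * roughly 2 milliseconds.
 */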

/* Policy operations */

static bool __ccu_policy_engine_start(struct ccu_data *ccu, bool sync)
{
        struct bcm_policy_ctl *control = &ccu->policy.control;
        u32 offset;
        u32 go_bit;
        u32 mask;
        bool ret;

        /* If we don't need to control policy for this CCU, we're done. */
        if (!policy_ctl_exists(control))
                return true;

        offset = control->offset;
        go_bit = control->go_bit;

        /* Ensure we're not busy before we start */
        ret = __ccu_wait_bit(ccu, offset, go_bit, false);
        if (!ret) {
                pr_err("%s: ccu %s policy engine wouldn't go idle\n",
                        __func__, ccu->name);
                return false;
        }

        /*
         * If it's a synchronous request, we'll wait for the voltage
         * and frequency of the active load to stabilize before
         * returning. To do this we select the active load by
         * setting the ATL bit.
         *
         * An asynchronous request instead ramps the voltage in the
         * background, and when that process stabilizes, the target
         * load is copied to the active load and the CCU frequency
         * is switched. We do this by selecting the target load
         * (ATL bit clear) and setting the request auto-copy (AC bit
         * set).
         *
         * Note, we do NOT read-modify-write this register.
         */
        mask = (u32)1 << go_bit;
        if (sync)
                mask |= 1 << control->atl_bit;
        else
                mask |= 1 << control->ac_bit;
        __ccu_write(ccu, offset, mask);

        /* Wait for indication that operation is complete. */
        ret = __ccu_wait_bit(ccu, offset, go_bit, false);
        if (!ret)
                pr_err("%s: ccu %s policy engine never started\n",
                        __func__, ccu->name);

        return ret;
}

static bool __ccu_policy_engine_stop(struct ccu_data *ccu)
{
        struct bcm_lvm_en *enable = &ccu->policy.enable;
        u32 offset;
        u32 enable_bit;
        bool ret;

        /* If we don't need to control policy for this CCU, we're done. */
        if (!policy_lvm_en_exists(enable))
                return true;

        /* Ensure we're not busy before we start */
        offset = enable->offset;
        enable_bit = enable->bit;
        ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
        if (!ret) {
                pr_err("%s: ccu %s policy engine wouldn't go idle\n",
                        __func__, ccu->name);
                return false;
        }

        /* Now set the bit to stop the engine (NO read-modify-write) */
        __ccu_write(ccu, offset, (u32)1 << enable_bit);

        /* Wait for indication that it has stopped. */
        ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
        if (!ret)
                pr_err("%s: ccu %s policy engine never stopped\n",
                        __func__, ccu->name);

        return ret;
}

/*
 * A CCU has four operating conditions ("policies"), and some clocks
 * can be disabled or enabled based on which policy is currently in
 * effect. Such clocks have a bit in a "policy mask" register for
 * each policy indicating whether the clock is enabled for that
 * policy or not. The bit position for a clock is the same for all
 * four registers, and the 32-bit registers are at consecutive
 * addresses.
 */
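
/*
 * For example (editor's illustration): a clock with policy->offset of
 * 0x10 and policy->bit of 5 gets bit 5 set in each of the four mask
 * registers at offsets 0x10, 0x14, 0x18, and 0x1c by the loop below.
 */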
static bool policy_init(struct ccu_data *ccu, struct bcm_clk_policy *policy)
{
        u32 offset;
        u32 mask;
        int i;
        bool ret;

        if (!policy_exists(policy))
                return true;

        /*
         * We need to stop the CCU policy engine to allow update
         * of our policy bits.
         */
        if (!__ccu_policy_engine_stop(ccu)) {
                pr_err("%s: unable to stop CCU %s policy engine\n",
                        __func__, ccu->name);
                return false;
        }

        /*
         * For now, if a clock defines its policy bit we just mark
         * it "enabled" for all four policies.
         */
        offset = policy->offset;
        mask = (u32)1 << policy->bit;
        for (i = 0; i < CCU_POLICY_COUNT; i++) {
                u32 reg_val;

                reg_val = __ccu_read(ccu, offset);
                reg_val |= mask;
                __ccu_write(ccu, offset, reg_val);
                offset += sizeof(u32);
        }

        /* We're done updating; fire up the policy engine again. */
        ret = __ccu_policy_engine_start(ccu, true);
        if (!ret)
                pr_err("%s: unable to restart CCU %s policy engine\n",
                        __func__, ccu->name);

        return ret;
}

/* Gate operations */

/* Determine whether a clock is gated. CCU lock must be held. */
static bool
__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
        u32 bit_mask;
        u32 reg_val;

        /* If there is no gate we can assume it's enabled. */
        if (!gate_exists(gate))
                return true;

        bit_mask = 1 << gate->status_bit;
        reg_val = __ccu_read(ccu, gate->offset);

        return (reg_val & bit_mask) != 0;
}

/* Determine whether a clock is gated. */
static bool
is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
        unsigned long flags;
        bool ret;

        /* Avoid taking the lock if we can */
        if (!gate_exists(gate))
                return true;

        flags = ccu_lock(ccu);
        ret = __is_clk_gate_enabled(ccu, gate);
        ccu_unlock(ccu, flags);

        return ret;
}

/*
 * Commit our desired gate state to the hardware.
 * Returns true if successful, false otherwise.
 */
static bool
__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
        u32 reg_val;
        u32 mask;
        bool enabled = false;

        BUG_ON(!gate_exists(gate));
        if (!gate_is_sw_controllable(gate))
                return true;            /* Nothing we can change */

        reg_val = __ccu_read(ccu, gate->offset);

        /* For a hardware/software gate, set which is in control */
        if (gate_is_hw_controllable(gate)) {
                mask = (u32)1 << gate->hw_sw_sel_bit;
                if (gate_is_sw_managed(gate))
                        reg_val |= mask;
                else
                        reg_val &= ~mask;
        }

        /*
         * If software is in control, enable or disable the gate.
         * If hardware is, clear the enabled bit for good measure.
         * If a software controlled gate can't be disabled, we're
         * required to write a 0 into the enable bit (but the gate
         * will be enabled).
         */
        mask = (u32)1 << gate->en_bit;
        if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
            !gate_is_no_disable(gate))
                reg_val |= mask;
        else
                reg_val &= ~mask;

        __ccu_write(ccu, gate->offset, reg_val);

        /* For a hardware controlled gate, we're done */
        if (!gate_is_sw_managed(gate))
                return true;

        /* Otherwise wait for the gate to be in desired state */
        return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
}

/*
 * Initialize a gate. Our desired state (hardware/software select,
 * and if software, its enable state) is committed to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
        if (!gate_exists(gate))
                return true;
        return __gate_commit(ccu, gate);
}

/*
 * Set a gate to enabled or disabled state. Does nothing if the
 * gate is not currently under software control, or if it is already
 * in the requested state. Returns true if successful, false
 * otherwise. CCU lock must be held.
 */
static bool
__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
{
        bool ret;

        if (!gate_exists(gate) || !gate_is_sw_managed(gate))
                return true;    /* Nothing to do */

        if (!enable && gate_is_no_disable(gate)) {
                pr_warn("%s: invalid gate disable request (ignoring)\n",
                        __func__);
                return true;
        }

        if (enable == gate_is_enabled(gate))
                return true;    /* No change */

        gate_flip_enabled(gate);
        ret = __gate_commit(ccu, gate);
        if (!ret)
                gate_flip_enabled(gate);        /* Revert the change */

        return ret;
}

/* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */
static int clk_gate(struct ccu_data *ccu, const char *name,
                        struct bcm_clk_gate *gate, bool enable)
{
        unsigned long flags;
        bool success;

        /*
         * Avoid taking the lock if we can. We quietly ignore
         * requests to change state that don't make sense.
         */
        if (!gate_exists(gate) || !gate_is_sw_managed(gate))
                return 0;
        if (!enable && gate_is_no_disable(gate))
                return 0;

        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);

        success = __clk_gate(ccu, gate, enable);

        __ccu_write_disable(ccu);
        ccu_unlock(ccu, flags);

        if (success)
                return 0;

        pr_err("%s: failed to %s gate for %s\n", __func__,
                enable ? "enable" : "disable", name);

        return -EIO;
}
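
/*
 * Note (editor's): clk_gate() above shows the register-update pattern
 * used throughout this file: take the CCU spinlock, enable write
 * access, perform the operation, disable write access, then release
 * the lock.
 */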

/* Hysteresis operations */

/*
 * If a clock gate requires a turn-off delay it will have
 * "hysteresis" register bits defined. The first, if set, enables
 * the delay; and if enabled, the second bit determines whether the
 * delay is "low" or "high" (1 means high). For now, if it's
 * defined for a clock, we set it.
 */
static bool hyst_init(struct ccu_data *ccu, struct bcm_clk_hyst *hyst)
{
        u32 offset;
        u32 reg_val;
        u32 mask;

        if (!hyst_exists(hyst))
                return true;

        offset = hyst->offset;
        mask = (u32)1 << hyst->en_bit;
        mask |= (u32)1 << hyst->val_bit;

        reg_val = __ccu_read(ccu, offset);
        reg_val |= mask;
        __ccu_write(ccu, offset, reg_val);

        return true;
}

/* Trigger operations */

/*
 * Caller must ensure CCU lock is held and access is enabled.
 * Returns true if successful, false otherwise.
 */
static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
{
        /* Trigger the clock and wait for it to finish */
        __ccu_write(ccu, trig->offset, 1 << trig->bit);

        return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
}

/* Divider operations */

/* Read a divider value and return the scaled divisor it represents. */
static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
{
        unsigned long flags;
        u32 reg_val;
        u32 reg_div;

        if (divider_is_fixed(div))
                return (u64)div->u.fixed;

        flags = ccu_lock(ccu);
        reg_val = __ccu_read(ccu, div->u.s.offset);
        ccu_unlock(ccu, flags);

        /* Extract the full divider field from the register value */
        reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);

        /* Return the scaled divisor value it represents */
        return scaled_div_value(div, reg_div);
}

/*
 * Convert a divider's scaled divisor value into its recorded form
 * and commit it into the hardware divider register.
 *
 * Returns 0 on success. Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
        bool enabled;
        u32 reg_div;
        u32 reg_val;
        int ret = 0;

        BUG_ON(divider_is_fixed(div));

        /*
         * If we're just initializing the divider, and no initial
         * state was defined in the device tree, we just find out
         * what its current value is rather than updating it.
         */
        if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
                reg_val = __ccu_read(ccu, div->u.s.offset);
                reg_div = bitfield_extract(reg_val, div->u.s.shift,
                                                div->u.s.width);
                div->u.s.scaled_div = scaled_div_value(div, reg_div);

                return 0;
        }

        /* Convert the scaled divisor to the value we need to record */
        reg_div = divider(div, div->u.s.scaled_div);

        /* Clock needs to be enabled before changing the rate */
        enabled = __is_clk_gate_enabled(ccu, gate);
        if (!enabled && !__clk_gate(ccu, gate, true)) {
                ret = -ENXIO;
                goto out;
        }

        /* Replace the divider value and record the result */
        reg_val = __ccu_read(ccu, div->u.s.offset);
        reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
                                        reg_div);
        __ccu_write(ccu, div->u.s.offset, reg_val);

        /* If the trigger fails we still want to disable the gate */
        if (!__clk_trigger(ccu, trig))
                ret = -EIO;

        /* Disable the clock again if it was disabled to begin with */
        if (!enabled && !__clk_gate(ccu, gate, false))
                ret = ret ? ret : -ENXIO;       /* return first error */
out:
        return ret;
}

/*
 * Initialize a divider by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
        if (!divider_exists(div) || divider_is_fixed(div))
                return true;
        return !__div_commit(ccu, gate, div, trig);
}

static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_div *div, struct bcm_clk_trig *trig,
                        u64 scaled_div)
{
        unsigned long flags;
        u64 previous;
        int ret;

        BUG_ON(divider_is_fixed(div));

        previous = div->u.s.scaled_div;
        if (previous == scaled_div)
                return 0;       /* No change */

        div->u.s.scaled_div = scaled_div;

        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);

        ret = __div_commit(ccu, gate, div, trig);

        __ccu_write_disable(ccu);
        ccu_unlock(ccu, flags);

        if (ret)
                div->u.s.scaled_div = previous; /* Revert the change */

        return ret;
}
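
/*
 * Note (editor's): divider_write() above (like selector_write()
 * below) updates the cached value optimistically, commits it to
 * hardware, and reverts the cached value if the commit fails, so the
 * cache always reflects the hardware state.
 */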

/* Common clock rate helpers */

/*
 * Implement the common clock framework recalc_rate method, taking
 * into account a divider and an optional pre-divider. The
 * pre-divider register pointer may be NULL.
 */
static unsigned long clk_recalc_rate(struct ccu_data *ccu,
                        struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
                        unsigned long parent_rate)
{
        u64 scaled_parent_rate;
        u64 scaled_div;
        u64 result;

        if (!divider_exists(div))
                return parent_rate;

        if (parent_rate > (unsigned long)LONG_MAX)
                return 0;       /* actually this would be a caller bug */

        /*
         * If there is a pre-divider, divide the scaled parent rate
         * by the pre-divider value first. In this case--to improve
         * accuracy--scale the parent rate by *both* the pre-divider
         * value and the divider before actually computing the
         * result of the pre-divider.
         *
         * If there's only one divider, just scale the parent rate.
         */
        if (pre_div && divider_exists(pre_div)) {
                u64 scaled_rate;

                scaled_rate = scale_rate(pre_div, parent_rate);
                scaled_rate = scale_rate(div, scaled_rate);
                scaled_div = divider_read_scaled(ccu, pre_div);
                scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
                                                        scaled_div);
        } else {
                scaled_parent_rate = scale_rate(div, parent_rate);
        }

        /*
         * Get the scaled divisor value, and divide the scaled
         * parent rate by that to determine this clock's resulting
         * rate.
         */
        scaled_div = divider_read_scaled(ccu, div);
        result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, scaled_div);

        return (unsigned long)result;
}
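
/*
 * Worked example (editor's illustration, invented values): with a
 * 200 MHz parent, a pre-divider of 2 and a divider of 1.5 (both with
 * frac_width == 3, so scaled divisors 16 and 12), the parent rate is
 * scaled to 200000000 * 64 == 12800000000, divided by 16 to get
 * 800000000, then divided by 12 to yield 66666667 Hz.
 */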

/*
 * Compute the output rate produced when a given parent rate is fed
 * into two dividers. The pre-divider can be NULL, and even if it's
 * non-null it may be nonexistent. It's also OK for the divider to
 * be nonexistent, and in that case the pre-divider is also ignored.
 *
 * If scaled_div is non-null, it is used to return the scaled divisor
 * value used by the (downstream) divider to produce that rate.
 */
static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
                                struct bcm_clk_div *pre_div,
                                unsigned long rate, unsigned long parent_rate,
                                u64 *scaled_div)
{
        u64 scaled_parent_rate;
        u64 min_scaled_div;
        u64 max_scaled_div;
        u64 best_scaled_div;
        u64 result;

        BUG_ON(!divider_exists(div));
        BUG_ON(!rate);
        BUG_ON(parent_rate > (u64)LONG_MAX);

        /*
         * If there is a pre-divider, divide the scaled parent rate
         * by the pre-divider value first. In this case--to improve
         * accuracy--scale the parent rate by *both* the pre-divider
         * value and the divider before actually computing the
         * result of the pre-divider.
         *
         * If there's only one divider, just scale the parent rate.
         *
         * For simplicity we treat the pre-divider as fixed (for now).
         */
        if (divider_exists(pre_div)) {
                u64 scaled_rate;
                u64 scaled_pre_div;

                scaled_rate = scale_rate(pre_div, parent_rate);
                scaled_rate = scale_rate(div, scaled_rate);
                scaled_pre_div = divider_read_scaled(ccu, pre_div);
                scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
                                                        scaled_pre_div);
        } else {
                scaled_parent_rate = scale_rate(div, parent_rate);
        }

        /*
         * Compute the best possible divider and ensure it is in
         * range. A fixed divider can't be changed, so just report
         * the best we can do.
         */
        if (!divider_is_fixed(div)) {
                best_scaled_div = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate,
                                                        rate);
                min_scaled_div = scaled_div_min(div);
                max_scaled_div = scaled_div_max(div);
                if (best_scaled_div > max_scaled_div)
                        best_scaled_div = max_scaled_div;
                else if (best_scaled_div < min_scaled_div)
                        best_scaled_div = min_scaled_div;
        } else {
                best_scaled_div = divider_read_scaled(ccu, div);
        }

        /* OK, figure out the resulting rate */
        result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, best_scaled_div);

        if (scaled_div)
                *scaled_div = best_scaled_div;

        return (long)result;
}

/* Common clock parent helpers */

/*
 * For a given parent selector (register field) value, find the
 * index into a selector's parent_sel array that contains it.
 * Returns the index, or BAD_CLK_INDEX if it's not found.
 */
static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
{
        u8 i;

        BUG_ON(sel->parent_count > (u32)U8_MAX);
        for (i = 0; i < sel->parent_count; i++)
                if (sel->parent_sel[i] == parent_sel)
                        return i;
        return BAD_CLK_INDEX;
}

/*
 * Fetch the current value of the selector, and translate that into
 * its corresponding index in the parent array we registered with
 * the clock framework.
 *
 * Returns parent array index that corresponds with the value found,
 * or BAD_CLK_INDEX if the found value is out of range.
 */
static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
{
        unsigned long flags;
        u32 reg_val;
        u32 parent_sel;
        u8 index;

        /* If there's no selector, there's only one parent */
        if (!selector_exists(sel))
                return 0;

        /* Get the value in the selector register */
        flags = ccu_lock(ccu);
        reg_val = __ccu_read(ccu, sel->offset);
        ccu_unlock(ccu, flags);

        parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);

        /* Look up that selector's parent array index and return it */
        index = parent_index(sel, parent_sel);
        if (index == BAD_CLK_INDEX)
                pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
                        __func__, parent_sel, ccu->name, sel->offset);

        return index;
}

/*
 * Commit our desired selector value to the hardware.
 *
 * Returns 0 on success. Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int
__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
        u32 parent_sel;
        u32 reg_val;
        bool enabled;
        int ret = 0;

        BUG_ON(!selector_exists(sel));

        /*
         * If we're just initializing the selector, and no initial
         * state was defined in the device tree, we just find out
         * what its current value is rather than updating it.
         */
        if (sel->clk_index == BAD_CLK_INDEX) {
                u8 index;

                reg_val = __ccu_read(ccu, sel->offset);
                parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
                index = parent_index(sel, parent_sel);
                if (index == BAD_CLK_INDEX)
                        return -EINVAL;
                sel->clk_index = index;

                return 0;
        }

        BUG_ON((u32)sel->clk_index >= sel->parent_count);
        parent_sel = sel->parent_sel[sel->clk_index];

        /* Clock needs to be enabled before changing the parent */
        enabled = __is_clk_gate_enabled(ccu, gate);
        if (!enabled && !__clk_gate(ccu, gate, true))
                return -ENXIO;

        /* Replace the selector value and record the result */
        reg_val = __ccu_read(ccu, sel->offset);
        reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
        __ccu_write(ccu, sel->offset, reg_val);

        /* If the trigger fails we still want to disable the gate */
        if (!__clk_trigger(ccu, trig))
                ret = -EIO;

        /* Disable the clock again if it was disabled to begin with */
        if (!enabled && !__clk_gate(ccu, gate, false))
                ret = ret ? ret : -ENXIO;       /* return first error */

        return ret;
}

/*
 * Initialize a selector by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
        if (!selector_exists(sel))
                return true;
        return !__sel_commit(ccu, gate, sel, trig);
}

/*
 * Write a new value into a selector register to switch to a
 * different parent clock. Returns 0 on success, or an error code
 * (from __sel_commit()) otherwise.
 */
static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
                        struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
                        u8 index)
{
        unsigned long flags;
        u8 previous;
        int ret;

        previous = sel->clk_index;
        if (previous == index)
                return 0;       /* No change */

        sel->clk_index = index;

        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);

        ret = __sel_commit(ccu, gate, sel, trig);

        __ccu_write_disable(ccu);
        ccu_unlock(ccu, flags);

        if (ret)
                sel->clk_index = previous;      /* Revert the change */

        return ret;
}

/* Clock operations */

static int kona_peri_clk_enable(struct clk_hw *hw)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

        return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true);
}

static void kona_peri_clk_disable(struct clk_hw *hw)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

        (void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false);
}

static int kona_peri_clk_is_enabled(struct clk_hw *hw)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

        return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
}

static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
                        unsigned long parent_rate)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct peri_clk_data *data = bcm_clk->u.peri;

        return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
                                parent_rate);
}

static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long *parent_rate)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct bcm_clk_div *div = &bcm_clk->u.peri->div;

        if (!divider_exists(div))
                return __clk_get_rate(hw->clk);

        /* Quietly avoid a zero rate */
        return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
                                rate ? rate : 1, *parent_rate, NULL);
}

static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
                unsigned long min_rate,
                unsigned long max_rate,
                unsigned long *best_parent_rate, struct clk_hw **best_parent)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct clk *clk = hw->clk;
        struct clk *current_parent;
        unsigned long parent_rate;
        unsigned long best_delta;
        unsigned long best_rate;
        u32 parent_count;
        u32 which;

        /*
         * If there is no other parent to choose, use the current one.
         * Note: We don't honor (or use) CLK_SET_RATE_NO_REPARENT.
         */
        WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT);
        parent_count = (u32)bcm_clk->init_data.num_parents;
        if (parent_count < 2)
                return kona_peri_clk_round_rate(hw, rate, best_parent_rate);

        /* Unless we can do better, stick with current parent */
        current_parent = clk_get_parent(clk);
        parent_rate = __clk_get_rate(current_parent);
        best_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate);
        best_delta = abs(best_rate - rate);

        /* Check whether any other parent clock can produce a better result */
        for (which = 0; which < parent_count; which++) {
                struct clk *parent = clk_get_parent_by_index(clk, which);
                unsigned long delta;
                unsigned long other_rate;

                BUG_ON(!parent);
                if (parent == current_parent)
                        continue;

                /* We don't support CLK_SET_RATE_PARENT */
                parent_rate = __clk_get_rate(parent);
                other_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate);
                delta = abs(other_rate - rate);
                if (delta < best_delta) {
                        best_delta = delta;
                        best_rate = other_rate;
                        *best_parent = __clk_get_hw(parent);
                        *best_parent_rate = parent_rate;
                }
        }

        return best_rate;
}

static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct peri_clk_data *data = bcm_clk->u.peri;
        struct bcm_clk_sel *sel = &data->sel;
        struct bcm_clk_trig *trig;
        int ret;

        BUG_ON(index >= sel->parent_count);

        /* If there's only one parent we don't require a selector */
        if (!selector_exists(sel))
                return 0;

        /*
         * The regular trigger is used by default, but if there's a
         * pre-trigger we want to use that instead.
         */
        trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
                                               : &data->trig;

        ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
        if (ret == -ENXIO) {
                pr_err("%s: gating failure for %s\n", __func__,
                        bcm_clk->init_data.name);
                ret = -EIO;     /* Don't proliferate weird errors */
        } else if (ret == -EIO) {
                pr_err("%s: %strigger failed for %s\n", __func__,
                        trig == &data->pre_trig ? "pre-" : "",
                        bcm_clk->init_data.name);
        }

        return ret;
}

static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct peri_clk_data *data = bcm_clk->u.peri;
        u8 index;

        index = selector_read_index(bcm_clk->ccu, &data->sel);

        /* Not all callers would handle an out-of-range value gracefully */
        return index == BAD_CLK_INDEX ? 0 : index;
}

static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long parent_rate)
{
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct peri_clk_data *data = bcm_clk->u.peri;
        struct bcm_clk_div *div = &data->div;
        u64 scaled_div = 0;
        int ret;

        if (parent_rate > (unsigned long)LONG_MAX)
                return -EINVAL;

        if (rate == __clk_get_rate(hw->clk))
                return 0;

        if (!divider_exists(div))
                return rate == parent_rate ? 0 : -EINVAL;

        /*
         * A fixed divider can't be changed. (Nor can a fixed
         * pre-divider be, but for now we never actually try to
         * change that.) Tolerate a request for a no-op change.
         */
        if (divider_is_fixed(&data->div))
                return rate == parent_rate ? 0 : -EINVAL;

        /*
         * Get the scaled divisor value needed to achieve a clock
         * rate as close as possible to what was requested, given
         * the parent clock rate supplied.
         */
        (void)round_rate(bcm_clk->ccu, div, &data->pre_div,
                                rate ? rate : 1, parent_rate, &scaled_div);

        /*
         * We aren't updating any pre-divider at this point, so
         * we'll use the regular trigger.
         */
        ret = divider_write(bcm_clk->ccu, &data->gate, &data->div,
                                &data->trig, scaled_div);
        if (ret == -ENXIO) {
                pr_err("%s: gating failure for %s\n", __func__,
                        bcm_clk->init_data.name);
                ret = -EIO;     /* Don't proliferate weird errors */
        } else if (ret == -EIO) {
                pr_err("%s: trigger failed for %s\n", __func__,
                        bcm_clk->init_data.name);
        }

        return ret;
}

struct clk_ops kona_peri_clk_ops = {
        .enable = kona_peri_clk_enable,
        .disable = kona_peri_clk_disable,
        .is_enabled = kona_peri_clk_is_enabled,
        .recalc_rate = kona_peri_clk_recalc_rate,
        .determine_rate = kona_peri_clk_determine_rate,
        .set_parent = kona_peri_clk_set_parent,
        .get_parent = kona_peri_clk_get_parent,
        .set_rate = kona_peri_clk_set_rate,
};
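
/*
 * Note (editor's): these operations are handed to the common clock
 * framework when each peripheral clock is registered (in this
 * driver's companion setup code, clk-kona-setup.c), so calls such as
 * clk_set_rate() and clk_set_parent() on a Kona peripheral clock
 * land here.
 */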

/* Put a peripheral clock into its initial state */
static bool __peri_clk_init(struct kona_clk *bcm_clk)
{
        struct ccu_data *ccu = bcm_clk->ccu;
        struct peri_clk_data *peri = bcm_clk->u.peri;
        const char *name = bcm_clk->init_data.name;
        struct bcm_clk_trig *trig;

        BUG_ON(bcm_clk->type != bcm_clk_peri);

        if (!policy_init(ccu, &peri->policy)) {
                pr_err("%s: error initializing policy for %s\n",
                        __func__, name);
                return false;
        }
        if (!gate_init(ccu, &peri->gate)) {
                pr_err("%s: error initializing gate for %s\n", __func__, name);
                return false;
        }
        if (!hyst_init(ccu, &peri->hyst)) {
                pr_err("%s: error initializing hyst for %s\n", __func__, name);
                return false;
        }
        if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
                pr_err("%s: error initializing divider for %s\n", __func__,
                        name);
                return false;
        }

        /*
         * For the pre-divider and selector, the pre-trigger is used
         * if it's present, otherwise we just use the regular trigger.
         */
        trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
                                               : &peri->trig;

        if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
                pr_err("%s: error initializing pre-divider for %s\n", __func__,
                        name);
                return false;
        }

        if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
                pr_err("%s: error initializing selector for %s\n", __func__,
                        name);
                return false;
        }

        return true;
}

static bool __kona_clk_init(struct kona_clk *bcm_clk)
{
        switch (bcm_clk->type) {
        case bcm_clk_peri:
                return __peri_clk_init(bcm_clk);
        default:
                BUG();
        }

        return false;
}

/* Set a CCU and all its clocks into their desired initial state */
bool __init kona_ccu_init(struct ccu_data *ccu)
{
        unsigned long flags;
        unsigned int which;
        struct clk **clks = ccu->clk_data.clks;
        bool success = true;

        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);

        for (which = 0; which < ccu->clk_data.clk_num; which++) {
                struct kona_clk *bcm_clk;

                if (!clks[which])
                        continue;
                bcm_clk = to_kona_clk(__clk_get_hw(clks[which]));
                success &= __kona_clk_init(bcm_clk);
        }

        __ccu_write_disable(ccu);
        ccu_unlock(ccu, flags);

        return success;
}