clk-core.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032
  1. /*
  2. * Purna Chandra Mandal,<purna.mandal@microchip.com>
  3. * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
  4. *
  5. * This program is free software; you can distribute it and/or modify it
  6. * under the terms of the GNU General Public License (Version 2) as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  12. * for more details.
  13. */
  14. #include <linux/clk-provider.h>
  15. #include <linux/delay.h>
  16. #include <linux/device.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/iopoll.h>
  19. #include <asm/mach-pic32/pic32.h>
  20. #include <asm/traps.h>
  21. #include "clk-core.h"
  22. /* OSCCON Reg fields */
  23. #define OSC_CUR_MASK 0x07
  24. #define OSC_CUR_SHIFT 12
  25. #define OSC_NEW_MASK 0x07
  26. #define OSC_NEW_SHIFT 8
  27. #define OSC_SWEN BIT(0)
  28. /* SPLLCON Reg fields */
  29. #define PLL_RANGE_MASK 0x07
  30. #define PLL_RANGE_SHIFT 0
  31. #define PLL_ICLK_MASK 0x01
  32. #define PLL_ICLK_SHIFT 7
  33. #define PLL_IDIV_MASK 0x07
  34. #define PLL_IDIV_SHIFT 8
  35. #define PLL_ODIV_MASK 0x07
  36. #define PLL_ODIV_SHIFT 24
  37. #define PLL_MULT_MASK 0x7F
  38. #define PLL_MULT_SHIFT 16
  39. #define PLL_MULT_MAX 128
  40. #define PLL_ODIV_MIN 1
  41. #define PLL_ODIV_MAX 5
  42. /* Peripheral Bus Clock Reg Fields */
  43. #define PB_DIV_MASK 0x7f
  44. #define PB_DIV_SHIFT 0
  45. #define PB_DIV_READY BIT(11)
  46. #define PB_DIV_ENABLE BIT(15)
  47. #define PB_DIV_MAX 128
  48. #define PB_DIV_MIN 0
  49. /* Reference Oscillator Control Reg fields */
  50. #define REFO_SEL_MASK 0x0f
  51. #define REFO_SEL_SHIFT 0
  52. #define REFO_ACTIVE BIT(8)
  53. #define REFO_DIVSW_EN BIT(9)
  54. #define REFO_OE BIT(12)
  55. #define REFO_ON BIT(15)
  56. #define REFO_DIV_SHIFT 16
  57. #define REFO_DIV_MASK 0x7fff
  58. /* Reference Oscillator Trim Register Fields */
  59. #define REFO_TRIM_REG 0x10
  60. #define REFO_TRIM_MASK 0x1ff
  61. #define REFO_TRIM_SHIFT 23
  62. #define REFO_TRIM_MAX 511
  63. /* Mux Slew Control Register fields */
  64. #define SLEW_BUSY BIT(0)
  65. #define SLEW_DOWNEN BIT(1)
  66. #define SLEW_UPEN BIT(2)
  67. #define SLEW_DIV 0x07
  68. #define SLEW_DIV_SHIFT 8
  69. #define SLEW_SYSDIV 0x0f
  70. #define SLEW_SYSDIV_SHIFT 20
  71. /* Clock Poll Timeout */
  72. #define LOCK_TIMEOUT_US USEC_PER_MSEC
/* SoC specific clock needed during SPLL clock rate switch */
static struct clk_hw *pic32_sclk_hw;

/*
 * Add instruction pipeline delay while CPU clock is in-transition.
 * Five back-to-back nops keep the pipeline busy so no meaningful
 * instruction executes while the clock source/divider is switching.
 */
#define cpu_nop5()			\
do {					\
	__asm__ __volatile__("nop");	\
	__asm__ __volatile__("nop");	\
	__asm__ __volatile__("nop");	\
	__asm__ __volatile__("nop");	\
	__asm__ __volatile__("nop");	\
} while (0)
/* Peripheral bus clock: gate + programmable divider fed from sysclk. */
struct pic32_periph_clk {
	struct clk_hw hw;		/* clk framework handle */
	void __iomem *ctrl_reg;		/* divider/enable control register */
	struct pic32_clk_common *core;	/* shared iobase, reg_lock, dev */
};

#define clkhw_to_pbclk(_hw)	container_of(_hw, struct pic32_periph_clk, hw)
/* Report whether the peripheral bus clock gate is open (non-zero = on). */
static int pbclk_is_enabled(struct clk_hw *hw)
{
	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);

	return readl(pb->ctrl_reg) & PB_DIV_ENABLE;
}
/* Ungate the bus clock via the atomic SET alias of the control register. */
static int pbclk_enable(struct clk_hw *hw)
{
	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);

	writel(PB_DIV_ENABLE, PIC32_SET(pb->ctrl_reg));
	return 0;
}
/* Gate the bus clock via the atomic CLR alias of the control register. */
static void pbclk_disable(struct clk_hw *hw)
{
	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);

	writel(PB_DIV_ENABLE, PIC32_CLR(pb->ctrl_reg));
}
  107. static unsigned long calc_best_divided_rate(unsigned long rate,
  108. unsigned long parent_rate,
  109. u32 divider_max,
  110. u32 divider_min)
  111. {
  112. unsigned long divided_rate, divided_rate_down, best_rate;
  113. unsigned long div, div_up;
  114. /* eq. clk_rate = parent_rate / divider.
  115. *
  116. * Find best divider to produce closest of target divided rate.
  117. */
  118. div = parent_rate / rate;
  119. div = clamp_val(div, divider_min, divider_max);
  120. div_up = clamp_val(div + 1, divider_min, divider_max);
  121. divided_rate = parent_rate / div;
  122. divided_rate_down = parent_rate / div_up;
  123. if (abs(rate - divided_rate_down) < abs(rate - divided_rate))
  124. best_rate = divided_rate_down;
  125. else
  126. best_rate = divided_rate;
  127. return best_rate;
  128. }
/* Read the hardware divider field and convert to an actual divider (+1). */
static inline u32 pbclk_read_pbdiv(struct pic32_periph_clk *pb)
{
	return ((readl(pb->ctrl_reg) >> PB_DIV_SHIFT) & PB_DIV_MASK) + 1;
}
/* Bus clock rate = parent rate divided by the programmed divider. */
static unsigned long pbclk_recalc_rate(struct clk_hw *hw,
				       unsigned long parent_rate)
{
	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);

	return parent_rate / pbclk_read_pbdiv(pb);
}
/* Round @rate to the nearest value reachable with a PB divider. */
static long pbclk_round_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long *parent_rate)
{
	return calc_best_divided_rate(rate, *parent_rate,
				      PB_DIV_MAX, PB_DIV_MIN);
}
/*
 * Program a new PB divider.  The hardware only accepts a write while
 * DIV_READY is set, so the register is polled both before and after the
 * update, and the readback is verified.
 *
 * Returns 0 on success, a poll-timeout error, or -EBUSY if the divider
 * read back does not match the requested one.
 */
static int pbclk_set_rate(struct clk_hw *hw, unsigned long rate,
			  unsigned long parent_rate)
{
	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
	unsigned long flags;
	u32 v, div;
	int err;

	/* check & wait for DIV_READY */
	err = readl_poll_timeout(pb->ctrl_reg, v, v & PB_DIV_READY,
				 1, LOCK_TIMEOUT_US);
	if (err)
		return err;

	/* calculate clkdiv and best rate */
	div = DIV_ROUND_CLOSEST(parent_rate, rate);
	/* NOTE(review): assumes rate <= 2 * parent_rate so div >= 1;
	 * div == 0 would underflow the field below - confirm callers.
	 */

	spin_lock_irqsave(&pb->core->reg_lock, flags);

	/* apply new div; hardware field encodes (divider - 1) */
	v = readl(pb->ctrl_reg);
	v &= ~PB_DIV_MASK;
	v |= (div - 1);

	pic32_syskey_unlock();

	writel(v, pb->ctrl_reg);

	spin_unlock_irqrestore(&pb->core->reg_lock, flags);

	/* wait again for DIV_READY */
	err = readl_poll_timeout(pb->ctrl_reg, v, v & PB_DIV_READY,
				 1, LOCK_TIMEOUT_US);
	if (err)
		return err;

	/* confirm that new div is applied correctly */
	return (pbclk_read_pbdiv(pb) == div) ? 0 : -EBUSY;
}
/* clk_ops for peripheral bus clocks (gate + programmable divider). */
const struct clk_ops pic32_pbclk_ops = {
	.enable		= pbclk_enable,
	.disable	= pbclk_disable,
	.is_enabled	= pbclk_is_enabled,
	.recalc_rate	= pbclk_recalc_rate,
	.round_rate	= pbclk_round_rate,
	.set_rate	= pbclk_set_rate,
};
  183. struct clk *pic32_periph_clk_register(const struct pic32_periph_clk_data *desc,
  184. struct pic32_clk_common *core)
  185. {
  186. struct pic32_periph_clk *pbclk;
  187. struct clk *clk;
  188. pbclk = devm_kzalloc(core->dev, sizeof(*pbclk), GFP_KERNEL);
  189. if (!pbclk)
  190. return ERR_PTR(-ENOMEM);
  191. pbclk->hw.init = &desc->init_data;
  192. pbclk->core = core;
  193. pbclk->ctrl_reg = desc->ctrl_reg + core->iobase;
  194. clk = devm_clk_register(core->dev, &pbclk->hw);
  195. if (IS_ERR(clk)) {
  196. dev_err(core->dev, "%s: clk_register() failed\n", __func__);
  197. devm_kfree(core->dev, pbclk);
  198. }
  199. return clk;
  200. }
/* Reference oscillator operations */
struct pic32_ref_osc {
	struct clk_hw hw;
	void __iomem *ctrl_reg;		/* control reg; trim reg at +REFO_TRIM_REG */
	const u32 *parent_map;		/* optional index -> hw-select translation */
	struct pic32_clk_common *core;
};

#define clkhw_to_refosc(_hw)	container_of(_hw, struct pic32_ref_osc, hw)
/* Report whether the reference oscillator is running (ON bit set). */
static int roclk_is_enabled(struct clk_hw *hw)
{
	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);

	return readl(refo->ctrl_reg) & REFO_ON;
}
/* Turn the reference oscillator on and enable its output pin driver. */
static int roclk_enable(struct clk_hw *hw)
{
	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);

	writel(REFO_ON | REFO_OE, PIC32_SET(refo->ctrl_reg));
	return 0;
}
/* Turn the reference oscillator off and disable its output driver. */
static void roclk_disable(struct clk_hw *hw)
{
	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);

	writel(REFO_ON | REFO_OE, PIC32_CLR(refo->ctrl_reg));
}
/* clk framework init hook: start with the clock gated off. */
static void roclk_init(struct clk_hw *hw)
{
	/* initialize clock in disabled state */
	roclk_disable(hw);
}
  230. static u8 roclk_get_parent(struct clk_hw *hw)
  231. {
  232. struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
  233. u32 v, i;
  234. v = (readl(refo->ctrl_reg) >> REFO_SEL_SHIFT) & REFO_SEL_MASK;
  235. if (!refo->parent_map)
  236. return v;
  237. for (i = 0; i < clk_hw_get_num_parents(hw); i++)
  238. if (refo->parent_map[i] == v)
  239. return i;
  240. return -EINVAL;
  241. }
  242. static unsigned long roclk_calc_rate(unsigned long parent_rate,
  243. u32 rodiv, u32 rotrim)
  244. {
  245. u64 rate64;
  246. /* fout = fin / [2 * {div + (trim / 512)}]
  247. * = fin * 512 / [1024 * div + 2 * trim]
  248. * = fin * 256 / (512 * div + trim)
  249. * = (fin << 8) / ((div << 9) + trim)
  250. */
  251. if (rotrim) {
  252. rodiv = (rodiv << 9) + rotrim;
  253. rate64 = parent_rate;
  254. rate64 <<= 8;
  255. do_div(rate64, rodiv);
  256. } else if (rodiv) {
  257. rate64 = parent_rate / (rodiv << 1);
  258. } else {
  259. rate64 = parent_rate;
  260. }
  261. return rate64;
  262. }
/*
 * Derive integer divider (@rodiv_p) and 9-bit fractional trim (@rotrim_p)
 * for a target @rate from @parent_rate.  Either output pointer may be
 * NULL if the caller does not need that value.
 */
static void roclk_calc_div_trim(unsigned long rate,
				unsigned long parent_rate,
				u32 *rodiv_p, u32 *rotrim_p)
{
	u32 div, rotrim, rodiv;
	u64 frac;

	/* Find integer approximation of floating-point arithmetic.
	 * fout = fin / [2 * {rodiv + (rotrim / 512)}] ... (1)
	 * i.e. fout = fin / 2 * DIV
	 * whereas DIV = rodiv + (rotrim / 512)
	 *
	 * Since kernel does not perform floating-point arithmatic so
	 * (rotrim/512) will be zero. And DIV & rodiv will result same.
	 *
	 * ie. fout = (fin * 256) / [(512 * rodiv) + rotrim] ... from (1)
	 * ie. rotrim = ((fin * 256) / fout) - (512 * DIV)
	 */
	if (parent_rate <= rate) {
		/* cannot divide up: pass parent through (div = trim = 0) */
		div = 0;
		frac = 0;
		rodiv = 0;
		rotrim = 0;
	} else {
		div = parent_rate / (rate << 1);
		frac = parent_rate;
		frac <<= 8;
		do_div(frac, rate);
		frac -= (u64)(div << 9);

		/* clamp both fields to their register widths */
		rodiv = (div > REFO_DIV_MASK) ? REFO_DIV_MASK : div;
		rotrim = (frac >= REFO_TRIM_MAX) ? REFO_TRIM_MAX : frac;
	}

	if (rodiv_p)
		*rodiv_p = rodiv;

	if (rotrim_p)
		*rotrim_p = rotrim;
}
  299. static unsigned long roclk_recalc_rate(struct clk_hw *hw,
  300. unsigned long parent_rate)
  301. {
  302. struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
  303. u32 v, rodiv, rotrim;
  304. /* get rodiv */
  305. v = readl(refo->ctrl_reg);
  306. rodiv = (v >> REFO_DIV_SHIFT) & REFO_DIV_MASK;
  307. /* get trim */
  308. v = readl(refo->ctrl_reg + REFO_TRIM_REG);
  309. rotrim = (v >> REFO_TRIM_SHIFT) & REFO_TRIM_MASK;
  310. return roclk_calc_rate(parent_rate, rodiv, rotrim);
  311. }
/* Round @rate to the nearest value reachable with div + trim settings. */
static long roclk_round_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long *parent_rate)
{
	u32 rotrim, rodiv;

	/* calculate dividers for new rate */
	roclk_calc_div_trim(rate, *parent_rate, &rodiv, &rotrim);

	/* caclulate new rate (rounding) based on new rodiv & rotrim */
	return roclk_calc_rate(*parent_rate, rodiv, rotrim);
}
  321. static int roclk_determine_rate(struct clk_hw *hw,
  322. struct clk_rate_request *req)
  323. {
  324. struct clk_hw *parent_clk, *best_parent_clk = NULL;
  325. unsigned int i, delta, best_delta = -1;
  326. unsigned long parent_rate, best_parent_rate = 0;
  327. unsigned long best = 0, nearest_rate;
  328. /* find a parent which can generate nearest clkrate >= rate */
  329. for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
  330. /* get parent */
  331. parent_clk = clk_hw_get_parent_by_index(hw, i);
  332. if (!parent_clk)
  333. continue;
  334. /* skip if parent runs slower than target rate */
  335. parent_rate = clk_hw_get_rate(parent_clk);
  336. if (req->rate > parent_rate)
  337. continue;
  338. nearest_rate = roclk_round_rate(hw, req->rate, &parent_rate);
  339. delta = abs(nearest_rate - req->rate);
  340. if ((nearest_rate >= req->rate) && (delta < best_delta)) {
  341. best_parent_clk = parent_clk;
  342. best_parent_rate = parent_rate;
  343. best = nearest_rate;
  344. best_delta = delta;
  345. if (delta == 0)
  346. break;
  347. }
  348. }
  349. /* if no match found, retain old rate */
  350. if (!best_parent_clk) {
  351. pr_err("%s:%s, no parent found for rate %lu.\n",
  352. __func__, clk_hw_get_name(hw), req->rate);
  353. return clk_hw_get_rate(hw);
  354. }
  355. pr_debug("%s,rate %lu, best_parent(%s, %lu), best %lu, delta %d\n",
  356. clk_hw_get_name(hw), req->rate,
  357. clk_hw_get_name(best_parent_clk), best_parent_rate,
  358. best, best_delta);
  359. if (req->best_parent_rate)
  360. req->best_parent_rate = best_parent_rate;
  361. if (req->best_parent_hw)
  362. req->best_parent_hw = best_parent_clk;
  363. return best;
  364. }
/*
 * Re-mux the reference oscillator input.  The hardware refuses a source
 * change while the clock is active, so ACTIVE is polled first; the
 * select field is then rewritten under the register spinlock.
 */
static int roclk_set_parent(struct clk_hw *hw, u8 index)
{
	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
	unsigned long flags;
	u32 v;
	int err;

	/* translate framework index to hardware select code, if mapped */
	if (refo->parent_map)
		index = refo->parent_map[index];

	/* wait until ACTIVE bit is zero or timeout */
	err = readl_poll_timeout(refo->ctrl_reg, v, !(v & REFO_ACTIVE),
				 1, LOCK_TIMEOUT_US);
	if (err) {
		pr_err("%s: poll failed, clk active\n", clk_hw_get_name(hw));
		return err;
	}

	spin_lock_irqsave(&refo->core->reg_lock, flags);

	pic32_syskey_unlock();

	/* calculate & apply new */
	v = readl(refo->ctrl_reg);
	v &= ~(REFO_SEL_MASK << REFO_SEL_SHIFT);
	v |= index << REFO_SEL_SHIFT;

	writel(v, refo->ctrl_reg);

	spin_unlock_irqrestore(&refo->core->reg_lock, flags);

	return 0;
}
/*
 * Atomically reprogram source select, divider and trim, then pulse
 * DIVSW_EN to make the hardware latch the new divider.  The clock is
 * momentarily enabled for the divider switch and gated again afterwards
 * (enable state is otherwise managed by roclk_enable/roclk_disable).
 */
static int roclk_set_rate_and_parent(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate,
				     u8 index)
{
	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
	unsigned long flags;
	u32 trim, rodiv, v;
	int err;

	/* calculate new rodiv & rotrim for new rate */
	roclk_calc_div_trim(rate, parent_rate, &rodiv, &trim);

	pr_debug("parent_rate = %lu, rate = %lu, div = %d, trim = %d\n",
		 parent_rate, rate, rodiv, trim);

	/* wait till source change is active */
	err = readl_poll_timeout(refo->ctrl_reg, v,
				 !(v & (REFO_ACTIVE | REFO_DIVSW_EN)),
				 1, LOCK_TIMEOUT_US);
	if (err) {
		pr_err("%s: poll timedout, clock is still active\n", __func__);
		return err;
	}

	spin_lock_irqsave(&refo->core->reg_lock, flags);

	v = readl(refo->ctrl_reg);

	pic32_syskey_unlock();

	/* apply parent, if required */
	if (refo->parent_map)
		index = refo->parent_map[index];

	v &= ~(REFO_SEL_MASK << REFO_SEL_SHIFT);
	v |= index << REFO_SEL_SHIFT;

	/* apply RODIV */
	v &= ~(REFO_DIV_MASK << REFO_DIV_SHIFT);
	v |= rodiv << REFO_DIV_SHIFT;

	writel(v, refo->ctrl_reg);

	/* apply ROTRIM (separate register) */
	v = readl(refo->ctrl_reg + REFO_TRIM_REG);
	v &= ~(REFO_TRIM_MASK << REFO_TRIM_SHIFT);
	v |= trim << REFO_TRIM_SHIFT;

	writel(v, refo->ctrl_reg + REFO_TRIM_REG);

	/* enable & activate divider switching */
	writel(REFO_ON | REFO_DIVSW_EN, PIC32_SET(refo->ctrl_reg));

	/* wait till divswen is in-progress */
	err = readl_poll_timeout_atomic(refo->ctrl_reg, v, !(v & REFO_DIVSW_EN),
					1, LOCK_TIMEOUT_US);

	/* leave the clk gated as it was */
	writel(REFO_ON, PIC32_CLR(refo->ctrl_reg));

	spin_unlock_irqrestore(&refo->core->reg_lock, flags);

	return err;
}
/* Rate-only change: reuse set_rate_and_parent with the current parent. */
static int roclk_set_rate(struct clk_hw *hw, unsigned long rate,
			  unsigned long parent_rate)
{
	u8 index = roclk_get_parent(hw);

	return roclk_set_rate_and_parent(hw, rate, parent_rate, index);
}
/* clk_ops for reference oscillator clocks (mux + fractional divider). */
const struct clk_ops pic32_roclk_ops = {
	.enable			= roclk_enable,
	.disable		= roclk_disable,
	.is_enabled		= roclk_is_enabled,
	.get_parent		= roclk_get_parent,
	.set_parent		= roclk_set_parent,
	.determine_rate		= roclk_determine_rate,
	.recalc_rate		= roclk_recalc_rate,
	.set_rate_and_parent	= roclk_set_rate_and_parent,
	.set_rate		= roclk_set_rate,
	.init			= roclk_init,
};
  456. struct clk *pic32_refo_clk_register(const struct pic32_ref_osc_data *data,
  457. struct pic32_clk_common *core)
  458. {
  459. struct pic32_ref_osc *refo;
  460. struct clk *clk;
  461. refo = devm_kzalloc(core->dev, sizeof(*refo), GFP_KERNEL);
  462. if (!refo)
  463. return ERR_PTR(-ENOMEM);
  464. refo->core = core;
  465. refo->hw.init = &data->init_data;
  466. refo->ctrl_reg = data->ctrl_reg + core->iobase;
  467. refo->parent_map = data->parent_map;
  468. clk = devm_clk_register(core->dev, &refo->hw);
  469. if (IS_ERR(clk))
  470. dev_err(core->dev, "%s: clk_register() failed\n", __func__);
  471. return clk;
  472. }
/* System PLL: fout = (parent / idiv) * mult / (1 << odiv). */
struct pic32_sys_pll {
	struct clk_hw hw;
	void __iomem *ctrl_reg;		/* multiplier/divider control */
	void __iomem *status_reg;	/* lock status */
	u32 lock_mask;			/* lock bit(s) in status_reg */
	u32 idiv;			/* PLL iclk divider, treated fixed */
	struct pic32_clk_common *core;
};

#define clkhw_to_spll(_hw)	container_of(_hw, struct pic32_sys_pll, hw)
  482. static inline u32 spll_odiv_to_divider(u32 odiv)
  483. {
  484. odiv = clamp_val(odiv, PLL_ODIV_MIN, PLL_ODIV_MAX);
  485. return 1 << odiv;
  486. }
/*
 * Exhaustively search multiplier/output-divider combinations for the
 * nearest achievable PLL rate >= @rate.  On success returns the best
 * rate and stores the register-encoded multiplier (mult - 1) and odiv
 * exponent through the optional out-pointers; returns 0 if no
 * combination reaches @rate.
 */
static unsigned long spll_calc_mult_div(struct pic32_sys_pll *pll,
					unsigned long rate,
					unsigned long parent_rate,
					u32 *mult_p, u32 *odiv_p)
{
	u32 mul, div, best_mul = 1, best_div = 1;
	unsigned long new_rate, best_rate = rate;
	unsigned int best_delta = -1, delta, match_found = 0;
	u64 rate64;

	/* PLL input is the parent after the fixed input divider */
	parent_rate /= pll->idiv;

	for (mul = 1; mul <= PLL_MULT_MAX; mul++) {
		for (div = PLL_ODIV_MIN; div <= PLL_ODIV_MAX; div++) {
			rate64 = parent_rate;
			rate64 *= mul;
			do_div(rate64, 1 << div);
			new_rate = rate64;
			delta = abs(rate - new_rate);
			if ((new_rate >= rate) && (delta < best_delta)) {
				best_delta = delta;
				best_rate = new_rate;
				best_mul = mul;
				best_div = div;
				match_found = 1;
			}
		}
	}

	if (!match_found) {
		pr_warn("spll: no match found\n");
		return 0;
	}

	pr_debug("rate %lu, par_rate %lu/mult %u, div %u, best_rate %lu\n",
		 rate, parent_rate, best_mul, best_div, best_rate);

	/* hardware multiplier field encodes (mult - 1) */
	if (mult_p)
		*mult_p = best_mul - 1;

	if (odiv_p)
		*odiv_p = best_div;

	return best_rate;
}
  525. static unsigned long spll_clk_recalc_rate(struct clk_hw *hw,
  526. unsigned long parent_rate)
  527. {
  528. struct pic32_sys_pll *pll = clkhw_to_spll(hw);
  529. unsigned long pll_in_rate;
  530. u32 mult, odiv, div, v;
  531. u64 rate64;
  532. v = readl(pll->ctrl_reg);
  533. odiv = ((v >> PLL_ODIV_SHIFT) & PLL_ODIV_MASK);
  534. mult = ((v >> PLL_MULT_SHIFT) & PLL_MULT_MASK) + 1;
  535. div = spll_odiv_to_divider(odiv);
  536. /* pll_in_rate = parent_rate / idiv
  537. * pll_out_rate = pll_in_rate * mult / div;
  538. */
  539. pll_in_rate = parent_rate / pll->idiv;
  540. rate64 = pll_in_rate;
  541. rate64 *= mult;
  542. do_div(rate64, div);
  543. return rate64;
  544. }
/* Round @rate to the nearest rate the PLL can actually produce. */
static long spll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *parent_rate)
{
	struct pic32_sys_pll *pll = clkhw_to_spll(hw);

	return spll_calc_mult_div(pll, rate, *parent_rate, NULL, NULL);
}
/*
 * Reprogram the PLL multiplier/divider.  Refuses with -EBUSY while the
 * PLL is the active SYSCLK parent (its counters cannot be changed
 * in-use), then writes the new settings and busy-waits for lock.
 */
static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct pic32_sys_pll *pll = clkhw_to_spll(hw);
	unsigned long ret, flags;
	u32 mult, odiv, v;
	int err;

	ret = spll_calc_mult_div(pll, rate, parent_rate, &mult, &odiv);
	if (!ret)
		return -EINVAL;

	/*
	 * We can't change SPLL counters when it is in-active use
	 * by SYSCLK. So check before applying new counters/rate.
	 */

	/* Is spll_clk active parent of sys_clk ? */
	if (unlikely(clk_hw_get_parent(pic32_sclk_hw) == hw)) {
		pr_err("%s: failed, clk in-use\n", __func__);
		return -EBUSY;
	}

	spin_lock_irqsave(&pll->core->reg_lock, flags);

	/* apply new multiplier & divisor */
	v = readl(pll->ctrl_reg);
	v &= ~(PLL_MULT_MASK << PLL_MULT_SHIFT);
	v &= ~(PLL_ODIV_MASK << PLL_ODIV_SHIFT);
	v |= (mult << PLL_MULT_SHIFT) | (odiv << PLL_ODIV_SHIFT);

	/* sys unlock before write */
	pic32_syskey_unlock();

	writel(v, pll->ctrl_reg);
	cpu_relax();

	/* insert few nops (5-stage) to ensure CPU does not hang */
	cpu_nop5();
	cpu_nop5();

	/* Wait until PLL is locked (maximum 100 usecs). */
	err = readl_poll_timeout_atomic(pll->status_reg, v,
					v & pll->lock_mask, 1, 100);

	spin_unlock_irqrestore(&pll->core->reg_lock, flags);

	return err;
}
/* SPLL clock operation */
const struct clk_ops pic32_spll_ops = {
	.recalc_rate	= spll_clk_recalc_rate,
	.round_rate	= spll_clk_round_rate,
	.set_rate	= spll_clk_set_rate,
};
  595. struct clk *pic32_spll_clk_register(const struct pic32_sys_pll_data *data,
  596. struct pic32_clk_common *core)
  597. {
  598. struct pic32_sys_pll *spll;
  599. struct clk *clk;
  600. spll = devm_kzalloc(core->dev, sizeof(*spll), GFP_KERNEL);
  601. if (!spll)
  602. return ERR_PTR(-ENOMEM);
  603. spll->core = core;
  604. spll->hw.init = &data->init_data;
  605. spll->ctrl_reg = data->ctrl_reg + core->iobase;
  606. spll->status_reg = data->status_reg + core->iobase;
  607. spll->lock_mask = data->lock_mask;
  608. /* cache PLL idiv; PLL driver uses it as constant.*/
  609. spll->idiv = (readl(spll->ctrl_reg) >> PLL_IDIV_SHIFT) & PLL_IDIV_MASK;
  610. spll->idiv += 1;
  611. clk = devm_clk_register(core->dev, &spll->hw);
  612. if (IS_ERR(clk))
  613. dev_err(core->dev, "sys_pll: clk_register() failed\n");
  614. return clk;
  615. }
/* System mux clock(aka SCLK) */
struct pic32_sys_clk {
	struct clk_hw hw;
	void __iomem *mux_reg;		/* oscillator mux / switch control */
	void __iomem *slew_reg;		/* slew control + system divider */
	u32 slew_div;			/* slew divider applied at init, 0 = off */
	const u32 *parent_map;		/* optional index -> osc code translation */
	struct pic32_clk_common *core;
};

#define clkhw_to_sys_clk(_hw)	container_of(_hw, struct pic32_sys_clk, hw)
  626. static unsigned long sclk_get_rate(struct clk_hw *hw, unsigned long parent_rate)
  627. {
  628. struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
  629. u32 div;
  630. div = (readl(sclk->slew_reg) >> SLEW_SYSDIV_SHIFT) & SLEW_SYSDIV;
  631. div += 1; /* sys-div to divider */
  632. return parent_rate / div;
  633. }
/* Round @rate to the nearest value reachable with the SYSDIV divider. */
static long sclk_round_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long *parent_rate)
{
	return calc_best_divided_rate(rate, *parent_rate, SLEW_SYSDIV, 1);
}
/*
 * Program a new SYSDIV divider and wait for the slew state machine to
 * finish (SLEW_BUSY clears).  Returns 0 or a poll-timeout error.
 */
static int sclk_set_rate(struct clk_hw *hw,
			 unsigned long rate, unsigned long parent_rate)
{
	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
	unsigned long flags;
	u32 v, div;
	int err;

	div = parent_rate / rate;

	spin_lock_irqsave(&sclk->core->reg_lock, flags);

	/* apply new div; field encodes (divider - 1) */
	v = readl(sclk->slew_reg);
	v &= ~(SLEW_SYSDIV << SLEW_SYSDIV_SHIFT);
	v |= (div - 1) << SLEW_SYSDIV_SHIFT;

	pic32_syskey_unlock();

	writel(v, sclk->slew_reg);

	/* wait until BUSY is cleared */
	err = readl_poll_timeout_atomic(sclk->slew_reg, v,
					!(v & SLEW_BUSY), 1, LOCK_TIMEOUT_US);

	spin_unlock_irqrestore(&sclk->core->reg_lock, flags);

	return err;
}
  660. static u8 sclk_get_parent(struct clk_hw *hw)
  661. {
  662. struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
  663. u32 i, v;
  664. v = (readl(sclk->mux_reg) >> OSC_CUR_SHIFT) & OSC_CUR_MASK;
  665. if (!sclk->parent_map)
  666. return v;
  667. for (i = 0; i < clk_hw_get_num_parents(hw); i++)
  668. if (sclk->parent_map[i] == v)
  669. return i;
  670. return -EINVAL;
  671. }
  672. static int sclk_set_parent(struct clk_hw *hw, u8 index)
  673. {
  674. struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
  675. unsigned long flags;
  676. u32 nosc, cosc, v;
  677. int err;
  678. spin_lock_irqsave(&sclk->core->reg_lock, flags);
  679. /* find new_osc */
  680. nosc = sclk->parent_map ? sclk->parent_map[index] : index;
  681. /* set new parent */
  682. v = readl(sclk->mux_reg);
  683. v &= ~(OSC_NEW_MASK << OSC_NEW_SHIFT);
  684. v |= nosc << OSC_NEW_SHIFT;
  685. pic32_syskey_unlock();
  686. writel(v, sclk->mux_reg);
  687. /* initate switch */
  688. writel(OSC_SWEN, PIC32_SET(sclk->mux_reg));
  689. cpu_relax();
  690. /* add nop to flush pipeline (as cpu_clk is in-flux) */
  691. cpu_nop5();
  692. /* wait for SWEN bit to clear */
  693. err = readl_poll_timeout_atomic(sclk->slew_reg, v,
  694. !(v & OSC_SWEN), 1, LOCK_TIMEOUT_US);
  695. spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
  696. /*
  697. * SCLK clock-switching logic might reject a clock switching request
  698. * if pre-requisites (like new clk_src not present or unstable) are
  699. * not met.
  700. * So confirm before claiming success.
  701. */
  702. cosc = (readl(sclk->mux_reg) >> OSC_CUR_SHIFT) & OSC_CUR_MASK;
  703. if (cosc != nosc) {
  704. pr_err("%s: err, failed to set_parent() to %d, current %d\n",
  705. clk_hw_get_name(hw), nosc, cosc);
  706. err = -EBUSY;
  707. }
  708. return err;
  709. }
/*
 * clk framework init hook for SYSCLK: record the clk_hw (needed by
 * spll_clk_set_rate() to refuse in-use PLL changes) and, if configured,
 * enable the slew divider for both up- and down-scaling.
 */
static void sclk_init(struct clk_hw *hw)
{
	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
	unsigned long flags;
	u32 v;

	/* Maintain reference to this clk, required in spll_clk_set_rate() */
	pic32_sclk_hw = hw;

	/* apply slew divider on both up and down scaling */
	if (sclk->slew_div) {
		spin_lock_irqsave(&sclk->core->reg_lock, flags);
		v = readl(sclk->slew_reg);
		v &= ~(SLEW_DIV << SLEW_DIV_SHIFT);
		v |= sclk->slew_div << SLEW_DIV_SHIFT;
		v |= SLEW_DOWNEN | SLEW_UPEN;
		writel(v, sclk->slew_reg);
		spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
	}
}
/* sclk with post-divider */
const struct clk_ops pic32_sclk_ops = {
	.get_parent	= sclk_get_parent,
	.set_parent	= sclk_set_parent,
	.round_rate	= sclk_round_rate,
	.set_rate	= sclk_set_rate,
	.recalc_rate	= sclk_get_rate,
	.init		= sclk_init,
	.determine_rate	= __clk_mux_determine_rate,
};
/* sclk with no slew and no post-divider */
const struct clk_ops pic32_sclk_no_div_ops = {
	.get_parent	= sclk_get_parent,
	.set_parent	= sclk_set_parent,
	.init		= sclk_init,
	.determine_rate	= __clk_mux_determine_rate,
};
  745. struct clk *pic32_sys_clk_register(const struct pic32_sys_clk_data *data,
  746. struct pic32_clk_common *core)
  747. {
  748. struct pic32_sys_clk *sclk;
  749. struct clk *clk;
  750. sclk = devm_kzalloc(core->dev, sizeof(*sclk), GFP_KERNEL);
  751. if (!sclk)
  752. return ERR_PTR(-ENOMEM);
  753. sclk->core = core;
  754. sclk->hw.init = &data->init_data;
  755. sclk->mux_reg = data->mux_reg + core->iobase;
  756. sclk->slew_reg = data->slew_reg + core->iobase;
  757. sclk->slew_div = data->slew_div;
  758. sclk->parent_map = data->parent_map;
  759. clk = devm_clk_register(core->dev, &sclk->hw);
  760. if (IS_ERR(clk))
  761. dev_err(core->dev, "%s: clk register failed\n", __func__);
  762. return clk;
  763. }
/* secondary oscillator */
struct pic32_sec_osc {
	struct clk_hw hw;
	void __iomem *enable_reg;	/* register holding the enable bit */
	void __iomem *status_reg;	/* register holding the ready bit */
	u32 enable_mask;		/* enable bit(s) in enable_reg */
	u32 status_mask;		/* ready bit(s) in status_reg */
	unsigned long fixed_rate;	/* crystal rate; never changes */
	struct pic32_clk_common *core;
};

#define clkhw_to_sosc(_hw)	container_of(_hw, struct pic32_sec_osc, hw)
/*
 * Enable the secondary oscillator and busy-wait (up to 100us) for the
 * ready status, covering the crystal warm-up period.
 */
static int sosc_clk_enable(struct clk_hw *hw)
{
	struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
	u32 v;

	/* enable SOSC */
	pic32_syskey_unlock();
	writel(sosc->enable_mask, PIC32_SET(sosc->enable_reg));

	/* wait till warm-up period expires or ready-status is updated */
	return readl_poll_timeout_atomic(sosc->status_reg, v,
					 v & sosc->status_mask, 1, 100);
}
/* Disable the secondary oscillator (requires syskey unlock first). */
static void sosc_clk_disable(struct clk_hw *hw)
{
	struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);

	pic32_syskey_unlock();
	writel(sosc->enable_mask, PIC32_CLR(sosc->enable_reg));
}
  792. static int sosc_clk_is_enabled(struct clk_hw *hw)
  793. {
  794. struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
  795. u32 enabled, ready;
  796. /* check enabled and ready status */
  797. enabled = readl(sosc->enable_reg) & sosc->enable_mask;
  798. ready = readl(sosc->status_reg) & sosc->status_mask;
  799. return enabled && ready;
  800. }
/* Fixed-rate clock: always report the configured crystal rate. */
static unsigned long sosc_clk_calc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	return clkhw_to_sosc(hw)->fixed_rate;
}
/* clk_ops for the fixed-rate secondary oscillator. */
const struct clk_ops pic32_sosc_ops = {
	.enable		= sosc_clk_enable,
	.disable	= sosc_clk_disable,
	.is_enabled	= sosc_clk_is_enabled,
	.recalc_rate	= sosc_clk_calc_rate,
};
  812. struct clk *pic32_sosc_clk_register(const struct pic32_sec_osc_data *data,
  813. struct pic32_clk_common *core)
  814. {
  815. struct pic32_sec_osc *sosc;
  816. sosc = devm_kzalloc(core->dev, sizeof(*sosc), GFP_KERNEL);
  817. if (!sosc)
  818. return ERR_PTR(-ENOMEM);
  819. sosc->core = core;
  820. sosc->hw.init = &data->init_data;
  821. sosc->fixed_rate = data->fixed_rate;
  822. sosc->enable_mask = data->enable_mask;
  823. sosc->status_mask = data->status_mask;
  824. sosc->enable_reg = data->enable_reg + core->iobase;
  825. sosc->status_reg = data->status_reg + core->iobase;
  826. return devm_clk_register(core->dev, &sosc->hw);
  827. }