/*
 * mmp mix (div and mux) clock operation source file
 *
 * Copyright (C) 2014 Marvell
 * Chao Xie <chao.xie@marvell.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
  11. #include <linux/clk-provider.h>
  12. #include <linux/slab.h>
  13. #include <linux/io.h>
  14. #include <linux/err.h>
  15. #include "clk.h"
/*
 * The mix clock is a clock that combines a mux and a div. Because the
 * div field and the mux field need to be set at the same time, it can
 * not be split into two separate clock types.
 */
/* Retrieve the containing struct mmp_clk_mix from its embedded clk_hw. */
#define to_clk_mix(hw) container_of(hw, struct mmp_clk_mix, hw)
  22. static unsigned int _get_maxdiv(struct mmp_clk_mix *mix)
  23. {
  24. unsigned int div_mask = (1 << mix->reg_info.width_div) - 1;
  25. unsigned int maxdiv = 0;
  26. struct clk_div_table *clkt;
  27. if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
  28. return div_mask;
  29. if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
  30. return 1 << div_mask;
  31. if (mix->div_table) {
  32. for (clkt = mix->div_table; clkt->div; clkt++)
  33. if (clkt->div > maxdiv)
  34. maxdiv = clkt->div;
  35. return maxdiv;
  36. }
  37. return div_mask + 1;
  38. }
  39. static unsigned int _get_div(struct mmp_clk_mix *mix, unsigned int val)
  40. {
  41. struct clk_div_table *clkt;
  42. if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
  43. return val;
  44. if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
  45. return 1 << val;
  46. if (mix->div_table) {
  47. for (clkt = mix->div_table; clkt->div; clkt++)
  48. if (clkt->val == val)
  49. return clkt->div;
  50. if (clkt->div == 0)
  51. return 0;
  52. }
  53. return val + 1;
  54. }
  55. static unsigned int _get_mux(struct mmp_clk_mix *mix, unsigned int val)
  56. {
  57. int num_parents = __clk_get_num_parents(mix->hw.clk);
  58. int i;
  59. if (mix->mux_flags & CLK_MUX_INDEX_BIT)
  60. return ffs(val) - 1;
  61. if (mix->mux_flags & CLK_MUX_INDEX_ONE)
  62. return val - 1;
  63. if (mix->mux_table) {
  64. for (i = 0; i < num_parents; i++)
  65. if (mix->mux_table[i] == val)
  66. return i;
  67. if (i == num_parents)
  68. return 0;
  69. }
  70. return val;
  71. }
  72. static unsigned int _get_div_val(struct mmp_clk_mix *mix, unsigned int div)
  73. {
  74. struct clk_div_table *clkt;
  75. if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
  76. return div;
  77. if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
  78. return __ffs(div);
  79. if (mix->div_table) {
  80. for (clkt = mix->div_table; clkt->div; clkt++)
  81. if (clkt->div == div)
  82. return clkt->val;
  83. if (clkt->div == 0)
  84. return 0;
  85. }
  86. return div - 1;
  87. }
  88. static unsigned int _get_mux_val(struct mmp_clk_mix *mix, unsigned int mux)
  89. {
  90. if (mix->mux_table)
  91. return mix->mux_table[mux];
  92. return mux;
  93. }
  94. static void _filter_clk_table(struct mmp_clk_mix *mix,
  95. struct mmp_clk_mix_clk_table *table,
  96. unsigned int table_size)
  97. {
  98. int i;
  99. struct mmp_clk_mix_clk_table *item;
  100. struct clk *parent, *clk;
  101. unsigned long parent_rate;
  102. clk = mix->hw.clk;
  103. for (i = 0; i < table_size; i++) {
  104. item = &table[i];
  105. parent = clk_get_parent_by_index(clk, item->parent_index);
  106. parent_rate = __clk_get_rate(parent);
  107. if (parent_rate % item->rate) {
  108. item->valid = 0;
  109. } else {
  110. item->divisor = parent_rate / item->rate;
  111. item->valid = 1;
  112. }
  113. }
  114. }
/*
 * Program the mux and/or div fields of the mix clock in one register
 * update, then perform the hardware "frequency change" handshake that
 * the IP version requires.
 *
 * @mix:        the mix clock being updated
 * @mux_val:    raw value for the mux field (used when @change_mux)
 * @div_val:    raw value for the div field (used when @change_div)
 * @change_mux: nonzero to update the mux field
 * @change_div: nonzero to update the div field
 *
 * Returns 0 on success, -EINVAL when neither field is to be changed,
 * -EBUSY when a V2 frequency-change request does not complete.
 */
static int _set_rate(struct mmp_clk_mix *mix, u32 mux_val, u32 div_val,
			unsigned int change_mux, unsigned int change_div)
{
	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
	u8 width, shift;
	u32 mux_div, fc_req;
	int ret, timeout = 50;
	unsigned long flags = 0;

	if (!change_mux && !change_div)
		return -EINVAL;

	if (mix->lock)
		spin_lock_irqsave(mix->lock, flags);

	/* V1/V2 keep mux+div in the ctrl register, V3 in the sel register. */
	if (mix->type == MMP_CLK_MIX_TYPE_V1
		|| mix->type == MMP_CLK_MIX_TYPE_V2)
		mux_div = readl(ri->reg_clk_ctrl);
	else
		mux_div = readl(ri->reg_clk_sel);

	/* Read-modify-write only the requested fields. */
	if (change_div) {
		width = ri->width_div;
		shift = ri->shift_div;
		mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
		mux_div |= MMP_CLK_BITS_SET_VAL(div_val, width, shift);
	}

	if (change_mux) {
		width = ri->width_mux;
		shift = ri->shift_mux;
		mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
		mux_div |= MMP_CLK_BITS_SET_VAL(mux_val, width, shift);
	}

	if (mix->type == MMP_CLK_MIX_TYPE_V1) {
		/* V1: a plain register write takes effect immediately. */
		writel(mux_div, ri->reg_clk_ctrl);
	} else if (mix->type == MMP_CLK_MIX_TYPE_V2) {
		/*
		 * V2: set the FC (frequency change) request bit together
		 * with the new values, then poll until hardware clears it.
		 */
		mux_div |= (1 << ri->bit_fc);
		writel(mux_div, ri->reg_clk_ctrl);
		do {
			fc_req = readl(ri->reg_clk_ctrl);
			timeout--;
			if (!(fc_req & (1 << ri->bit_fc)))
				break;
		} while (timeout);
		if (timeout == 0) {
			pr_err("%s:%s cannot do frequency change\n",
				__func__, __clk_get_name(mix->hw.clk));
			ret = -EBUSY;
			goto error;
		}
	} else {
		/*
		 * V3: raise the FC bit in the ctrl register, then write the
		 * new mux/div values to the sel register.
		 */
		fc_req = readl(ri->reg_clk_ctrl);
		fc_req |= 1 << ri->bit_fc;
		writel(fc_req, ri->reg_clk_ctrl);
		writel(mux_div, ri->reg_clk_sel);
		/*
		 * NOTE(review): fc_req is cleared here but never written
		 * back — this looks like a missing writel(fc_req,
		 * ri->reg_clk_ctrl). Confirm against the hardware spec
		 * before changing register traffic.
		 */
		fc_req &= ~(1 << ri->bit_fc);
	}
	ret = 0;
error:
	if (mix->lock)
		spin_unlock_irqrestore(mix->lock, flags);
	return ret;
}
  174. static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
  175. unsigned long min_rate,
  176. unsigned long max_rate,
  177. unsigned long *best_parent_rate,
  178. struct clk_hw **best_parent_clk)
  179. {
  180. struct mmp_clk_mix *mix = to_clk_mix(hw);
  181. struct mmp_clk_mix_clk_table *item;
  182. struct clk *parent, *parent_best, *mix_clk;
  183. unsigned long parent_rate, mix_rate, mix_rate_best, parent_rate_best;
  184. unsigned long gap, gap_best;
  185. u32 div_val_max;
  186. unsigned int div;
  187. int i, j;
  188. mix_clk = hw->clk;
  189. parent = NULL;
  190. mix_rate_best = 0;
  191. parent_rate_best = 0;
  192. gap_best = rate;
  193. parent_best = NULL;
  194. if (mix->table) {
  195. for (i = 0; i < mix->table_size; i++) {
  196. item = &mix->table[i];
  197. if (item->valid == 0)
  198. continue;
  199. parent = clk_get_parent_by_index(mix_clk,
  200. item->parent_index);
  201. parent_rate = __clk_get_rate(parent);
  202. mix_rate = parent_rate / item->divisor;
  203. gap = abs(mix_rate - rate);
  204. if (parent_best == NULL || gap < gap_best) {
  205. parent_best = parent;
  206. parent_rate_best = parent_rate;
  207. mix_rate_best = mix_rate;
  208. gap_best = gap;
  209. if (gap_best == 0)
  210. goto found;
  211. }
  212. }
  213. } else {
  214. for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
  215. parent = clk_get_parent_by_index(mix_clk, i);
  216. parent_rate = __clk_get_rate(parent);
  217. div_val_max = _get_maxdiv(mix);
  218. for (j = 0; j < div_val_max; j++) {
  219. div = _get_div(mix, j);
  220. mix_rate = parent_rate / div;
  221. gap = abs(mix_rate - rate);
  222. if (parent_best == NULL || gap < gap_best) {
  223. parent_best = parent;
  224. parent_rate_best = parent_rate;
  225. mix_rate_best = mix_rate;
  226. gap_best = gap;
  227. if (gap_best == 0)
  228. goto found;
  229. }
  230. }
  231. }
  232. }
  233. found:
  234. *best_parent_rate = parent_rate_best;
  235. *best_parent_clk = __clk_get_hw(parent_best);
  236. return mix_rate_best;
  237. }
  238. static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw,
  239. unsigned long rate,
  240. unsigned long parent_rate,
  241. u8 index)
  242. {
  243. struct mmp_clk_mix *mix = to_clk_mix(hw);
  244. unsigned int div;
  245. u32 div_val, mux_val;
  246. div = parent_rate / rate;
  247. div_val = _get_div_val(mix, div);
  248. mux_val = _get_mux_val(mix, index);
  249. return _set_rate(mix, mux_val, div_val, 1, 1);
  250. }
  251. static u8 mmp_clk_mix_get_parent(struct clk_hw *hw)
  252. {
  253. struct mmp_clk_mix *mix = to_clk_mix(hw);
  254. struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
  255. unsigned long flags = 0;
  256. u32 mux_div = 0;
  257. u8 width, shift;
  258. u32 mux_val;
  259. if (mix->lock)
  260. spin_lock_irqsave(mix->lock, flags);
  261. if (mix->type == MMP_CLK_MIX_TYPE_V1
  262. || mix->type == MMP_CLK_MIX_TYPE_V2)
  263. mux_div = readl(ri->reg_clk_ctrl);
  264. else
  265. mux_div = readl(ri->reg_clk_sel);
  266. if (mix->lock)
  267. spin_unlock_irqrestore(mix->lock, flags);
  268. width = mix->reg_info.width_mux;
  269. shift = mix->reg_info.shift_mux;
  270. mux_val = MMP_CLK_BITS_GET_VAL(mux_div, width, shift);
  271. return _get_mux(mix, mux_val);
  272. }
  273. static unsigned long mmp_clk_mix_recalc_rate(struct clk_hw *hw,
  274. unsigned long parent_rate)
  275. {
  276. struct mmp_clk_mix *mix = to_clk_mix(hw);
  277. struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
  278. unsigned long flags = 0;
  279. u32 mux_div = 0;
  280. u8 width, shift;
  281. unsigned int div;
  282. if (mix->lock)
  283. spin_lock_irqsave(mix->lock, flags);
  284. if (mix->type == MMP_CLK_MIX_TYPE_V1
  285. || mix->type == MMP_CLK_MIX_TYPE_V2)
  286. mux_div = readl(ri->reg_clk_ctrl);
  287. else
  288. mux_div = readl(ri->reg_clk_sel);
  289. if (mix->lock)
  290. spin_unlock_irqrestore(mix->lock, flags);
  291. width = mix->reg_info.width_div;
  292. shift = mix->reg_info.shift_div;
  293. div = _get_div(mix, MMP_CLK_BITS_GET_VAL(mux_div, width, shift));
  294. return parent_rate / div;
  295. }
  296. static int mmp_clk_set_parent(struct clk_hw *hw, u8 index)
  297. {
  298. struct mmp_clk_mix *mix = to_clk_mix(hw);
  299. struct mmp_clk_mix_clk_table *item;
  300. int i;
  301. u32 div_val, mux_val;
  302. if (mix->table) {
  303. for (i = 0; i < mix->table_size; i++) {
  304. item = &mix->table[i];
  305. if (item->valid == 0)
  306. continue;
  307. if (item->parent_index == index)
  308. break;
  309. }
  310. if (i < mix->table_size) {
  311. div_val = _get_div_val(mix, item->divisor);
  312. mux_val = _get_mux_val(mix, item->parent_index);
  313. } else
  314. return -EINVAL;
  315. } else {
  316. mux_val = _get_mux_val(mix, index);
  317. div_val = 0;
  318. }
  319. return _set_rate(mix, mux_val, div_val, 1, div_val ? 1 : 0);
  320. }
  321. static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
  322. unsigned long best_parent_rate)
  323. {
  324. struct mmp_clk_mix *mix = to_clk_mix(hw);
  325. struct mmp_clk_mix_clk_table *item;
  326. unsigned long parent_rate;
  327. unsigned int best_divisor;
  328. struct clk *mix_clk, *parent;
  329. int i;
  330. best_divisor = best_parent_rate / rate;
  331. mix_clk = hw->clk;
  332. if (mix->table) {
  333. for (i = 0; i < mix->table_size; i++) {
  334. item = &mix->table[i];
  335. if (item->valid == 0)
  336. continue;
  337. parent = clk_get_parent_by_index(mix_clk,
  338. item->parent_index);
  339. parent_rate = __clk_get_rate(parent);
  340. if (parent_rate == best_parent_rate
  341. && item->divisor == best_divisor)
  342. break;
  343. }
  344. if (i < mix->table_size)
  345. return _set_rate(mix,
  346. _get_mux_val(mix, item->parent_index),
  347. _get_div_val(mix, item->divisor),
  348. 1, 1);
  349. else
  350. return -EINVAL;
  351. } else {
  352. for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
  353. parent = clk_get_parent_by_index(mix_clk, i);
  354. parent_rate = __clk_get_rate(parent);
  355. if (parent_rate == best_parent_rate)
  356. break;
  357. }
  358. if (i < __clk_get_num_parents(mix_clk))
  359. return _set_rate(mix, _get_mux_val(mix, i),
  360. _get_div_val(mix, best_divisor), 1, 1);
  361. else
  362. return -EINVAL;
  363. }
  364. }
  365. static void mmp_clk_mix_init(struct clk_hw *hw)
  366. {
  367. struct mmp_clk_mix *mix = to_clk_mix(hw);
  368. if (mix->table)
  369. _filter_clk_table(mix, mix->table, mix->table_size);
  370. }
/* Clock framework operations for the combined mux+div (mix) clock. */
const struct clk_ops mmp_clk_mix_ops = {
	.determine_rate = mmp_clk_mix_determine_rate,
	.set_rate_and_parent = mmp_clk_mix_set_rate_and_parent,
	.set_rate = mmp_clk_set_rate,
	.set_parent = mmp_clk_set_parent,
	.get_parent = mmp_clk_mix_get_parent,
	.recalc_rate = mmp_clk_mix_recalc_rate,
	.init = mmp_clk_mix_init,
};
  380. struct clk *mmp_clk_register_mix(struct device *dev,
  381. const char *name,
  382. const char **parent_names,
  383. u8 num_parents,
  384. unsigned long flags,
  385. struct mmp_clk_mix_config *config,
  386. spinlock_t *lock)
  387. {
  388. struct mmp_clk_mix *mix;
  389. struct clk *clk;
  390. struct clk_init_data init;
  391. size_t table_bytes;
  392. mix = kzalloc(sizeof(*mix), GFP_KERNEL);
  393. if (!mix) {
  394. pr_err("%s:%s: could not allocate mmp mix clk\n",
  395. __func__, name);
  396. return ERR_PTR(-ENOMEM);
  397. }
  398. init.name = name;
  399. init.flags = flags | CLK_GET_RATE_NOCACHE;
  400. init.parent_names = parent_names;
  401. init.num_parents = num_parents;
  402. init.ops = &mmp_clk_mix_ops;
  403. memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
  404. if (config->table) {
  405. table_bytes = sizeof(*config->table) * config->table_size;
  406. mix->table = kzalloc(table_bytes, GFP_KERNEL);
  407. if (!mix->table) {
  408. pr_err("%s:%s: could not allocate mmp mix table\n",
  409. __func__, name);
  410. kfree(mix);
  411. return ERR_PTR(-ENOMEM);
  412. }
  413. memcpy(mix->table, config->table, table_bytes);
  414. mix->table_size = config->table_size;
  415. }
  416. if (config->mux_table) {
  417. table_bytes = sizeof(u32) * num_parents;
  418. mix->mux_table = kzalloc(table_bytes, GFP_KERNEL);
  419. if (!mix->mux_table) {
  420. pr_err("%s:%s: could not allocate mmp mix mux-table\n",
  421. __func__, name);
  422. kfree(mix->table);
  423. kfree(mix);
  424. return ERR_PTR(-ENOMEM);
  425. }
  426. memcpy(mix->mux_table, config->mux_table, table_bytes);
  427. }
  428. mix->div_flags = config->div_flags;
  429. mix->mux_flags = config->mux_flags;
  430. mix->lock = lock;
  431. mix->hw.init = &init;
  432. if (config->reg_info.bit_fc >= 32)
  433. mix->type = MMP_CLK_MIX_TYPE_V1;
  434. else if (config->reg_info.reg_clk_sel)
  435. mix->type = MMP_CLK_MIX_TYPE_V3;
  436. else
  437. mix->type = MMP_CLK_MIX_TYPE_V2;
  438. clk = clk_register(dev, &mix->hw);
  439. if (IS_ERR(clk)) {
  440. kfree(mix->mux_table);
  441. kfree(mix->table);
  442. kfree(mix);
  443. }
  444. return clk;
  445. }