clk-mux.c

/*
 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Simple multiplexer clock implementation
 */

#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

/*
 * DOC: basic adjustable multiplexer clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is only affected by parent switching. No clk_set_rate support
 * parent - parent is adjustable through clk_set_parent
 */
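
/*
 * Example usage (hypothetical register base, parent names and lock): a
 * two-bit select field at bits [3:2] of a mux register could be handed to
 * the registration helpers below as
 *
 *	static const char * const sel_parents[] = { "pll1", "pll2", "osc", "dummy" };
 *	static DEFINE_SPINLOCK(sel_lock);
 *
 *	struct clk *clk = clk_register_mux(NULL, "periph_sel", sel_parents,
 *					   ARRAY_SIZE(sel_parents),
 *					   CLK_SET_RATE_PARENT,
 *					   base + 0x10, 2, 2, 0, &sel_lock);
 */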

int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
			 unsigned int val)
{
	int num_parents = clk_hw_get_num_parents(hw);

	if (table) {
		int i;

		for (i = 0; i < num_parents; i++)
			if (table[i] == val)
				return i;
		return -EINVAL;
	}

	if (val && (flags & CLK_MUX_INDEX_BIT))
		val = ffs(val) - 1;

	if (val && (flags & CLK_MUX_INDEX_ONE))
		val--;

	if (val >= num_parents)
		return -EINVAL;

	return val;
}
EXPORT_SYMBOL_GPL(clk_mux_val_to_index);

unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index)
{
	unsigned int val = index;

	if (table) {
		val = table[index];
	} else {
		if (flags & CLK_MUX_INDEX_BIT)
			val = 1 << index;

		if (flags & CLK_MUX_INDEX_ONE)
			val++;
	}

	return val;
}
EXPORT_SYMBOL_GPL(clk_mux_index_to_val);
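
/*
 * Mapping illustration (hypothetical values): with table = { 1, 2, 4, 8 },
 * parent index 2 is written to the register as 4 and clk_mux_val_to_index()
 * maps 4 back to index 2. Without a table, CLK_MUX_INDEX_BIT writes index 2
 * as BIT(2) and CLK_MUX_INDEX_ONE writes index 0 as 1.
 */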

static u8 clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val;

	val = clk_readl(mux->reg) >> mux->shift;
	val &= mux->mask;

	return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
}

static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long flags = 0;
	u32 reg;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

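	/*
	 * With CLK_MUX_HIWORD_MASK the upper 16 bits of the register act as a
	 * write-enable mask for the corresponding low-word bits, so the field
	 * can be updated without a read-modify-write of the register.
	 */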
	if (mux->flags & CLK_MUX_HIWORD_MASK) {
		reg = mux->mask << (mux->shift + 16);
	} else {
		reg = clk_readl(mux->reg);
		reg &= ~(mux->mask << mux->shift);
	}
	val = val << mux->shift;
	reg |= val;
	clk_writel(reg, mux->reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}

static int clk_mux_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct clk_mux *mux = to_clk_mux(hw);

	return clk_mux_determine_rate_flags(hw, req, mux->flags);
}

const struct clk_ops clk_mux_ops = {
	.get_parent = clk_mux_get_parent,
	.set_parent = clk_mux_set_parent,
	.determine_rate = clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);

const struct clk_ops clk_mux_ro_ops = {
	.get_parent = clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);

struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
	struct clk_mux *mux;
	struct clk_hw *hw;
	struct clk_init_data init;
	u8 width = 0;
	int ret;

	if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
		width = fls(mask) - ffs(mask) + 1;
		if (width + shift > 16) {
			pr_err("mux value exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the mux */
	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_mux_flags & CLK_MUX_READ_ONLY)
		init.ops = &clk_mux_ro_ops;
	else
		init.ops = &clk_mux_ops;
	init.flags = flags | CLK_IS_BASIC;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	/* struct clk_mux assignments */
	mux->reg = reg;
	mux->shift = shift;
	mux->mask = mask;
	mux->flags = clk_mux_flags;
	mux->lock = lock;
	mux->table = table;
	mux->hw.init = &init;

	hw = &mux->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(mux);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(clk_hw_register_mux_table);

struct clk *clk_register_mux_table(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
	struct clk_hw *hw;

	hw = clk_hw_register_mux_table(dev, name, parent_names, num_parents,
				       flags, reg, shift, mask, clk_mux_flags,
				       table, lock);
	if (IS_ERR(hw))
		return ERR_CAST(hw);
	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_mux_table);

struct clk *clk_register_mux(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u8 width,
		u8 clk_mux_flags, spinlock_t *lock)
{
	u32 mask = BIT(width) - 1;

	return clk_register_mux_table(dev, name, parent_names, num_parents,
				      flags, reg, shift, mask, clk_mux_flags,
				      NULL, lock);
}
EXPORT_SYMBOL_GPL(clk_register_mux);

struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u8 width,
		u8 clk_mux_flags, spinlock_t *lock)
{
	u32 mask = BIT(width) - 1;

	return clk_hw_register_mux_table(dev, name, parent_names, num_parents,
					 flags, reg, shift, mask, clk_mux_flags,
					 NULL, lock);
}
EXPORT_SYMBOL_GPL(clk_hw_register_mux);

void clk_unregister_mux(struct clk *clk)
{
	struct clk_mux *mux;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	mux = to_clk_mux(hw);

	clk_unregister(clk);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_unregister_mux);

void clk_hw_unregister_mux(struct clk_hw *hw)
{
	struct clk_mux *mux;

	mux = to_clk_mux(hw);

	clk_hw_unregister(hw);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_mux);