v4l2-clk.c

/*
 * V4L2 clock service
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <media/v4l2-clk.h>
#include <media/v4l2-subdev.h>

static DEFINE_MUTEX(clk_lock);
static LIST_HEAD(clk_list);

/* Look a clock up by its consumer's device name; clk_lock must be held. */
static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
{
	struct v4l2_clk *clk;

	list_for_each_entry(clk, &clk_list, list)
		if (!strcmp(dev_id, clk->dev_id))
			return clk;

	return ERR_PTR(-ENODEV);
}

struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
{
	struct v4l2_clk *clk;
	struct clk *ccf_clk = clk_get(dev, id);
	char clk_name[V4L2_CLK_NAME_SIZE];

	if (PTR_ERR(ccf_clk) == -EPROBE_DEFER)
		return ERR_PTR(-EPROBE_DEFER);

	/* Prefer a clock provided by the common clock framework */
	if (!IS_ERR_OR_NULL(ccf_clk)) {
		clk = kzalloc(sizeof(*clk), GFP_KERNEL);
		if (!clk) {
			clk_put(ccf_clk);
			return ERR_PTR(-ENOMEM);
		}
		clk->clk = ccf_clk;

		return clk;
	}

	mutex_lock(&clk_lock);
	clk = v4l2_clk_find(dev_name(dev));

	/* if the dev_name is not found, try using the OF name to search again */
	if (PTR_ERR(clk) == -ENODEV && dev->of_node) {
		v4l2_clk_name_of(clk_name, sizeof(clk_name), dev->of_node);
		clk = v4l2_clk_find(clk_name);
	}

	if (!IS_ERR(clk))
		atomic_inc(&clk->use_count);
	mutex_unlock(&clk_lock);

	return clk;
}
EXPORT_SYMBOL(v4l2_clk_get);

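/*
 * Example (an illustrative sketch, not part of the original file): a sensor
 * subdevice driver would typically look its clock up from probe(), handling
 * probe deferral.  The "mclk" connection id below is a hypothetical name.
 *
 *	struct v4l2_clk *mclk = v4l2_clk_get(&client->dev, "mclk");
 *
 *	if (IS_ERR(mclk))
 *		return PTR_ERR(mclk);	(this may be -EPROBE_DEFER)
 */
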
void v4l2_clk_put(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;

	if (IS_ERR(clk))
		return;

	if (clk->clk) {
		clk_put(clk->clk);
		kfree(clk);
		return;
	}

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk)
			atomic_dec(&clk->use_count);

	mutex_unlock(&clk_lock);
}
EXPORT_SYMBOL(v4l2_clk_put);

/* Pin the clock provider's module while the clock is in active use */
static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;
	int ret = -ENODEV;

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk) {
			ret = !try_module_get(clk->ops->owner);
			if (ret)
				ret = -EFAULT;
			break;
		}

	mutex_unlock(&clk_lock);

	return ret;
}

/* Drop the module reference taken by v4l2_clk_lock_driver() */
static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
{
	module_put(clk->ops->owner);
}

int v4l2_clk_enable(struct v4l2_clk *clk)
{
	int ret;

	if (clk->clk)
		return clk_prepare_enable(clk->clk);

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);

	if (++clk->enable == 1 && clk->ops->enable) {
		ret = clk->ops->enable(clk);
		if (ret < 0)
			clk->enable--;
	}

	mutex_unlock(&clk->lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_enable);

/*
 * You might Oops if you try to disable a clock that is already disabled,
 * because then the driver isn't locked and could have been unloaded by now,
 * so don't do that.
 */
void v4l2_clk_disable(struct v4l2_clk *clk)
{
	int enable;

	if (clk->clk)
		return clk_disable_unprepare(clk->clk);

	mutex_lock(&clk->lock);

	enable = --clk->enable;
	if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
		 clk->dev_id))
		clk->enable++;
	else if (!enable && clk->ops->disable)
		clk->ops->disable(clk);

	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);
}
EXPORT_SYMBOL(v4l2_clk_disable);

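/*
 * Example (an illustrative sketch, not part of the original file): enable
 * and disable calls must stay strictly balanced, since v4l2_clk_disable()
 * drops the provider module reference taken by v4l2_clk_enable().
 *
 *	ret = v4l2_clk_enable(mclk);
 *	if (ret < 0)
 *		return ret;
 *	... use the clock, e.g. while streaming ...
 *	v4l2_clk_disable(mclk);
 */
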
unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
{
	int ret;

	if (clk->clk)
		return clk_get_rate(clk->clk);

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->get_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->get_rate(clk);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_get_rate);

int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
{
	int ret;

	if (clk->clk) {
		/* CCF clock: round the requested rate first, then apply it */
		long r = clk_round_rate(clk->clk, rate);

		if (r < 0)
			return r;
		return clk_set_rate(clk->clk, r);
	}

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->set_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->set_rate(clk, rate);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_set_rate);

struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
				   const char *dev_id,
				   void *priv)
{
	struct v4l2_clk *clk;
	int ret;

	if (!ops || !dev_id)
		return ERR_PTR(-EINVAL);

	clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
	if (!clk->dev_id) {
		ret = -ENOMEM;
		goto ealloc;
	}

	clk->ops = ops;
	clk->priv = priv;
	atomic_set(&clk->use_count, 0);
	mutex_init(&clk->lock);

	mutex_lock(&clk_lock);
	if (!IS_ERR(v4l2_clk_find(dev_id))) {
		mutex_unlock(&clk_lock);
		ret = -EEXIST;
		goto eexist;
	}
	list_add_tail(&clk->list, &clk_list);
	mutex_unlock(&clk_lock);

	return clk;

eexist:
ealloc:
	kfree(clk->dev_id);
	kfree(clk);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(v4l2_clk_register);

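/*
 * Example (an illustrative sketch, not part of the original file): a camera
 * host / bridge driver can register a clock on behalf of an attached I2C
 * sensor, naming it so that the sensor's v4l2_clk_get() finds it.  The
 * host_clk_ops callbacks and the "host" private pointer are hypothetical.
 *
 *	static const struct v4l2_clk_ops host_clk_ops = {
 *		.owner		= THIS_MODULE,
 *		.enable		= host_clk_enable,
 *		.disable	= host_clk_disable,
 *		.get_rate	= host_clk_get_rate,
 *	};
 *
 *	char clk_name[V4L2_CLK_NAME_SIZE];
 *
 *	v4l2_clk_name_i2c(clk_name, sizeof(clk_name), adapter_id, address);
 *	host->clk = v4l2_clk_register(&host_clk_ops, clk_name, host);
 *	if (IS_ERR(host->clk))
 *		return PTR_ERR(host->clk);
 */
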
void v4l2_clk_unregister(struct v4l2_clk *clk)
{
	if (WARN(atomic_read(&clk->use_count),
		 "%s(): Refusing to unregister ref-counted %s clock!\n",
		 __func__, clk->dev_id))
		return;

	mutex_lock(&clk_lock);
	list_del(&clk->list);
	mutex_unlock(&clk_lock);

	kfree(clk->dev_id);
	kfree(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister);

/* Simple fixed-rate clock, registered via __v4l2_clk_register_fixed() */
struct v4l2_clk_fixed {
	unsigned long rate;
	struct v4l2_clk_ops ops;
};

static unsigned long fixed_get_rate(struct v4l2_clk *clk)
{
	struct v4l2_clk_fixed *priv = clk->priv;

	return priv->rate;
}

struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
				unsigned long rate, struct module *owner)
{
	struct v4l2_clk *clk;
	struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->rate = rate;
	priv->ops.get_rate = fixed_get_rate;
	priv->ops.owner = owner;

	clk = v4l2_clk_register(&priv->ops, dev_id, priv);
	if (IS_ERR(clk))
		kfree(priv);

	return clk;
}
EXPORT_SYMBOL(__v4l2_clk_register_fixed);

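/*
 * Example (an illustrative sketch, not part of the original file): a driver
 * exposing a crystal-driven sensor clock can register a fixed-rate clock via
 * the v4l2_clk_register_fixed() wrapper from media/v4l2-clk.h, which passes
 * THIS_MODULE as the owner.  The 24 MHz rate and clk_name are illustrative.
 *
 *	clk = v4l2_clk_register_fixed(clk_name, 24000000);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */
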
void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
{
	kfree(clk->priv);
	v4l2_clk_unregister(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister_fixed);