clk-gate.c

/*
 * mmp gate clock operation source file
 *
 * Copyright (C) 2014 Marvell
 * Chao Xie <chao.xie@marvell.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>

#include "clk.h"

/*
 * Some clocks need multiple bits to be set to enable them, and the
 * bits used to disable the clock are not the same as the enable bits.
 */
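
/*
 * Illustrative example with hypothetical values (not taken from any real
 * MMP register layout): a gate with mask = 0x7, val_enable = 0x3 and
 * val_disable = 0x4 is enabled by reg = (reg & ~0x7) | 0x3 and disabled
 * by reg = (reg & ~0x7) | 0x4, i.e. the disable value is not simply the
 * enable bits cleared.
 */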

#define to_clk_mmp_gate(hw)	container_of(hw, struct mmp_clk_gate, hw)

static int mmp_clk_gate_enable(struct clk_hw *hw)
{
	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
	struct clk *clk = hw->clk;
	unsigned long flags = 0;
	unsigned long rate;
	u32 tmp;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);

	tmp = readl(gate->reg);
	tmp &= ~gate->mask;
	tmp |= gate->val_enable;
	writel(tmp, gate->reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);

	if (gate->flags & MMP_CLK_GATE_NEED_DELAY) {
		rate = __clk_get_rate(clk);
		/*
		 * Need to delay 2 clock cycles: 2 / rate seconds is
		 * 2000000 / rate microseconds.
		 */
		udelay(2000000/rate);
	}

	return 0;
}

static void mmp_clk_gate_disable(struct clk_hw *hw)
{
	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
	unsigned long flags = 0;
	u32 tmp;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);

	tmp = readl(gate->reg);
	tmp &= ~gate->mask;
	tmp |= gate->val_disable;
	writel(tmp, gate->reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);
}

static int mmp_clk_gate_is_enabled(struct clk_hw *hw)
{
	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
	unsigned long flags = 0;
	u32 tmp;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);

	tmp = readl(gate->reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);

	return (tmp & gate->mask) == gate->val_enable;
}

const struct clk_ops mmp_clk_gate_ops = {
	.enable = mmp_clk_gate_enable,
	.disable = mmp_clk_gate_disable,
	.is_enabled = mmp_clk_gate_is_enabled,
};

struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u32 mask, u32 val_enable, u32 val_disable,
		unsigned int gate_flags, spinlock_t *lock)
{
	struct mmp_clk_gate *gate;
	struct clk *clk;
	struct clk_init_data init;

	/* allocate the gate */
	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate) {
		pr_err("%s:%s could not allocate gate clk\n", __func__, name);
		return ERR_PTR(-ENOMEM);
	}

	init.name = name;
	init.ops = &mmp_clk_gate_ops;
	init.flags = flags | CLK_IS_BASIC;
	init.parent_names = (parent_name ? &parent_name : NULL);
	init.num_parents = (parent_name ? 1 : 0);

	/* struct mmp_clk_gate assignments */
	gate->reg = reg;
	gate->mask = mask;
	gate->val_enable = val_enable;
	gate->val_disable = val_disable;
	gate->flags = gate_flags;
	gate->lock = lock;
	gate->hw.init = &init;

	clk = clk_register(dev, &gate->hw);

	if (IS_ERR(clk))
		kfree(gate);

	return clk;
}
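
/*
 * Example usage sketch (illustrative only): the clock names, register
 * offset, mask and bit values below are hypothetical, not taken from any
 * real MMP datasheet; apbc_base stands for an already-mapped register base.
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *	struct clk *clk;
 *
 *	clk = mmp_clk_register_gate(NULL, "sdh0_gate", "sdh0_mix", 0,
 *				apbc_base + 0x54, 0x1b, 0x1b, 0x0,
 *				MMP_CLK_GATE_NEED_DELAY, &example_lock);
 *	if (IS_ERR(clk))
 *		pr_err("failed to register sdh0 gate clock\n");
 */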