/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ      0x02 /* Disable interrupts, don't save state */
#define HWLOCK_RAW      0x03 /* No software lock: caller provides its own serialization */
struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
        int base_id;
};
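
/*
 * Illustrative sketch (not part of the original header): on a board with a
 * single hwspinlock device the base id is simply 0, while a board with two
 * banks would give the second bank a base id past the end of the first.
 * The structure names and the lock count of 32 are assumptions made for
 * this example only.
 *
 *      static struct hwspinlock_pdata first_bank_pdata = {
 *              .base_id = 0,
 *      };
 *
 *      static struct hwspinlock_pdata second_bank_pdata = {
 *              .base_id = 32,
 *      };
 */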

#ifdef CONFIG_HWSPINLOCK

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                const const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
                          unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
                                                     unsigned int id);
int devm_hwspin_lock_unregister(struct device *dev,
                                struct hwspinlock_device *bank);
int devm_hwspin_lock_register(struct device *dev,
                              struct hwspinlock_device *bank,
                              const struct hwspinlock_ops *ops,
                              int base_id, int num_locks);

#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, users of this API will still work.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with
 * which we _do_ want users to fail (there is no point in registering
 * hwspinlock instances if the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
 * users. Others, which care, can still check this with IS_ERR.
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
        return ERR_PTR(-ENODEV);
}

static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
        return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
        return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                          int mode, unsigned long *flags)
{
        return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}

static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
        return 0;
}

static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
        return 0;
}

static inline
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
        return 0;
}

static inline
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
        return 0;
}

static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
        return ERR_PTR(-ENODEV);
}

static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
                                                     unsigned int id)
{
        return ERR_PTR(-ENODEV);
}

#endif /* !CONFIG_HWSPINLOCK */
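
/*
 * Illustrative sketch (not part of the original header): a typical consumer
 * requests a specific lock whose system-wide id comes from the device tree,
 * uses it, and frees it on teardown. "np" and the error handling style are
 * assumptions made for this example only; note that, per the comment above,
 * NULL-checking the request result also works when the framework is disabled.
 *
 *      struct hwspinlock *hwlock;
 *      int id;
 *
 *      id = of_hwspin_lock_get_id(np, 0);
 *      if (id < 0)
 *              return id;
 *
 *      hwlock = hwspin_lock_request_specific(id);
 *      if (!hwlock)
 *              return -EBUSY;
 *
 *      ... use hwspin_lock_timeout() / hwspin_unlock() ...
 *
 *      hwspin_lock_free(hwlock);
 */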

/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (the previous interrupt state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
        return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
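
/*
 * Illustrative sketch (not part of the original header): hwspin_trylock_irqsave()
 * pairs with hwspin_unlock_irqrestore(), and the critical section should be
 * kept short. "my_hwlock" is a hypothetical, previously requested lock.
 *
 *      unsigned long flags;
 *      int ret;
 *
 *      ret = hwspin_trylock_irqsave(my_hwlock, &flags);
 *      if (ret)
 *              return ret;     ret is -EBUSY if the lock is already taken
 *
 *      ... briefly touch the shared resource ...
 *
 *      hwspin_unlock_irqrestore(my_hwlock, &flags);
 */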

/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the caller must protect the acquisition of the hardware lock with
 * a mutex or spinlock to avoid deadlock; in return, the caller may perform
 * time-consuming or sleepable operations while holding the hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
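
/*
 * Illustrative sketch (not part of the original header): in raw mode the
 * caller serializes local access itself, e.g. with a mutex, which makes it
 * possible to sleep while the hardware lock is held. "my_mutex", "my_hwlock"
 * and the 10 ms timeout are hypothetical.
 *
 *      mutex_lock(&my_mutex);
 *      ret = hwspin_lock_timeout_raw(my_hwlock, 10);
 *      if (!ret) {
 *              ... sleepable or time-consuming work ...
 *              hwspin_unlock_raw(my_hwlock);
 *      }
 *      mutex_unlock(&my_mutex);
 */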

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, 0, NULL);
}

/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus the previous interrupt state is saved), so the caller
 * must not sleep, and is advised to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
                                              unsigned int to,
                                              unsigned long *flags)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}
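
/*
 * Illustrative sketch (not part of the original header): a bounded wait with
 * the interrupt state saved and later restored. The 100 ms timeout and
 * "my_hwlock" are assumptions made for this example only.
 *
 *      unsigned long flags;
 *      int ret;
 *
 *      ret = hwspin_lock_timeout_irqsave(my_hwlock, 100, &flags);
 *      if (ret)
 *              return ret;     ret is -ETIMEDOUT if still busy after 100 ms
 *
 *      ... briefly touch the shared resource ...
 *
 *      hwspin_unlock_irqrestore(my_hwlock, &flags);
 */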

/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled, so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: the caller must protect the acquisition of the hardware lock with
 * a mutex or spinlock to avoid deadlock; in return, the caller may perform
 * time-consuming or sleepable operations while holding the hardware lock.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}

/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
                                            unsigned long *flags)
{
        __hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. Should be used to undo, e.g., hwspin_trylock_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, 0, NULL);
}

#endif /* __LINUX_HWSPINLOCK_H */