// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"

#define RPMH_TIMEOUT_MS		msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name)	\
	struct rpmh_request name = {			\
		.msg = {				\
			.state = s,			\
			.cmds = name.cmd,		\
			.num_cmds = 0,			\
			.wait_for_compl = true,		\
		},					\
		.cmd = { { 0 } },			\
		.completion = q,			\
		.dev = dev,				\
		.needs_free = false,			\
	}

#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req: the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};
/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
	struct list_head list;
	int count;
	struct rpmh_request rpm_msgs[];
};
static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
	struct rsc_drv *drv = dev_get_drvdata(dev->parent);

	return &drv->client;
}

void rpmh_tx_done(const struct tcs_request *msg, int r)
{
	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
						    msg);
	struct completion *compl = rpm_msg->completion;
	bool free = rpm_msg->needs_free;

	rpm_msg->err = r;

	if (r)
		dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
			rpm_msg->msg.cmds[0].addr, r);

	if (!compl)
		goto exit;

	/* Signal the blocking thread we are done */
	complete(compl);

exit:
	if (free)
		kfree(rpm_msg);
}

static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
	struct cache_req *p, *req = NULL;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
					   enum rpmh_state state,
					   struct tcs_cmd *cmd)
{
	struct cache_req *req;
	unsigned long flags;
	u32 old_sleep_val, old_wake_val;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	req = __find_req(ctrlr, cmd->addr);
	if (req)
		goto existing;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	list_add_tail(&req->list, &ctrlr->cache);

existing:
	old_sleep_val = req->sleep_val;
	old_wake_val = req->wake_val;

	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	}

	ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
			 req->wake_val != old_wake_val) &&
			 req->sleep_val != UINT_MAX &&
			 req->wake_val != UINT_MAX;

unlock:
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
			struct rpmh_request *rpm_msg)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret = -EINVAL;
	struct cache_req *req;
	int i;

	rpm_msg->msg.state = state;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	rpm_msg->msg.state = state;

	if (state == RPMH_ACTIVE_ONLY_STATE) {
		WARN_ON(irqs_disabled());
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
	} else {
		/* Clean up our call by spoofing tx_done */
		ret = 0;
		rpmh_tx_done(&rpm_msg->msg, ret);
	}

	return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
			   const struct tcs_cmd *cmd, u32 n)
{
	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(req->cmd, cmd, n * sizeof(*cmd));

	req->msg.state = state;
	req->msg.cmds = req->cmd;
	req->msg.num_cmds = n;

	return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a set of RPMH commands, the order of commands is maintained
 * and will be sent as a single shot.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 n)
{
	struct rpmh_request *rpm_msg;
	int ret;

	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
	if (!rpm_msg)
		return -ENOMEM;
	rpm_msg->needs_free = true;

	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
	if (ret) {
		kfree(rpm_msg);
		return ret;
	}

	return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);
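
/*
 * Illustrative sketch only, not part of the driver: a hypothetical RPMH
 * client could queue a fire-and-forget active-only vote roughly like this.
 * EXAMPLE_RES_ADDR and the data value are made-up placeholders.
 *
 *	struct tcs_cmd cmd = {
 *		.addr = EXAMPLE_RES_ADDR,
 *		.data = 0x1,
 *	};
 *	int ret;
 *
 *	ret = rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *	if (ret)
 *		dev_err(dev, "async RPMH vote failed: %d\n", ret);
 */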
/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
	       const struct tcs_cmd *cmd, u32 n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
	int ret;

	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
	rpm_msg.msg.num_cmds = n;

	ret = __rpmh_write(dev, state, &rpm_msg);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
	WARN_ON(!ret);

	return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(rpmh_write);
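
/*
 * Illustrative sketch only, not part of the driver: a hypothetical client
 * in process context could issue a blocking active-only vote and rely on
 * the completion/timeout handling above. EXAMPLE_RES_ADDR is a made-up
 * placeholder.
 *
 *	struct tcs_cmd cmd = {
 *		.addr = EXAMPLE_RES_ADDR,
 *		.data = 0x2,
 *	};
 *	int ret;
 *
 *	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *	if (ret)
 *		dev_err(dev, "blocking RPMH vote failed: %d\n", ret);
 */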
static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_add_tail(&req->list, &ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req;
	const struct rpmh_request *rpm_msg;
	unsigned long flags;
	int ret = 0;
	int i;

	/* Send Sleep/Wake requests to the controller, expect no response */
	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry(req, &ctrlr->batch_cache, list) {
		for (i = 0; i < req->count; i++) {
			rpm_msg = req->rpm_msgs + i;
			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
						       &rpm_msg->msg);
			if (ret)
				break;
		}
	}
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return ret;
}
/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0 terminated.
 *
 * Write a request to the RSC controller without caching. If the request
 * state is ACTIVE, then the requests are treated as completion requests
 * and sent to the controller immediately. The function waits until all the
 * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
 * request is sent as fire-n-forget and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 *n)
{
	struct batch_cache_req *req;
	struct rpmh_request *rpm_msgs;
	struct completion *compls;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	unsigned long time_left;
	int count = 0;
	int ret, i;
	void *ptr;

	if (!cmd || !n)
		return -EINVAL;

	while (n[count] > 0)
		count++;
	if (!count)
		return -EINVAL;

	ptr = kzalloc(sizeof(*req) +
		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
		      GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	req = ptr;
	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

	req->count = count;
	rpm_msgs = req->rpm_msgs;

	for (i = 0; i < count; i++) {
		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
		cmd += n[i];
	}

	if (state != RPMH_ACTIVE_ONLY_STATE) {
		cache_batch(ctrlr, req);
		return 0;
	}

	for (i = 0; i < count; i++) {
		struct completion *compl = &compls[i];

		init_completion(compl);
		rpm_msgs[i].completion = compl;
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
		if (ret) {
			pr_err("Error(%d) sending RPMH message addr=%#x\n",
			       ret, rpm_msgs[i].msg.cmds[0].addr);
			break;
		}
	}

	time_left = RPMH_TIMEOUT_MS;
	while (i--) {
		time_left = wait_for_completion_timeout(&compls[i], time_left);
		if (!time_left) {
			/*
			 * Better hope they never finish because they'll signal
			 * the completion that we're going to free once
			 * we've returned from this function.
			 */
			WARN_ON(1);
			ret = -ETIMEDOUT;
			goto exit;
		}
	}

exit:
	kfree(ptr);

	return ret;
}
EXPORT_SYMBOL(rpmh_write_batch);
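
/*
 * Illustrative sketch only, not part of the driver: a hypothetical client
 * could send two batches (two commands, then one command) in a single call.
 * The @n array is 0-terminated, matching the counting loop above; the
 * addresses and data are made-up placeholders.
 *
 *	struct tcs_cmd cmds[3] = {
 *		{ .addr = EXAMPLE_RES_A, .data = 0x1 },
 *		{ .addr = EXAMPLE_RES_B, .data = 0x2 },
 *		{ .addr = EXAMPLE_RES_C, .data = 0x3 },
 *	};
 *	u32 n[] = { 2, 1, 0 };
 *	int ret;
 *
 *	ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
 *	if (ret)
 *		dev_err(dev, "RPMH batch write failed: %d\n", ret);
 */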
static int is_req_valid(struct cache_req *req)
{
	return (req->sleep_val != UINT_MAX &&
		req->wake_val != UINT_MAX &&
		req->sleep_val != req->wake_val);
}

static int send_single(const struct device *dev, enum rpmh_state state,
		       u32 addr, u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);

	/* Wake sets are always complete and sleep sets are not */
	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_cmds = 1;

	return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}
/**
 * rpmh_flush: Flushes the buffered sleep and wake sets to TCS
 *
 * @dev: The device making the request
 *
 * Return: -EBUSY if the controller is busy, probably waiting on a response
 * to an RPMH request sent earlier.
 *
 * This function is always called from the sleep code from the last CPU
 * that is powering down the entire system. Since no other RPMH API would be
 * executing at this time, it is safe to run lockless.
 */
int rpmh_flush(const struct device *dev)
{
	struct cache_req *p;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret;

	if (!ctrlr->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		return 0;
	}

	/* Invalidate the TCSes first to avoid stale data */
	do {
		ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
	} while (ret == -EAGAIN);
	if (ret)
		return ret;

	/* First flush the cached batch requests */
	ret = flush_batch(ctrlr);
	if (ret)
		return ret;

	/*
	 * Nobody else should be calling this function other than system PM,
	 * hence we can run without locks.
	 */
	list_for_each_entry(p, &ctrlr->cache, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
				 __func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
		if (ret)
			return ret;
		ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
				  p->addr, p->wake_val);
		if (ret)
			return ret;
	}

	ctrlr->dirty = false;

	return 0;
}
EXPORT_SYMBOL(rpmh_flush);
/**
 * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and wake values in batch_cache.
 */
int rpmh_invalidate(const struct device *dev)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	struct batch_cache_req *req, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
		kfree(req);
	INIT_LIST_HEAD(&ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return 0;
}
EXPORT_SYMBOL(rpmh_invalidate);
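
/*
 * Illustrative sketch only, not part of the driver: a hypothetical client
 * refreshing its votes would typically drop the stale batch cache, write new
 * SLEEP and WAKE_ONLY values (which are only cached, per __rpmh_write()), and
 * leave it to the system PM path on the last CPU going down to call
 * rpmh_flush(). EXAMPLE_RES_ADDR and the data values are made-up placeholders.
 *
 *	struct tcs_cmd cmd = { .addr = EXAMPLE_RES_ADDR };
 *
 *	rpmh_invalidate(dev);
 *
 *	cmd.data = 0x0;
 *	rpmh_write(dev, RPMH_SLEEP_STATE, &cmd, 1);
 *
 *	cmd.data = 0x1;
 *	rpmh_write(dev, RPMH_WAKE_ONLY_STATE, &cmd, 1);
 */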