/* drivers/soc/qcom/rpmh-rsc.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  4. */
  5. #define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
  6. #include <linux/atomic.h>
  7. #include <linux/delay.h>
  8. #include <linux/interrupt.h>
  9. #include <linux/io.h>
  10. #include <linux/kernel.h>
  11. #include <linux/list.h>
  12. #include <linux/of.h>
  13. #include <linux/of_irq.h>
  14. #include <linux/of_platform.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/slab.h>
  17. #include <linux/spinlock.h>
  18. #include <soc/qcom/cmd-db.h>
  19. #include <soc/qcom/tcs.h>
  20. #include <dt-bindings/soc/qcom,rpmh-rsc.h>
  21. #include "rpmh-internal.h"
  22. #define CREATE_TRACE_POINTS
  23. #include "trace-rpmh.h"
  24. #define RSC_DRV_TCS_OFFSET 672
  25. #define RSC_DRV_CMD_OFFSET 20
  26. /* DRV Configuration Information Register */
  27. #define DRV_PRNT_CHLD_CONFIG 0x0C
  28. #define DRV_NUM_TCS_MASK 0x3F
  29. #define DRV_NUM_TCS_SHIFT 6
  30. #define DRV_NCPT_MASK 0x1F
  31. #define DRV_NCPT_SHIFT 27
  32. /* Register offsets */
  33. #define RSC_DRV_IRQ_ENABLE 0x00
  34. #define RSC_DRV_IRQ_STATUS 0x04
  35. #define RSC_DRV_IRQ_CLEAR 0x08
  36. #define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10
  37. #define RSC_DRV_CONTROL 0x14
  38. #define RSC_DRV_STATUS 0x18
  39. #define RSC_DRV_CMD_ENABLE 0x1C
  40. #define RSC_DRV_CMD_MSGID 0x30
  41. #define RSC_DRV_CMD_ADDR 0x34
  42. #define RSC_DRV_CMD_DATA 0x38
  43. #define RSC_DRV_CMD_STATUS 0x3C
  44. #define RSC_DRV_CMD_RESP_DATA 0x40
  45. #define TCS_AMC_MODE_ENABLE BIT(16)
  46. #define TCS_AMC_MODE_TRIGGER BIT(24)
  47. /* TCS CMD register bit mask */
  48. #define CMD_MSGID_LEN 8
  49. #define CMD_MSGID_RESP_REQ BIT(8)
  50. #define CMD_MSGID_WRITE BIT(16)
  51. #define CMD_STATUS_ISSUED BIT(8)
  52. #define CMD_STATUS_COMPL BIT(16)
  53. static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
  54. {
  55. return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
  56. RSC_DRV_CMD_OFFSET * cmd_id);
  57. }
  58. static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id,
  59. u32 data)
  60. {
  61. writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
  62. RSC_DRV_CMD_OFFSET * cmd_id);
  63. }
  64. static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data)
  65. {
  66. writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
  67. }
  68. static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id,
  69. u32 data)
  70. {
  71. writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
  72. for (;;) {
  73. if (data == readl(drv->tcs_base + reg +
  74. RSC_DRV_TCS_OFFSET * tcs_id))
  75. break;
  76. udelay(1);
  77. }
  78. }
  79. static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
  80. {
  81. return !test_bit(tcs_id, drv->tcs_in_use) &&
  82. read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
  83. }
  84. static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
  85. {
  86. return &drv->tcs[type];
  87. }
/*
 * tcs_invalidate() - Clear all commands programmed into every TCS of the
 * given @type, provided none of them is currently busy.
 *
 * Return: 0 on success (or if the group had nothing programmed), -EAGAIN
 * if any TCS of this type is still in use — the caller may retry.
 */
static int tcs_invalidate(struct rsc_drv *drv, int type)
{
	int m;
	struct tcs_group *tcs;

	tcs = get_tcs_of_type(drv, type);

	spin_lock(&tcs->lock);
	/* Nothing was ever cached into this group: done. */
	if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
		spin_unlock(&tcs->lock);
		return 0;
	}

	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
		/* Refuse to clear a TCS that is mid-transfer. */
		if (!tcs_is_free(drv, m)) {
			spin_unlock(&tcs->lock);
			return -EAGAIN;
		}
		/* Disabling every command slot effectively empties the TCS. */
		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
		write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
	}
	/* Forget the cached slot allocations as well. */
	bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
	spin_unlock(&tcs->lock);

	return 0;
}
  110. /**
  111. * rpmh_rsc_invalidate - Invalidate sleep and wake TCSes
  112. *
  113. * @drv: the RSC controller
  114. */
  115. int rpmh_rsc_invalidate(struct rsc_drv *drv)
  116. {
  117. int ret;
  118. ret = tcs_invalidate(drv, SLEEP_TCS);
  119. if (!ret)
  120. ret = tcs_invalidate(drv, WAKE_TCS);
  121. return ret;
  122. }
  123. static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
  124. const struct tcs_request *msg)
  125. {
  126. int type;
  127. struct tcs_group *tcs;
  128. switch (msg->state) {
  129. case RPMH_ACTIVE_ONLY_STATE:
  130. type = ACTIVE_TCS;
  131. break;
  132. case RPMH_WAKE_ONLY_STATE:
  133. type = WAKE_TCS;
  134. break;
  135. case RPMH_SLEEP_STATE:
  136. type = SLEEP_TCS;
  137. break;
  138. default:
  139. return ERR_PTR(-EINVAL);
  140. }
  141. /*
  142. * If we are making an active request on a RSC that does not have a
  143. * dedicated TCS for active state use, then re-purpose a wake TCS to
  144. * send active votes.
  145. */
  146. tcs = get_tcs_of_type(drv, type);
  147. if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
  148. tcs = get_tcs_of_type(drv, WAKE_TCS);
  149. return tcs;
  150. }
  151. static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
  152. int tcs_id)
  153. {
  154. struct tcs_group *tcs;
  155. int i;
  156. for (i = 0; i < TCS_TYPE_NR; i++) {
  157. tcs = &drv->tcs[i];
  158. if (tcs->mask & BIT(tcs_id))
  159. return tcs->req[tcs_id - tcs->offset];
  160. }
  161. return NULL;
  162. }
/*
 * __tcs_set_trigger() - De-assert (and, if @trigger, re-assert) the AMC
 * mode trigger for @tcs_id.  The order of the individual register writes
 * below is hardware-mandated; do not reorder them.
 */
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
	u32 enable;

	/*
	 * HW req: Clear the DRV_CONTROL and enable TCS again
	 * While clearing ensure that the AMC mode trigger is cleared
	 * and then the mode enable is cleared.
	 */
	enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
	enable &= ~TCS_AMC_MODE_TRIGGER;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
	enable &= ~TCS_AMC_MODE_ENABLE;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);

	if (trigger) {
		/* Enable the AMC mode on the TCS and then trigger the TCS */
		enable = TCS_AMC_MODE_ENABLE;
		write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
		enable |= TCS_AMC_MODE_TRIGGER;
		write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
	}
}
  184. static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
  185. {
  186. u32 data;
  187. data = read_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, 0);
  188. if (enable)
  189. data |= BIT(tcs_id);
  190. else
  191. data &= ~BIT(tcs_id);
  192. write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, data);
  193. }
/**
 * tcs_tx_done: TX Done interrupt handler
 *
 * For every TCS flagged in the IRQ status register: verify each of its
 * commands was issued (and completed, where completion was requested),
 * reclaim the TCS, and notify the client via rpmh_tx_done().
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
	struct rsc_drv *drv = p;
	int i, j, err = 0;
	unsigned long irq_status;
	const struct tcs_request *req;
	struct tcs_cmd *cmd;

	irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0);

	for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
		req = get_req_from_tcs(drv, i);
		if (!req) {
			/* Completion for a TCS we have no record of. */
			WARN_ON(1);
			goto skip;
		}

		err = 0;
		for (j = 0; j < req->num_cmds; j++) {
			u32 sts;

			cmd = &req->cmds[j];
			sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j);
			/*
			 * A command failed if it was never issued, or if a
			 * completion was requested but never signalled.
			 */
			if (!(sts & CMD_STATUS_ISSUED) ||
			    ((req->wait_for_compl || cmd->wait) &&
			     !(sts & CMD_STATUS_COMPL))) {
				pr_err("Incomplete request: %s: addr=%#x data=%#x",
				       drv->name, cmd->addr, cmd->data);
				err = -EIO;
			}
		}

		trace_rpmh_tx_done(drv, i, req, err);
		/*
		 * If wake tcs was re-purposed for sending active
		 * votes, clear AMC trigger & enable modes and
		 * disable interrupt for this TCS
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			__tcs_set_trigger(drv, i, false);
skip:
		/* Reclaim the TCS */
		write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
		write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
		write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
		spin_lock(&drv->lock);
		clear_bit(i, drv->tcs_in_use);
		/*
		 * Disable interrupt for WAKE TCS to avoid being
		 * spammed with interrupts coming when the solver
		 * sends its wake votes.
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			enable_tcs_irq(drv, i, false);
		spin_unlock(&drv->lock);
		/* req is NULL on the WARN path above. */
		if (req)
			rpmh_tx_done(req, err);
	}

	return IRQ_HANDLED;
}
/*
 * __tcs_buffer_write() - Program @msg's commands into TCS @tcs_id starting
 * at slot @cmd_id.  Only writes registers; it does not trigger the TCS,
 * and callers are expected to hold the needed locks.
 */
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
			       const struct tcs_request *msg)
{
	u32 msgid, cmd_msgid;
	u32 cmd_enable = 0;
	u32 cmd_complete;
	struct tcs_cmd *cmd;
	int i, j;

	/* Base MSGID: fixed length, write, optional per-message completion. */
	cmd_msgid = CMD_MSGID_LEN;
	cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
	cmd_msgid |= CMD_MSGID_WRITE;

	/* Preserve completion bits of commands already in this TCS. */
	cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);

	for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
		cmd = &msg->cmds[i];
		cmd_enable |= BIT(j);
		cmd_complete |= cmd->wait << j;
		msgid = cmd_msgid;
		/* Per-command completion request, on top of per-message. */
		msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;

		write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
		write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
		write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
		trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
	}

	write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
	/* Enable the new slots on top of whatever was already enabled. */
	cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
	write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}
  279. static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
  280. const struct tcs_request *msg)
  281. {
  282. unsigned long curr_enabled;
  283. u32 addr;
  284. int i, j, k;
  285. int tcs_id = tcs->offset;
  286. for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
  287. if (tcs_is_free(drv, tcs_id))
  288. continue;
  289. curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
  290. for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
  291. addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
  292. for (k = 0; k < msg->num_cmds; k++) {
  293. if (addr == msg->cmds[k].addr)
  294. return -EBUSY;
  295. }
  296. }
  297. }
  298. return 0;
  299. }
  300. static int find_free_tcs(struct tcs_group *tcs)
  301. {
  302. int i;
  303. for (i = 0; i < tcs->num_tcs; i++) {
  304. if (tcs_is_free(tcs->drv, tcs->offset + i))
  305. return tcs->offset + i;
  306. }
  307. return -EBUSY;
  308. }
/*
 * tcs_write() - Claim a free TCS, program @msg into it, and trigger it.
 *
 * Return: 0 on success; -EBUSY if no TCS is free or one of @msg's
 * addresses is already in flight (callers retry); or the error from
 * get_tcs_for_msg().
 */
static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id;
	unsigned long flags;
	int ret;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	/* tcs->lock (irqsave) guards the group; drv->lock the in-use bitmap. */
	spin_lock_irqsave(&tcs->lock, flags);
	spin_lock(&drv->lock);
	/*
	 * The h/w does not like if we send a request to the same address,
	 * when one is already in-flight or being processed.
	 */
	ret = check_for_req_inflight(drv, tcs, msg);
	if (ret) {
		spin_unlock(&drv->lock);
		goto done_write;
	}

	tcs_id = find_free_tcs(tcs);
	if (tcs_id < 0) {
		ret = tcs_id;
		spin_unlock(&drv->lock);
		goto done_write;
	}

	/* Record the request so the IRQ handler can complete it later. */
	tcs->req[tcs_id - tcs->offset] = msg;
	set_bit(tcs_id, drv->tcs_in_use);
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
		/*
		 * Clear previously programmed WAKE commands in selected
		 * repurposed TCS to avoid triggering them. tcs->slots will be
		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
		 */
		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
		write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
		enable_tcs_irq(drv, tcs_id, true);
	}
	spin_unlock(&drv->lock);

	__tcs_buffer_write(drv, tcs_id, 0, msg);
	__tcs_set_trigger(drv, tcs_id, true);

done_write:
	spin_unlock_irqrestore(&tcs->lock, flags);
	return ret;
}
  354. /**
  355. * rpmh_rsc_send_data: Validate the incoming message and write to the
  356. * appropriate TCS block.
  357. *
  358. * @drv: the controller
  359. * @msg: the data to be sent
  360. *
  361. * Return: 0 on success, -EINVAL on error.
  362. * Note: This call blocks until a valid data is written to the TCS.
  363. */
  364. int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
  365. {
  366. int ret;
  367. if (!msg || !msg->cmds || !msg->num_cmds ||
  368. msg->num_cmds > MAX_RPMH_PAYLOAD) {
  369. WARN_ON(1);
  370. return -EINVAL;
  371. }
  372. do {
  373. ret = tcs_write(drv, msg);
  374. if (ret == -EBUSY) {
  375. pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
  376. msg->cmds[0].addr);
  377. udelay(10);
  378. }
  379. } while (ret == -EBUSY);
  380. return ret;
  381. }
  382. static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd,
  383. int len)
  384. {
  385. int i, j;
  386. /* Check for already cached commands */
  387. for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) {
  388. if (tcs->cmd_cache[i] != cmd[0].addr)
  389. continue;
  390. if (i + len >= tcs->num_tcs * tcs->ncpt)
  391. goto seq_err;
  392. for (j = 0; j < len; j++) {
  393. if (tcs->cmd_cache[i + j] != cmd[j].addr)
  394. goto seq_err;
  395. }
  396. return i;
  397. }
  398. return -ENODATA;
  399. seq_err:
  400. WARN(1, "Message does not match previous sequence.\n");
  401. return -EINVAL;
  402. }
/*
 * find_slots() - Choose where in @tcs to place @msg: reuse a previously
 * cached identical sequence if one exists, otherwise claim a fresh run of
 * free slots that fits entirely inside one TCS.  On success, *tcs_id and
 * *cmd_id identify the TCS and starting command slot to write to.
 *
 * Return: 0 on success, -ENOMEM if the payload does not fit.
 */
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
		      int *tcs_id, int *cmd_id)
{
	int slot, offset;
	int i = 0;

	/* Find if we already have the msg in our TCS */
	slot = find_match(tcs, msg->cmds, msg->num_cmds);
	if (slot >= 0)
		goto copy_data;

	/* Do over, until we can fit the full payload in a TCS */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
						  i, msg->num_cmds, 0);
		if (slot >= tcs->num_tcs * tcs->ncpt)
			return -ENOMEM;
		/* Advance one TCS (ncpt slots) at a time; the loop condition
		 * rejects placements that would straddle a TCS boundary. */
		i += tcs->ncpt;
	} while (slot + msg->num_cmds - 1 >= i);

copy_data:
	bitmap_set(tcs->slots, slot, msg->num_cmds);
	/* Copy the addresses of the resources over to the slots */
	for (i = 0; i < msg->num_cmds; i++)
		tcs->cmd_cache[slot + i] = msg->cmds[i].addr;

	/* Convert the flat slot index into a (TCS id, command id) pair. */
	offset = slot / tcs->ncpt;
	*tcs_id = offset + tcs->offset;
	*cmd_id = slot % tcs->ncpt;

	return 0;
}
  430. static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
  431. {
  432. struct tcs_group *tcs;
  433. int tcs_id = 0, cmd_id = 0;
  434. unsigned long flags;
  435. int ret;
  436. tcs = get_tcs_for_msg(drv, msg);
  437. if (IS_ERR(tcs))
  438. return PTR_ERR(tcs);
  439. spin_lock_irqsave(&tcs->lock, flags);
  440. /* find the TCS id and the command in the TCS to write to */
  441. ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
  442. if (!ret)
  443. __tcs_buffer_write(drv, tcs_id, cmd_id, msg);
  444. spin_unlock_irqrestore(&tcs->lock, flags);
  445. return ret;
  446. }
  447. /**
  448. * rpmh_rsc_write_ctrl_data: Write request to the controller
  449. *
  450. * @drv: the controller
  451. * @msg: the data to be written to the controller
  452. *
  453. * There is no response returned for writing the request to the controller.
  454. */
  455. int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
  456. {
  457. if (!msg || !msg->cmds || !msg->num_cmds ||
  458. msg->num_cmds > MAX_RPMH_PAYLOAD) {
  459. pr_err("Payload error\n");
  460. return -EINVAL;
  461. }
  462. /* Data sent to this API will not be sent immediately */
  463. if (msg->state == RPMH_ACTIVE_ONLY_STATE)
  464. return -EINVAL;
  465. return tcs_ctrl_write(drv, msg);
  466. }
/*
 * rpmh_probe_tcs_config() - Map this DRV's TCS register space, read the
 * hardware configuration register, parse the "qcom,tcs-config" DT
 * property, and initialize every tcs_group in @drv.
 *
 * Return: 0 on success, or a negative errno for bad resources / DT data.
 */
static int rpmh_probe_tcs_config(struct platform_device *pdev,
				 struct rsc_drv *drv)
{
	struct tcs_type_config {
		u32 type;
		u32 n;
	} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
	struct device_node *dn = pdev->dev.of_node;
	u32 config, max_tcs, ncpt, offset;
	int i, ret, n, st = 0;
	struct tcs_group *tcs;
	struct resource *res;
	void __iomem *base;
	char drv_id[10] = {0};

	/* Each DRV has its own named register region: "drv-<id>". */
	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
	if (ret)
		return ret;
	drv->tcs_base = base + offset;

	/* Extract TCS count (per-DRV field) and commands-per-TCS from HW. */
	config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);

	max_tcs = config;
	max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);

	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
	ncpt = ncpt >> DRV_NCPT_SHIFT;

	/* The DT property is a flat list of <type, count> pairs. */
	n = of_property_count_u32_elems(dn, "qcom,tcs-config");
	if (n != 2 * TCS_TYPE_NR)
		return -EINVAL;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2, &tcs_cfg[i].type);
		if (ret)
			return ret;
		if (tcs_cfg[i].type >= TCS_TYPE_NR)
			return -EINVAL;

		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2 + 1, &tcs_cfg[i].n);
		if (ret)
			return ret;
		if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
			return -EINVAL;
	}

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[tcs_cfg[i].type];
		/* A type listed twice in DT would clobber state: reject. */
		if (tcs->drv)
			return -EINVAL;
		tcs->drv = drv;
		tcs->type = tcs_cfg[i].type;
		tcs->num_tcs = tcs_cfg[i].n;
		tcs->ncpt = ncpt;
		spin_lock_init(&tcs->lock);

		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
			continue;

		/* TCSes must fit both the HW count and the mask's bit width. */
		if (st + tcs->num_tcs > max_tcs ||
		    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
			return -EINVAL;

		tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
		tcs->offset = st;
		st += tcs->num_tcs;

		/*
		 * Allocate memory to cache sleep and wake requests to
		 * avoid reading TCS register memory.
		 */
		if (tcs->type == ACTIVE_TCS)
			continue;

		tcs->cmd_cache = devm_kcalloc(&pdev->dev,
					      tcs->num_tcs * ncpt, sizeof(u32),
					      GFP_KERNEL);
		if (!tcs->cmd_cache)
			return -ENOMEM;
	}

	drv->num_tcs = st;

	return 0;
}
/*
 * rpmh_rsc_probe() - Bind to an RSC node: check cmd-db, parse the TCS
 * configuration, hook up the TX-done IRQ, and populate child devices.
 */
static int rpmh_rsc_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct rsc_drv *drv;
	int ret, irq;

	/*
	 * Even though RPMh doesn't directly use cmd-db, all of its children
	 * do. To avoid adding this check to our children we'll do it now.
	 */
	ret = cmd_db_ready();
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Command DB not available (%d)\n",
				ret);
		return ret;
	}

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
	if (ret)
		return ret;

	/* Prefer the DT "label"; fall back to the device name. */
	drv->name = of_get_property(dn, "label", NULL);
	if (!drv->name)
		drv->name = dev_name(&pdev->dev);

	ret = rpmh_probe_tcs_config(pdev, drv);
	if (ret)
		return ret;

	spin_lock_init(&drv->lock);
	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);

	/* The TX-done interrupt for this DRV is indexed by drv-id. */
	irq = platform_get_irq(pdev, drv->id);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			       drv->name, drv);
	if (ret)
		return ret;

	/* Enable the active TCS to send requests immediately */
	write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);

	spin_lock_init(&drv->client.cache_lock);
	INIT_LIST_HEAD(&drv->client.cache);
	INIT_LIST_HEAD(&drv->client.batch_cache);

	dev_set_drvdata(&pdev->dev, drv);

	/* Create platform devices for the child DT nodes of this RSC. */
	return devm_of_platform_populate(&pdev->dev);
}
/* Device-tree compatible strings this driver binds to. */
static const struct of_device_id rpmh_drv_match[] = {
	{ .compatible = "qcom,rpmh-rsc", },
	{ }
};

static struct platform_driver rpmh_driver = {
	.probe = rpmh_rsc_probe,
	.driver = {
		  .name = "rpmh",
		  .of_match_table = rpmh_drv_match,
		  /*
		   * NOTE(review): manual bind/unbind via sysfs is suppressed —
		   * presumably unbinding a live RSC is unsafe; confirm.
		   */
		  .suppress_bind_attrs = true,
	},
};

static int __init rpmh_driver_init(void)
{
	return platform_driver_register(&rpmh_driver);
}
/* Registered at arch_initcall so dependent child devices can probe early. */
arch_initcall(rpmh_driver_init);