// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
 *	Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer to store receive message
 *		Since we work with request-ACK protocol, we can
 *		reuse the same buffer for the rx path as we
 *		use for the tx path.
 * @done:	completion event
 */
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};

/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting Semaphore for managing max simultaneous
 *			Messages.
 * @xfer_block:		Preallocated Message array
 * @xfer_alloc_table:	Bitmap table for allocated messages.
 *			Index of this bitmap table is also used for message
 *			sequence identifier.
 * @xfer_lock:		Protection for message allocation
 */
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation */
	spinlock_t xfer_lock;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @host_id:		Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @nb:		Reboot Notifier block
 * @desc:	SoC description for this instance
 * @d:		Debugfs file entry
 * @debug_region:	Memory region where the debug messages are available
 * @debug_buffer:	Buffer allocated to copy debug messages.
 * @debug_region_size:	Debug region size
 * @handle:	Instance of TI SCI handle to send to clients.
 * @cl:		Mailbox Client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message info
 * @node:	list head
 * @users:	Number of users of this instance
 */
struct ti_sci_info {
	struct device *dev;
	struct notifier_block nb;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	/* protected by ti_sci_list_mutex */
	int users;
};

#define cl_to_ti_sci_info(c)		container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h)	container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n)	container_of(n, struct ti_sci_info, nb)

#ifdef CONFIG_DEBUG_FS

/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:		sequence file pointer
 * @unused:	unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * We don't trust firmware to leave the last byte NULL terminated
	 * (hence we have allocated 1 extra 0 byte). Since we cannot guarantee
	 * any specific data format for debug messages, we just present the
	 * data in the buffer as-is - we expect the messages to be
	 * self-explanatory.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}

/**
 * ti_sci_debug_open() - debug file open
 * @inode:	inode pointer
 * @file:	file pointer
 *
 * Return: result of single_open
 */
static int ti_sci_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, ti_sci_debug_show, inode->i_private);
}

/* log file operations */
static const struct file_operations ti_sci_debug_fops = {
	.open = ti_sci_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_debugfs_create(struct platform_device *pdev,
				 struct ti_sci_info *info)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	char debug_name[50] = "ti_sci_debug@";

	/* Debug region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "debug_messages");
	info->debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(info->debug_region))
		return 0;
	info->debug_region_size = resource_size(res);

	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
					  sizeof(char), GFP_KERNEL);
	if (!info->debug_buffer)
		return -ENOMEM;
	/* Setup NULL termination */
	info->debug_buffer[info->debug_region_size] = 0;

	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
					      sizeof(debug_name) -
					      sizeof("ti_sci_debug@")),
				      0444, NULL, info, &ti_sci_debug_fops);
	if (IS_ERR(info->d))
		return PTR_ERR(info->d);

	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
		info->debug_region, info->debug_region_size, res);
	return 0;
}

/**
 * ti_sci_debugfs_destroy() - clean up log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 */
static void ti_sci_debugfs_destroy(struct platform_device *pdev,
				   struct ti_sci_info *info)
{
	if (IS_ERR(info->debug_region))
		return;

	debugfs_remove(info->d);
}
#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:	Device pointer corresponding to the SCI entity
 * @hdr:	pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}

/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %zu < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy to the rx buffer.. */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	complete(&xfer->done);
}

/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: pointer to a valid struct ti_sci_xfer if all went fine, else a
 * corresponding ERR_PTR() value.
 */
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
					       u16 msg_type, u32 msg_flags,
					       size_t tx_message_size,
					       size_t rx_message_size)
{
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_sci_xfer *xfer;
	struct ti_sci_msg_hdr *hdr;
	unsigned long flags;
	unsigned long bit_pos;
	u8 xfer_id;
	int ret;
	int timeout;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/*
	 * Ensure we have only controlled number of pending messages.
	 * Ideally, we might just have to wait a single message, be
	 * conservative and wait 5 times that..
	 */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
	ret = down_timeout(&minfo->sem_xfer_count, timeout);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msgs);
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/*
	 * We already ensured in probe that we can have max messages that can
	 * fit in hdr.seq - NOTE: this improves access latencies
	 * to predictable O(1) access, BUT, it opens us to risk if
	 * remote misbehaves with corrupted message sequence responses.
	 * If that happens, we are going to be messed up anyways..
	 */
	xfer_id = (u8)bit_pos;

	xfer = &minfo->xfer_block[xfer_id];

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	reinit_completion(&xfer->done);

	hdr->seq = xfer_id;
	hdr->type = msg_type;
	hdr->host = info->desc->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
				struct ti_sci_xfer *xfer)
{
	unsigned long flags;
	struct ti_sci_msg_hdr *hdr;
	u8 xfer_id;

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer_id = hdr->seq;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer_id, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Increment the count for the next user to get through */
	up(&minfo->sem_xfer_count);
}

/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	   return corresponding error, else if all goes well,
 *	   return 0.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	int ret;
	int timeout;
	struct device *dev = info->dev;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
		return ret;

	ret = 0;

	/* And we wait for the response. */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
			(void *)_RET_IP_);
		ret = -ETIMEDOUT;
	}
	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(info->chan_tx, ret);

	return ret;
}

/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
	struct device *dev = info->dev;
	struct ti_sci_handle *handle = &info->handle;
	struct ti_sci_version_info *ver = &handle->version;
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_xfer *xfer;
	int ret;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}
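
/*
 * Illustrative sketch (compiled out, not part of the driver): every command
 * helper below follows the same pattern built from the primitives above -
 * allocate a transfer, fill the request, do the transfer, check the generic
 * ACK, release the transfer. TI_SCI_MSG_EXAMPLE and
 * struct ti_sci_msg_req_example are hypothetical placeholders, not real
 * protocol messages.
 */
#if 0
static int ti_sci_cmd_example(struct ti_sci_info *info, u32 arg)
{
	struct ti_sci_msg_req_example *req;	/* hypothetical request */
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	int ret;

	/* Reserve a transfer slot; sizes are validated against the SoC desc */
	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_EXAMPLE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer))
		return PTR_ERR(xfer);

	/* tx and rx share the same preallocated buffer (request-ACK) */
	req = (struct ti_sci_msg_req_example *)xfer->xfer_buf;
	req->arg = arg;				/* hypothetical field */

	ret = ti_sci_do_xfer(info, xfer);
	if (!ret) {
		resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
		ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
	}

	/* Always release the slot so the next requester can proceed */
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}
#endif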

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
	req->id = id;
	req->state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_device_state *req;
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * NOTE: The request is for exclusive access for the processor.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Release the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}
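
/*
 * Illustrative sketch (compiled out): how a client is expected to balance
 * get_device() with put_device() through the handle ops, which are wired up
 * in ti_sci_setup_ops() later in this file. The device ID 42 is a made-up
 * placeholder, not a real SoC device identifier.
 */
#if 0
static int example_client_device_use(const struct ti_sci_handle *handle)
{
	const struct ti_sci_dev_ops *dops = &handle->ops.dev_ops;
	int ret;

	ret = dops->get_device(handle, 42);	/* exclusive request */
	if (ret)
		return ret;

	/* ... use the device ... */

	return dops->put_device(handle, 42);	/* balance the get */
}
#endif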

/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
				      &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
				      &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_resets *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
	req->id = id;
	req->resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}

/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u8 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	req->clk_id = clk_id;
	req->request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state:	State requested for clock to move to
 * @current_state:	State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u8 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_state *req;
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	req->clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
 * @can_change_freq: 'true' if frequency change is desired, else 'false'
 * @enable_input_term: 'true' if input termination is desired, else 'false'
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u8 clk_id, bool needs_ssc, bool can_change_freq,
				bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}
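
/*
 * Illustrative sketch (compiled out): a typical clock lifecycle through the
 * clk ops, which are wired up in ti_sci_setup_ops() later in this file.
 * Device ID 42 and clock index 3 are made-up placeholders.
 */
#if 0
static int example_client_clock_use(const struct ti_sci_handle *handle)
{
	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
	int ret;

	/* no SSC, allow frequency changes, no input termination */
	ret = cops->get_clock(handle, 42, 3, false, true, false);
	if (ret)
		return ret;

	/* ... clocked operation ... */

	return cops->put_clock(handle, 42, 3);	/* hand control back */
}
#endif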
  925. /**
  926. * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
  927. * @handle: pointer to TI SCI handle
  928. * @dev_id: Device identifier this request is for
  929. * @clk_id: Clock identifier for the device for this request.
  930. * Each device has it's own set of clock inputs. This indexes
  931. * which clock input to modify.
  932. *
  933. * NOTE: This clock must have been requested by get_clock previously.
  934. *
  935. * Return: 0 if all went well, else returns appropriate error value.
  936. */
  937. static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
  938. u32 dev_id, u8 clk_id)
  939. {
  940. return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
  941. MSG_CLOCK_SW_STATE_UNREQ);
  942. }
  943. /**
  944. * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
  945. * @handle: pointer to TI SCI handle
  946. * @dev_id: Device identifier this request is for
  947. * @clk_id: Clock identifier for the device for this request.
  948. * Each device has it's own set of clock inputs. This indexes
  949. * which clock input to modify.
  950. *
  951. * NOTE: This clock must have been requested by get_clock previously.
  952. *
  953. * Return: 0 if all went well, else returns appropriate error value.
  954. */
  955. static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
  956. u32 dev_id, u8 clk_id)
  957. {
  958. return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
  959. MSG_CLOCK_SW_STATE_AUTO);
  960. }
  961. /**
  962. * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
  963. * @handle: pointer to TI SCI handle
  964. * @dev_id: Device identifier this request is for
  965. * @clk_id: Clock identifier for the device for this request.
  966. * Each device has it's own set of clock inputs. This indexes
  967. * which clock input to modify.
  968. * @req_state: state indicating if the clock is auto managed
  969. *
  970. * Return: 0 if all went well, else returns appropriate error value.
  971. */
  972. static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
  973. u32 dev_id, u8 clk_id, bool *req_state)
  974. {
  975. u8 state = 0;
  976. int ret;
  977. if (!req_state)
  978. return -EINVAL;
  979. ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
  980. if (ret)
  981. return ret;
  982. *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
  983. return 0;
  984. }
  985. /**
  986. * ti_sci_cmd_clk_is_on() - Is the clock ON
  987. * @handle: pointer to TI SCI handle
  988. * @dev_id: Device identifier this request is for
  989. * @clk_id: Clock identifier for the device for this request.
  990. * Each device has it's own set of clock inputs. This indexes
  991. * which clock input to modify.
  992. * @req_state: state indicating if the clock is managed by us and enabled
  993. * @curr_state: state indicating if the clock is ready for operation
  994. *
  995. * Return: 0 if all went well, else returns appropriate error value.
  996. */
  997. static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
  998. u8 clk_id, bool *req_state, bool *curr_state)
  999. {
  1000. u8 c_state = 0, r_state = 0;
  1001. int ret;
  1002. if (!req_state && !curr_state)
  1003. return -EINVAL;
  1004. ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
  1005. &r_state, &c_state);
  1006. if (ret)
  1007. return ret;
  1008. if (req_state)
  1009. *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
  1010. if (curr_state)
  1011. *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
  1012. return 0;
  1013. }
  1014. /**
  1015. * ti_sci_cmd_clk_is_off() - Is the clock OFF
  1016. * @handle: pointer to TI SCI handle
  1017. * @dev_id: Device identifier this request is for
  1018. * @clk_id: Clock identifier for the device for this request.
  1019. * Each device has it's own set of clock inputs. This indexes
  1020. * which clock input to modify.
  1021. * @req_state: state indicating if the clock is managed by us and disabled
  1022. * @curr_state: state indicating if the clock is NOT ready for operation
  1023. *
  1024. * Return: 0 if all went well, else returns appropriate error value.
  1025. */
  1026. static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
  1027. u8 clk_id, bool *req_state, bool *curr_state)
  1028. {
  1029. u8 c_state = 0, r_state = 0;
  1030. int ret;
  1031. if (!req_state && !curr_state)
  1032. return -EINVAL;
  1033. ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
  1034. &r_state, &c_state);
  1035. if (ret)
  1036. return ret;
  1037. if (req_state)
  1038. *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
  1039. if (curr_state)
  1040. *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
  1041. return 0;
  1042. }
  1043. /**
  1044. * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
  1045. * @handle: pointer to TI SCI handle
  1046. * @dev_id: Device identifier this request is for
  1047. * @clk_id: Clock identifier for the device for this request.
  1048. * Each device has it's own set of clock inputs. This indexes
  1049. * which clock input to modify.
  1050. * @parent_id: Parent clock identifier to set
  1051. *
  1052. * Return: 0 if all went well, else returns appropriate error value.
  1053. */
  1054. static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
  1055. u32 dev_id, u8 clk_id, u8 parent_id)
  1056. {
  1057. struct ti_sci_info *info;
  1058. struct ti_sci_msg_req_set_clock_parent *req;
  1059. struct ti_sci_msg_hdr *resp;
  1060. struct ti_sci_xfer *xfer;
  1061. struct device *dev;
  1062. int ret = 0;
  1063. if (IS_ERR(handle))
  1064. return PTR_ERR(handle);
  1065. if (!handle)
  1066. return -EINVAL;
  1067. info = handle_to_ti_sci_info(handle);
  1068. dev = info->dev;
  1069. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
  1070. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1071. sizeof(*req), sizeof(*resp));
  1072. if (IS_ERR(xfer)) {
  1073. ret = PTR_ERR(xfer);
  1074. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1075. return ret;
  1076. }
  1077. req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
  1078. req->dev_id = dev_id;
  1079. req->clk_id = clk_id;
  1080. req->parent_id = parent_id;
  1081. ret = ti_sci_do_xfer(info, xfer);
  1082. if (ret) {
  1083. dev_err(dev, "Mbox send fail %d\n", ret);
  1084. goto fail;
  1085. }
  1086. resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
  1087. ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
  1088. fail:
  1089. ti_sci_put_one_xfer(&info->minfo, xfer);
  1090. return ret;
  1091. }
  1092. /**
  1093. * ti_sci_cmd_clk_get_parent() - Get current parent clock source
  1094. * @handle: pointer to TI SCI handle
  1095. * @dev_id: Device identifier this request is for
  1096. * @clk_id: Clock identifier for the device for this request.
  1097. * Each device has it's own set of clock inputs. This indexes
  1098. * which clock input to modify.
  1099. * @parent_id: Current clock parent
  1100. *
  1101. * Return: 0 if all went well, else returns appropriate error value.
  1102. */
  1103. static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
  1104. u32 dev_id, u8 clk_id, u8 *parent_id)
  1105. {
  1106. struct ti_sci_info *info;
  1107. struct ti_sci_msg_req_get_clock_parent *req;
  1108. struct ti_sci_msg_resp_get_clock_parent *resp;
  1109. struct ti_sci_xfer *xfer;
  1110. struct device *dev;
  1111. int ret = 0;
  1112. if (IS_ERR(handle))
  1113. return PTR_ERR(handle);
  1114. if (!handle || !parent_id)
  1115. return -EINVAL;
  1116. info = handle_to_ti_sci_info(handle);
  1117. dev = info->dev;
  1118. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
  1119. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1120. sizeof(*req), sizeof(*resp));
  1121. if (IS_ERR(xfer)) {
  1122. ret = PTR_ERR(xfer);
  1123. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1124. return ret;
  1125. }
  1126. req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
  1127. req->dev_id = dev_id;
  1128. req->clk_id = clk_id;
  1129. ret = ti_sci_do_xfer(info, xfer);
  1130. if (ret) {
  1131. dev_err(dev, "Mbox send fail %d\n", ret);
  1132. goto fail;
  1133. }
  1134. resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
  1135. if (!ti_sci_is_response_ack(resp))
  1136. ret = -ENODEV;
  1137. else
  1138. *parent_id = resp->parent_id;
  1139. fail:
  1140. ti_sci_put_one_xfer(&info->minfo, xfer);
  1141. return ret;
  1142. }
  1143. /**
  1144. * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
  1145. * @handle: pointer to TI SCI handle
  1146. * @dev_id: Device identifier this request is for
  1147. * @clk_id: Clock identifier for the device for this request.
  1148. * Each device has it's own set of clock inputs. This indexes
  1149. * which clock input to modify.
  1150. * @num_parents: Returns he number of parents to the current clock.
  1151. *
  1152. * Return: 0 if all went well, else returns appropriate error value.
  1153. */
  1154. static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
  1155. u32 dev_id, u8 clk_id,
  1156. u8 *num_parents)
  1157. {
  1158. struct ti_sci_info *info;
  1159. struct ti_sci_msg_req_get_clock_num_parents *req;
  1160. struct ti_sci_msg_resp_get_clock_num_parents *resp;
  1161. struct ti_sci_xfer *xfer;
  1162. struct device *dev;
  1163. int ret = 0;
  1164. if (IS_ERR(handle))
  1165. return PTR_ERR(handle);
  1166. if (!handle || !num_parents)
  1167. return -EINVAL;
  1168. info = handle_to_ti_sci_info(handle);
  1169. dev = info->dev;
  1170. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
  1171. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1172. sizeof(*req), sizeof(*resp));
  1173. if (IS_ERR(xfer)) {
  1174. ret = PTR_ERR(xfer);
  1175. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1176. return ret;
  1177. }
  1178. req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
  1179. req->dev_id = dev_id;
  1180. req->clk_id = clk_id;
  1181. ret = ti_sci_do_xfer(info, xfer);
  1182. if (ret) {
  1183. dev_err(dev, "Mbox send fail %d\n", ret);
  1184. goto fail;
  1185. }
  1186. resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
  1187. if (!ti_sci_is_response_ack(resp))
  1188. ret = -ENODEV;
  1189. else
  1190. *num_parents = resp->num_parents;
  1191. fail:
  1192. ti_sci_put_one_xfer(&info->minfo, xfer);
  1193. return ret;
  1194. }
  1195. /**
  1196. * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
  1197. * @handle: pointer to TI SCI handle
  1198. * @dev_id: Device identifier this request is for
  1199. * @clk_id: Clock identifier for the device for this request.
  1200. * Each device has it's own set of clock inputs. This indexes
  1201. * which clock input to modify.
  1202. * @min_freq: The minimum allowable frequency in Hz. This is the minimum
  1203. * allowable programmed frequency and does not account for clock
  1204. * tolerances and jitter.
  1205. * @target_freq: The target clock frequency in Hz. A frequency will be
  1206. * processed as close to this target frequency as possible.
  1207. * @max_freq: The maximum allowable frequency in Hz. This is the maximum
  1208. * allowable programmed frequency and does not account for clock
  1209. * tolerances and jitter.
  1210. * @match_freq: Frequency match in Hz response.
  1211. *
  1212. * Return: 0 if all went well, else returns appropriate error value.
  1213. */
  1214. static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
  1215. u32 dev_id, u8 clk_id, u64 min_freq,
  1216. u64 target_freq, u64 max_freq,
  1217. u64 *match_freq)
  1218. {
  1219. struct ti_sci_info *info;
  1220. struct ti_sci_msg_req_query_clock_freq *req;
  1221. struct ti_sci_msg_resp_query_clock_freq *resp;
  1222. struct ti_sci_xfer *xfer;
  1223. struct device *dev;
  1224. int ret = 0;
  1225. if (IS_ERR(handle))
  1226. return PTR_ERR(handle);
  1227. if (!handle || !match_freq)
  1228. return -EINVAL;
  1229. info = handle_to_ti_sci_info(handle);
  1230. dev = info->dev;
  1231. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
  1232. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1233. sizeof(*req), sizeof(*resp));
  1234. if (IS_ERR(xfer)) {
  1235. ret = PTR_ERR(xfer);
  1236. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1237. return ret;
  1238. }
  1239. req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
  1240. req->dev_id = dev_id;
  1241. req->clk_id = clk_id;
  1242. req->min_freq_hz = min_freq;
  1243. req->target_freq_hz = target_freq;
  1244. req->max_freq_hz = max_freq;
  1245. ret = ti_sci_do_xfer(info, xfer);
  1246. if (ret) {
  1247. dev_err(dev, "Mbox send fail %d\n", ret);
  1248. goto fail;
  1249. }
  1250. resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
  1251. if (!ti_sci_is_response_ack(resp))
  1252. ret = -ENODEV;
  1253. else
  1254. *match_freq = resp->freq_hz;
  1255. fail:
  1256. ti_sci_put_one_xfer(&info->minfo, xfer);
  1257. return ret;
  1258. }
  1259. /**
  1260. * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
  1261. * @handle: pointer to TI SCI handle
  1262. * @dev_id: Device identifier this request is for
  1263. * @clk_id: Clock identifier for the device for this request.
  1264. * Each device has it's own set of clock inputs. This indexes
  1265. * which clock input to modify.
  1266. * @min_freq: The minimum allowable frequency in Hz. This is the minimum
  1267. * allowable programmed frequency and does not account for clock
  1268. * tolerances and jitter.
  1269. * @target_freq: The target clock frequency in Hz. A frequency will be
  1270. * processed as close to this target frequency as possible.
  1271. * @max_freq: The maximum allowable frequency in Hz. This is the maximum
  1272. * allowable programmed frequency and does not account for clock
  1273. * tolerances and jitter.
  1274. *
  1275. * Return: 0 if all went well, else returns appropriate error value.
  1276. */
  1277. static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
  1278. u32 dev_id, u8 clk_id, u64 min_freq,
  1279. u64 target_freq, u64 max_freq)
  1280. {
  1281. struct ti_sci_info *info;
  1282. struct ti_sci_msg_req_set_clock_freq *req;
  1283. struct ti_sci_msg_hdr *resp;
  1284. struct ti_sci_xfer *xfer;
  1285. struct device *dev;
  1286. int ret = 0;
  1287. if (IS_ERR(handle))
  1288. return PTR_ERR(handle);
  1289. if (!handle)
  1290. return -EINVAL;
  1291. info = handle_to_ti_sci_info(handle);
  1292. dev = info->dev;
  1293. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
  1294. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1295. sizeof(*req), sizeof(*resp));
  1296. if (IS_ERR(xfer)) {
  1297. ret = PTR_ERR(xfer);
  1298. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1299. return ret;
  1300. }
  1301. req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
  1302. req->dev_id = dev_id;
  1303. req->clk_id = clk_id;
  1304. req->min_freq_hz = min_freq;
  1305. req->target_freq_hz = target_freq;
  1306. req->max_freq_hz = max_freq;
  1307. ret = ti_sci_do_xfer(info, xfer);
  1308. if (ret) {
  1309. dev_err(dev, "Mbox send fail %d\n", ret);
  1310. goto fail;
  1311. }
  1312. resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
  1313. ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
  1314. fail:
  1315. ti_sci_put_one_xfer(&info->minfo, xfer);
  1316. return ret;
  1317. }
/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to query.
 * @freq:	Current frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u8 clk_id, u64 *freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_freq *req;
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	req->clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

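/*
 * Example (illustrative only, not part of the driver): a client holding a
 * handle would reach the three frequency calls above through the clk_ops
 * table rather than calling the static functions directly. The dev_id and
 * clk_id values and the 100 MHz window below are hypothetical.
 *
 *	u64 match, freq;
 *	int ret;
 *
 *	ret = handle->ops.clk_ops.get_best_match_freq(handle, dev_id, clk_id,
 *						      95000000, 100000000,
 *						      105000000, &match);
 *	if (!ret)
 *		ret = handle->ops.clk_ops.set_freq(handle, dev_id, clk_id,
 *						   95000000, match, 105000000);
 *	if (!ret)
 *		ret = handle->ops.clk_ops.get_freq(handle, dev_id, clk_id,
 *						   &freq);
 */
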
/**
 * ti_sci_cmd_core_reboot() - Command to request system reset
 * @handle:	pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_reboot *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_setup_ops() - Setup the operations structures
 * @info:	pointer to TI SCI instance
 */
static void ti_sci_setup_ops(struct ti_sci_info *info)
{
	struct ti_sci_ops *ops = &info->handle.ops;
	struct ti_sci_core_ops *core_ops = &ops->core_ops;
	struct ti_sci_dev_ops *dops = &ops->dev_ops;
	struct ti_sci_clk_ops *cops = &ops->clk_ops;

	core_ops->reboot_device = ti_sci_cmd_core_reboot;

	dops->get_device = ti_sci_cmd_get_device;
	dops->idle_device = ti_sci_cmd_idle_device;
	dops->put_device = ti_sci_cmd_put_device;
	dops->is_valid = ti_sci_cmd_dev_is_valid;
	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
	dops->is_idle = ti_sci_cmd_dev_is_idle;
	dops->is_stop = ti_sci_cmd_dev_is_stop;
	dops->is_on = ti_sci_cmd_dev_is_on;
	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
	dops->set_device_resets = ti_sci_cmd_set_device_resets;
	dops->get_device_resets = ti_sci_cmd_get_device_resets;

	cops->get_clock = ti_sci_cmd_get_clock;
	cops->idle_clock = ti_sci_cmd_idle_clock;
	cops->put_clock = ti_sci_cmd_put_clock;
	cops->is_auto = ti_sci_cmd_clk_is_auto;
	cops->is_on = ti_sci_cmd_clk_is_on;
	cops->is_off = ti_sci_cmd_clk_is_off;
	cops->set_parent = ti_sci_cmd_clk_set_parent;
	cops->get_parent = ti_sci_cmd_clk_get_parent;
	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
	cops->set_freq = ti_sci_cmd_clk_set_freq;
	cops->get_freq = ti_sci_cmd_clk_get_freq;
}

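/*
 * Example (illustrative only): once ti_sci_setup_ops() has populated the
 * handle, consumers dispatch firmware requests through the ops tables.
 * The dev_id value is hypothetical, and the dev_ops signatures sketched
 * here are an assumption based on the registrations above.
 *
 *	const struct ti_sci_handle *handle;
 *	int ret;
 *
 *	ret = handle->ops.dev_ops.get_device(handle, dev_id);
 *	if (!ret)
 *		ret = handle->ops.dev_ops.put_device(handle, dev_id);
 */
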
/**
 * ti_sci_get_handle() - Get the TI SCI handle for a device
 * @dev:	Pointer to the device for which we want the SCI handle
 *
 * NOTE: The function does not track individual clients of the framework;
 * that bookkeeping is expected to be maintained by the caller of the TI SCI
 * protocol library. Each successful ti_sci_get_handle must be balanced by a
 * matching ti_sci_put_handle.
 *
 * Return: pointer to handle if successful, else:
 * -EPROBE_DEFER if the instance is not ready,
 * -ENODEV if the required node handler is missing,
 * -EINVAL if invalid conditions are encountered.
 */
const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
{
	struct device_node *ti_sci_np;
	struct list_head *p;
	struct ti_sci_handle *handle = NULL;
	struct ti_sci_info *info;

	if (!dev) {
		pr_err("I need a device pointer\n");
		return ERR_PTR(-EINVAL);
	}
	ti_sci_np = of_get_parent(dev->of_node);
	if (!ti_sci_np) {
		dev_err(dev, "No OF information\n");
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&ti_sci_list_mutex);
	list_for_each(p, &ti_sci_list) {
		info = list_entry(p, struct ti_sci_info, node);
		if (ti_sci_np == info->dev->of_node) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&ti_sci_list_mutex);
	of_node_put(ti_sci_np);

	if (!handle)
		return ERR_PTR(-EPROBE_DEFER);

	return handle;
}
EXPORT_SYMBOL_GPL(ti_sci_get_handle);

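/*
 * Example (illustrative only): a consumer driver that manages the handle
 * lifetime itself; "client_dev" is a hypothetical struct device pointer
 * whose DT node sits under the TI SCI node.
 *
 *	const struct ti_sci_handle *handle;
 *
 *	handle = ti_sci_get_handle(client_dev);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ti_sci_put_handle(handle);
 */
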
/**
 * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
 * @handle:	Handle acquired by ti_sci_get_handle
 *
 * NOTE: The function does not track individual clients of the framework;
 * that bookkeeping is expected to be maintained by the caller of the TI SCI
 * protocol library. Each ti_sci_put_handle must balance a successful
 * ti_sci_get_handle.
 *
 * Return: 0 if successfully released;
 * if an error pointer was passed, the error value is returned back;
 * if NULL was passed, -EINVAL is returned.
 */
int ti_sci_put_handle(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	mutex_lock(&ti_sci_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&ti_sci_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(ti_sci_put_handle);

static void devm_ti_sci_release(struct device *dev, void *res)
{
	const struct ti_sci_handle **ptr = res;
	const struct ti_sci_handle *handle = *ptr;
	int ret;

	ret = ti_sci_put_handle(handle);
	if (ret)
		dev_err(dev, "failed to put handle %d\n", ret);
}

/**
 * devm_ti_sci_get_handle() - Managed get handle
 * @dev:	device for which we want the SCI handle
 *
 * NOTE: This releases the handle once the device resources are
 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
 * The function does not track individual clients of the framework;
 * that bookkeeping is expected to be maintained by the caller of the
 * TI SCI protocol library.
 *
 * Return: pointer to handle if successful, else corresponding error
 * value (see ti_sci_get_handle).
 */
const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
{
	const struct ti_sci_handle **ptr;
	const struct ti_sci_handle *handle;

	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);
	handle = ti_sci_get_handle(dev);

	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);

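/*
 * Example (illustrative only): the managed variant ties the handle to the
 * consumer's device lifetime, so no explicit ti_sci_put_handle() call is
 * needed; "client_dev" is again a hypothetical device pointer.
 *
 *	const struct ti_sci_handle *handle;
 *
 *	handle = devm_ti_sci_get_handle(client_dev);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */
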
static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
				void *cmd)
{
	struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
	const struct ti_sci_handle *handle = &info->handle;

	ti_sci_cmd_core_reboot(handle);

	/*
	 * Whether the call failed or passed, we should never reach this
	 * point: the firmware should already have reset the system.
	 */
	return NOTIFY_BAD;
}

/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	.host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 1000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 64,
};

static const struct of_device_id ti_sci_of_match[] = {
	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_of_match);

static int ti_sci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id;
	const struct ti_sci_desc *desc;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info = NULL;
	struct ti_sci_xfers_info *minfo;
	struct mbox_client *cl;
	int ret = -EINVAL;
	int i;
	int reboot = 0;

	of_id = of_match_device(ti_sci_of_match, dev);
	if (!of_id) {
		dev_err(dev, "OF data missing\n");
		return -EINVAL;
	}
	desc = of_id->data;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	reboot = of_property_read_bool(dev->of_node,
				       "ti,system-reboot-controller");
	INIT_LIST_HEAD(&info->node);
	minfo = &info->minfo;
	/*
	 * Pre-allocate messages.
	 * NEVER allocate more than we can indicate in hdr.seq, since
	 * sequence numbers beyond that cannot be distinguished; if the
	 * data description has a bug, force a fix.
	 */
	if (WARN_ON(desc->max_msgs >=
		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
		return -EINVAL;
	minfo->xfer_block = devm_kcalloc(dev,
					 desc->max_msgs,
					 sizeof(*minfo->xfer_block),
					 GFP_KERNEL);
	if (!minfo->xfer_block)
		return -ENOMEM;

	minfo->xfer_alloc_table = devm_kcalloc(dev,
					       BITS_TO_LONGS(desc->max_msgs),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!minfo->xfer_alloc_table)
		return -ENOMEM;
	bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs;
	     i++, xfer++) {
		xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
					      GFP_KERNEL);
		if (!xfer->xfer_buf)
			return -ENOMEM;

		xfer->tx_message.buf = xfer->xfer_buf;
		init_completion(&xfer->done);
	}

	ret = ti_sci_debugfs_create(pdev, info);
	if (ret)
		dev_warn(dev, "Failed to create debug file\n");

	platform_set_drvdata(pdev, info);

	cl = &info->cl;
	cl->dev = dev;
	cl->tx_block = false;
	cl->rx_callback = ti_sci_rx_callback;
	cl->knows_txdone = true;

	spin_lock_init(&minfo->xfer_lock);
	sema_init(&minfo->sem_xfer_count, desc->max_msgs);

	info->chan_rx = mbox_request_channel_byname(cl, "rx");
	if (IS_ERR(info->chan_rx)) {
		ret = PTR_ERR(info->chan_rx);
		goto out;
	}

	info->chan_tx = mbox_request_channel_byname(cl, "tx");
	if (IS_ERR(info->chan_tx)) {
		ret = PTR_ERR(info->chan_tx);
		goto out;
	}

	ret = ti_sci_cmd_get_revision(info);
	if (ret) {
		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
		goto out;
	}

	ti_sci_setup_ops(info);

	if (reboot) {
		info->nb.notifier_call = tisci_reboot_handler;
		info->nb.priority = 128;

		ret = register_restart_handler(&info->nb);
		if (ret) {
			dev_err(dev, "reboot registration fail(%d)\n", ret);
			return ret;
		}
	}

	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
		 info->handle.version.abi_major,
		 info->handle.version.abi_minor,
		 info->handle.version.firmware_revision,
		 info->handle.version.firmware_description);

	mutex_lock(&ti_sci_list_mutex);
	list_add_tail(&info->node, &ti_sci_list);
	mutex_unlock(&ti_sci_list_mutex);

	return of_platform_populate(dev->of_node, NULL, NULL, dev);
out:
	if (!IS_ERR(info->chan_tx))
		mbox_free_channel(info->chan_tx);
	if (!IS_ERR(info->chan_rx))
		mbox_free_channel(info->chan_rx);
	debugfs_remove(info->d);

	return ret;
}

static int ti_sci_remove(struct platform_device *pdev)
{
	struct ti_sci_info *info;
	struct device *dev = &pdev->dev;
	int ret = 0;

	of_platform_depopulate(dev);

	info = platform_get_drvdata(pdev);
	if (info->nb.notifier_call)
		unregister_restart_handler(&info->nb);

	mutex_lock(&ti_sci_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&ti_sci_list_mutex);

	if (!ret) {
		ti_sci_debugfs_destroy(pdev, info);

		/* Safe to free channels since no more users */
		mbox_free_channel(info->chan_tx);
		mbox_free_channel(info->chan_rx);
	}

	return ret;
}

static struct platform_driver ti_sci_driver = {
	.probe = ti_sci_probe,
	.remove = ti_sci_remove,
	.driver = {
		   .name = "ti-sci",
		   .of_match_table = of_match_ptr(ti_sci_of_match),
	},
};
module_platform_driver(ti_sci_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI System Control Interface(SCI) driver");
MODULE_AUTHOR("Nishanth Menon");
MODULE_ALIAS("platform:ti-sci");