
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include <linux/scif.h>
#include "scif_main.h"
#include "scif_map.h"

static const char * const scif_ep_states[] = {
	"Unbound",
	"Bound",
	"Listening",
	"Connected",
	"Connecting",
	"Mapping",
	"Closing",
	"Close Listening",
	"Disconnected",
	"Zombie"};

enum conn_async_state {
	ASYNC_CONN_IDLE = 1,	/* ep setup for async connect */
	ASYNC_CONN_INPROGRESS,	/* async connect in progress */
	ASYNC_CONN_FLUSH_WORK	/* async work flush in progress */
};

scif_epd_t scif_open(void)
{
	struct scif_endpt *ep;

	might_sleep();
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		goto err_ep_alloc;

	ep->qp_info.qp = kzalloc(sizeof(*ep->qp_info.qp), GFP_KERNEL);
	if (!ep->qp_info.qp)
		goto err_qp_alloc;

	spin_lock_init(&ep->lock);
	mutex_init(&ep->sendlock);
	mutex_init(&ep->recvlock);

	ep->state = SCIFEP_UNBOUND;
	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI open: ep %p success\n", ep);
	return ep;

err_qp_alloc:
	kfree(ep);
err_ep_alloc:
	return NULL;
}
EXPORT_SYMBOL_GPL(scif_open);
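/*
 * Usage sketch (illustrative only, not part of the driver): a kSCIF
 * client pairs every successful scif_open() with a scif_close().
 *
 *	scif_epd_t epd;
 *
 *	epd = scif_open();
 *	if (!epd)
 *		return -ENOMEM;
 *	...
 *	scif_close(epd);
 */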
/*
 * scif_disconnect_ep - Disconnects the endpoint if found
 * @ep: The end point returned from scif_open()
 */
static struct scif_endpt *scif_disconnect_ep(struct scif_endpt *ep)
{
	struct scifmsg msg;
	struct scif_endpt *fep = NULL;
	struct scif_endpt *tmpep;
	struct list_head *pos, *tmpq;
	int err;

	/*
	 * Wake up any threads blocked in send()/recv() before closing
	 * out the connection. Grabbing and releasing the send/recv lock
	 * will ensure that any blocked senders/receivers have exited for
	 * Ring 0 endpoints. It is a Ring 0 bug to call send/recv after
	 * close. Ring 3 endpoints are not affected since close will not
	 * be called while there are IOCTLs executing.
	 */
	wake_up_interruptible(&ep->sendwq);
	wake_up_interruptible(&ep->recvwq);
	mutex_lock(&ep->sendlock);
	mutex_unlock(&ep->sendlock);
	mutex_lock(&ep->recvlock);
	mutex_unlock(&ep->recvlock);

	/* Remove from the connected list */
	mutex_lock(&scif_info.connlock);
	list_for_each_safe(pos, tmpq, &scif_info.connected) {
		tmpep = list_entry(pos, struct scif_endpt, list);
		if (tmpep == ep) {
			list_del(pos);
			fep = tmpep;
			spin_lock(&ep->lock);
			break;
		}
	}

	if (!fep) {
		/*
		 * The other side has completed the disconnect before
		 * the end point could be removed from the list. Therefore
		 * the ep lock is not held; traverse the disconnected
		 * list to find the endpoint, then release the conn lock.
		 */
		list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
			tmpep = list_entry(pos, struct scif_endpt, list);
			if (tmpep == ep) {
				list_del(pos);
				break;
			}
		}
		mutex_unlock(&scif_info.connlock);
		return NULL;
	}

	init_completion(&ep->discon);
	msg.uop = SCIF_DISCNCT;
	msg.src = ep->port;
	msg.dst = ep->peer;
	msg.payload[0] = (u64)ep;
	msg.payload[1] = ep->remote_ep;
	err = scif_nodeqp_send(ep->remote_dev, &msg);
	spin_unlock(&ep->lock);
	mutex_unlock(&scif_info.connlock);

	if (!err)
		/* Wait for the remote node to respond with SCIF_DISCNT_ACK */
		wait_for_completion_timeout(&ep->discon,
					    SCIF_NODE_ALIVE_TIMEOUT);
	return ep;
}
int scif_close(scif_epd_t epd)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct scif_endpt *tmpep;
	struct list_head *pos, *tmpq;
	enum scif_epd_state oldstate;
	bool flush_conn;

	dev_dbg(scif_info.mdev.this_device, "SCIFAPI close: ep %p %s\n",
		ep, scif_ep_states[ep->state]);
	might_sleep();
	spin_lock(&ep->lock);
	flush_conn = (ep->conn_async_state == ASYNC_CONN_INPROGRESS);
	spin_unlock(&ep->lock);

	if (flush_conn)
		flush_work(&scif_info.conn_work);

	spin_lock(&ep->lock);
	oldstate = ep->state;

	ep->state = SCIFEP_CLOSING;

	switch (oldstate) {
	case SCIFEP_ZOMBIE:
	case SCIFEP_DISCONNECTED:
		spin_unlock(&ep->lock);
		/* Remove from the disconnected list */
		mutex_lock(&scif_info.connlock);
		list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
			tmpep = list_entry(pos, struct scif_endpt, list);
			if (tmpep == ep) {
				list_del(pos);
				break;
			}
		}
		mutex_unlock(&scif_info.connlock);
		break;
	case SCIFEP_UNBOUND:
	case SCIFEP_BOUND:
	case SCIFEP_CONNECTING:
		spin_unlock(&ep->lock);
		break;
	case SCIFEP_MAPPING:
	case SCIFEP_CONNECTED:
	case SCIFEP_CLOSING:
	{
		spin_unlock(&ep->lock);
		scif_disconnect_ep(ep);
		break;
	}
	case SCIFEP_LISTENING:
	case SCIFEP_CLLISTEN:
	{
		struct scif_conreq *conreq;
		struct scifmsg msg;
		struct scif_endpt *aep;

		spin_unlock(&ep->lock);
		spin_lock(&scif_info.eplock);

		/* remove from listen list */
		list_for_each_safe(pos, tmpq, &scif_info.listen) {
			tmpep = list_entry(pos, struct scif_endpt, list);
			if (tmpep == ep)
				list_del(pos);
		}
		/* Remove any dangling accepts */
		while (ep->acceptcnt) {
			aep = list_first_entry(&ep->li_accept,
					       struct scif_endpt, liacceptlist);
			list_del(&aep->liacceptlist);
			scif_put_port(aep->port.port);
			list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
				tmpep = list_entry(pos, struct scif_endpt,
						   miacceptlist);
				if (tmpep == aep) {
					list_del(pos);
					break;
				}
			}
			spin_unlock(&scif_info.eplock);
			mutex_lock(&scif_info.connlock);
			list_for_each_safe(pos, tmpq, &scif_info.connected) {
				tmpep = list_entry(pos,
						   struct scif_endpt, list);
				if (tmpep == aep) {
					list_del(pos);
					break;
				}
			}
			list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
				tmpep = list_entry(pos,
						   struct scif_endpt, list);
				if (tmpep == aep) {
					list_del(pos);
					break;
				}
			}
			mutex_unlock(&scif_info.connlock);
			scif_teardown_ep(aep);
			spin_lock(&scif_info.eplock);
			scif_add_epd_to_zombie_list(aep, SCIF_EPLOCK_HELD);
			ep->acceptcnt--;
		}

		spin_lock(&ep->lock);
		spin_unlock(&scif_info.eplock);

		/* Remove and reject any pending connection requests. */
		while (ep->conreqcnt) {
			conreq = list_first_entry(&ep->conlist,
						  struct scif_conreq, list);
			list_del(&conreq->list);

			msg.uop = SCIF_CNCT_REJ;
			msg.dst.node = conreq->msg.src.node;
			msg.dst.port = conreq->msg.src.port;
			msg.payload[0] = conreq->msg.payload[0];
			msg.payload[1] = conreq->msg.payload[1];
			/*
			 * No error handling on purpose for scif_nodeqp_send().
			 * If the remote node is lost we still want to free the
			 * connection requests on the self node.
			 */
			scif_nodeqp_send(&scif_dev[conreq->msg.src.node],
					 &msg);
			ep->conreqcnt--;
			kfree(conreq);
		}

		spin_unlock(&ep->lock);
		/* If a kSCIF accept is waiting wake it up */
		wake_up_interruptible(&ep->conwq);
		break;
	}
	}
	scif_put_port(ep->port.port);
	scif_teardown_ep(ep);
	scif_add_epd_to_zombie_list(ep, !SCIF_EPLOCK_HELD);
	return 0;
}
EXPORT_SYMBOL_GPL(scif_close);
/**
 * scif_flush() - Wakes up any blocking accepts. The endpoint will no longer
 * accept new connections.
 * @epd: The end point returned from scif_open()
 */
int __scif_flush(scif_epd_t epd)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;

	switch (ep->state) {
	case SCIFEP_LISTENING:
	{
		ep->state = SCIFEP_CLLISTEN;

		/* If an accept is waiting wake it up */
		wake_up_interruptible(&ep->conwq);
		break;
	}
	default:
		break;
	}
	return 0;
}
int scif_bind(scif_epd_t epd, u16 pn)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int ret = 0;
	int tmp;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI bind: ep %p %s requested port number %d\n",
		ep, scif_ep_states[ep->state], pn);
	if (pn) {
		/*
		 * Similar to IETF RFC 1700, SCIF ports below
		 * SCIF_ADMIN_PORT_END can only be bound by system (or root)
		 * processes or by processes executed by privileged users.
		 */
		if (pn < SCIF_ADMIN_PORT_END && !capable(CAP_SYS_ADMIN)) {
			ret = -EACCES;
			goto scif_bind_admin_exit;
		}
	}

	spin_lock(&ep->lock);
	if (ep->state == SCIFEP_BOUND) {
		ret = -EINVAL;
		goto scif_bind_exit;
	} else if (ep->state != SCIFEP_UNBOUND) {
		ret = -EISCONN;
		goto scif_bind_exit;
	}

	if (pn) {
		tmp = scif_rsrv_port(pn);
		if (tmp != pn) {
			ret = -EINVAL;
			goto scif_bind_exit;
		}
	} else {
		pn = scif_get_new_port();
		if (!pn) {
			ret = -ENOSPC;
			goto scif_bind_exit;
		}
	}

	ep->state = SCIFEP_BOUND;
	ep->port.node = scif_info.nodeid;
	ep->port.port = pn;
	ep->conn_async_state = ASYNC_CONN_IDLE;
	ret = pn;
	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI bind: bound to port number %d\n", pn);
scif_bind_exit:
	spin_unlock(&ep->lock);
scif_bind_admin_exit:
	return ret;
}
EXPORT_SYMBOL_GPL(scif_bind);
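/*
 * Usage sketch (illustrative only): binding an endpoint. Requesting a
 * non-zero port below SCIF_ADMIN_PORT_END requires CAP_SYS_ADMIN;
 * passing 0 lets the driver pick a free port, and the bound port
 * number is returned on success.
 *
 *	int port;
 *
 *	port = scif_bind(epd, 0);
 *	if (port < 0)
 *		return port;
 */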
int scif_listen(scif_epd_t epd, int backlog)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI listen: ep %p %s\n", ep, scif_ep_states[ep->state]);
	spin_lock(&ep->lock);
	switch (ep->state) {
	case SCIFEP_ZOMBIE:
	case SCIFEP_CLOSING:
	case SCIFEP_CLLISTEN:
	case SCIFEP_UNBOUND:
	case SCIFEP_DISCONNECTED:
		spin_unlock(&ep->lock);
		return -EINVAL;
	case SCIFEP_LISTENING:
	case SCIFEP_CONNECTED:
	case SCIFEP_CONNECTING:
	case SCIFEP_MAPPING:
		spin_unlock(&ep->lock);
		return -EISCONN;
	case SCIFEP_BOUND:
		break;
	}

	ep->state = SCIFEP_LISTENING;
	ep->backlog = backlog;
	ep->conreqcnt = 0;
	ep->acceptcnt = 0;
	INIT_LIST_HEAD(&ep->conlist);
	init_waitqueue_head(&ep->conwq);
	INIT_LIST_HEAD(&ep->li_accept);
	spin_unlock(&ep->lock);

	/*
	 * The listen state is now set up, so tear down the qp information,
	 * which is not needed on a listening endpoint, before placing the
	 * endpoint on the list of listening ep's.
	 */
	scif_teardown_ep(ep);
	ep->qp_info.qp = NULL;

	spin_lock(&scif_info.eplock);
	list_add_tail(&ep->list, &scif_info.listen);
	spin_unlock(&scif_info.eplock);
	return 0;
}
EXPORT_SYMBOL_GPL(scif_listen);
/*
 ************************************************************************
 * SCIF connection flow:
 *
 * 1) A SCIF listening endpoint can call scif_accept(..) to wait for SCIF
 *	connections via a SCIF_CNCT_REQ message
 * 2) A SCIF endpoint can initiate a SCIF connection by calling
 *	scif_connect(..) which calls scif_setup_qp_connect(..) which
 *	allocates the local qp for the endpoint ring buffer and then sends
 *	a SCIF_CNCT_REQ to the remote node and waits for a SCIF_CNCT_GNT or
 *	a SCIF_CNCT_REJ message
 * 3) The peer node handles a SCIF_CNCT_REQ via scif_cnctreq_resp(..) which
 *	wakes up any threads blocked in step 1 or sends a SCIF_CNCT_REJ
 *	message otherwise
 * 4) A thread blocked waiting for incoming connections allocates its local
 *	endpoint QP and ring buffer following which it sends a SCIF_CNCT_GNT
 *	and waits for a SCIF_CNCT_GNT(N)ACK. If the allocation fails then
 *	the node sends a SCIF_CNCT_REJ message
 * 5) Upon receipt of a SCIF_CNCT_GNT or a SCIF_CNCT_REJ message the
 *	connecting endpoint is woken up as part of handling
 *	scif_cnctgnt_resp(..) following which it maps the remote endpoint's
 *	QP, updates its outbound QP and sends a SCIF_CNCT_GNTACK message on
 *	success or a SCIF_CNCT_GNTNACK message on failure and completes
 *	the scif_connect(..) API
 * 6) Upon receipt of a SCIF_CNCT_GNT(N)ACK the accepting endpoint blocked
 *	in step 4 is woken up and completes the scif_accept(..) API
 * 7) The SCIF connection is now established between the two SCIF endpoints.
 */
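/*
 * The same exchange in message-sequence form (a summary of steps 1-7
 * above; GNT(N)ACK denotes SCIF_CNCT_GNTACK or SCIF_CNCT_GNTNACK):
 *
 *	connecting endpoint                       listening endpoint
 *	       |--------------- SCIF_CNCT_REQ -------------->|
 *	       |<----- SCIF_CNCT_GNT / SCIF_CNCT_REJ --------|
 *	       |--------------- SCIF_CNCT_GNT(N)ACK -------->|
 */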
static int scif_conn_func(struct scif_endpt *ep)
{
	int err = 0;
	struct scifmsg msg;
	struct device *spdev;

	/* Initiate the first part of the endpoint QP setup */
	err = scif_setup_qp_connect(ep->qp_info.qp, &ep->qp_info.qp_offset,
				    SCIF_ENDPT_QP_SIZE, ep->remote_dev);
	if (err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s err %d qp_offset 0x%llx\n",
			__func__, err, ep->qp_info.qp_offset);
		ep->state = SCIFEP_BOUND;
		goto connect_error_simple;
	}

	spdev = scif_get_peer_dev(ep->remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		goto cleanup_qp;
	}
	/* Format connect message and send it */
	msg.src = ep->port;
	msg.dst = ep->conn_port;
	msg.uop = SCIF_CNCT_REQ;
	msg.payload[0] = (u64)ep;
	msg.payload[1] = ep->qp_info.qp_offset;
	err = _scif_nodeqp_send(ep->remote_dev, &msg);
	if (err)
		goto connect_error_dec;
	scif_put_peer_dev(spdev);
	/*
	 * Wait for the remote node to respond with SCIF_CNCT_GNT or
	 * SCIF_CNCT_REJ message.
	 */
	err = wait_event_timeout(ep->conwq, ep->state != SCIFEP_CONNECTING,
				 SCIF_NODE_ALIVE_TIMEOUT);
	if (!err) {
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d timeout\n", __func__, __LINE__);
		ep->state = SCIFEP_BOUND;
	}
	spdev = scif_get_peer_dev(ep->remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		goto cleanup_qp;
	}
	if (ep->state == SCIFEP_MAPPING) {
		err = scif_setup_qp_connect_response(ep->remote_dev,
						     ep->qp_info.qp,
						     ep->qp_info.gnt_pld);
		/*
		 * If the resources to map the queue are not available then
		 * we need to tell the other side to terminate the accept.
		 */
		if (err) {
			dev_err(&ep->remote_dev->sdev->dev,
				"%s %d err %d\n", __func__, __LINE__, err);
			msg.uop = SCIF_CNCT_GNTNACK;
			msg.payload[0] = ep->remote_ep;
			_scif_nodeqp_send(ep->remote_dev, &msg);
			ep->state = SCIFEP_BOUND;
			goto connect_error_dec;
		}
		msg.uop = SCIF_CNCT_GNTACK;
		msg.payload[0] = ep->remote_ep;
		err = _scif_nodeqp_send(ep->remote_dev, &msg);
		if (err) {
			ep->state = SCIFEP_BOUND;
			goto connect_error_dec;
		}
		ep->state = SCIFEP_CONNECTED;
		mutex_lock(&scif_info.connlock);
		list_add_tail(&ep->list, &scif_info.connected);
		mutex_unlock(&scif_info.connlock);
		dev_dbg(&ep->remote_dev->sdev->dev,
			"SCIFAPI connect: ep %p connected\n", ep);
	} else if (ep->state == SCIFEP_BOUND) {
		dev_dbg(&ep->remote_dev->sdev->dev,
			"SCIFAPI connect: ep %p connection refused\n", ep);
		err = -ECONNREFUSED;
		goto connect_error_dec;
	}
	scif_put_peer_dev(spdev);
	return err;
connect_error_dec:
	scif_put_peer_dev(spdev);
cleanup_qp:
	scif_cleanup_ep_qp(ep);
connect_error_simple:
	return err;
}
/*
 * scif_conn_handler:
 *
 * Workqueue handler for servicing non-blocking SCIF connect
 *
 */
void scif_conn_handler(struct work_struct *work)
{
	struct scif_endpt *ep;

	do {
		ep = NULL;
		spin_lock(&scif_info.nb_connect_lock);
		if (!list_empty(&scif_info.nb_connect_list)) {
			ep = list_first_entry(&scif_info.nb_connect_list,
					      struct scif_endpt, conn_list);
			list_del(&ep->conn_list);
		}
		spin_unlock(&scif_info.nb_connect_lock);
		if (ep)
			ep->conn_err = scif_conn_func(ep);
	} while (ep);
}
int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;
	struct scif_dev *remote_dev;
	struct device *spdev;

	dev_dbg(scif_info.mdev.this_device, "SCIFAPI connect: ep %p %s\n", ep,
		scif_ep_states[ep->state]);

	if (!scif_dev || dst->node > scif_info.maxid)
		return -ENODEV;

	might_sleep();

	remote_dev = &scif_dev[dst->node];
	spdev = scif_get_peer_dev(remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		return err;
	}

	spin_lock(&ep->lock);
	switch (ep->state) {
	case SCIFEP_ZOMBIE:
	case SCIFEP_CLOSING:
		err = -EINVAL;
		break;
	case SCIFEP_DISCONNECTED:
		if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
			ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
		else
			err = -EINVAL;
		break;
	case SCIFEP_LISTENING:
	case SCIFEP_CLLISTEN:
		err = -EOPNOTSUPP;
		break;
	case SCIFEP_CONNECTING:
	case SCIFEP_MAPPING:
		if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
			err = -EINPROGRESS;
		else
			err = -EISCONN;
		break;
	case SCIFEP_CONNECTED:
		if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
			ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
		else
			err = -EISCONN;
		break;
	case SCIFEP_UNBOUND:
		ep->port.port = scif_get_new_port();
		if (!ep->port.port) {
			err = -ENOSPC;
		} else {
			ep->port.node = scif_info.nodeid;
			ep->conn_async_state = ASYNC_CONN_IDLE;
		}
		/* Fall through */
	case SCIFEP_BOUND:
		/*
		 * If a non-blocking connect has already been initiated
		 * (conn_async_state is either ASYNC_CONN_INPROGRESS or
		 * ASYNC_CONN_FLUSH_WORK), the end point could end up in
		 * SCIF_BOUND due to an error in the connection process
		 * (e.g., connection refused). If conn_async_state is
		 * ASYNC_CONN_INPROGRESS - transition to ASYNC_CONN_FLUSH_WORK
		 * so that the error status can be collected. If the state is
		 * already ASYNC_CONN_FLUSH_WORK - then set the error to
		 * EINPROGRESS since some other thread is waiting to collect
		 * the error status.
		 */
		if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
			ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
		} else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) {
			err = -EINPROGRESS;
		} else {
			ep->conn_port = *dst;
			init_waitqueue_head(&ep->sendwq);
			init_waitqueue_head(&ep->recvwq);
			init_waitqueue_head(&ep->conwq);
			ep->conn_async_state = 0;

			if (unlikely(non_block))
				ep->conn_async_state = ASYNC_CONN_INPROGRESS;
		}
		break;
	}

	if (err || ep->conn_async_state == ASYNC_CONN_FLUSH_WORK)
		goto connect_simple_unlock1;

	ep->state = SCIFEP_CONNECTING;
	ep->remote_dev = &scif_dev[dst->node];
	ep->qp_info.qp->magic = SCIFEP_MAGIC;
	if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
		spin_lock(&scif_info.nb_connect_lock);
		list_add_tail(&ep->conn_list, &scif_info.nb_connect_list);
		spin_unlock(&scif_info.nb_connect_lock);
		err = -EINPROGRESS;
		schedule_work(&scif_info.conn_work);
	}
connect_simple_unlock1:
	spin_unlock(&ep->lock);
	scif_put_peer_dev(spdev);
	if (err) {
		return err;
	} else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) {
		flush_work(&scif_info.conn_work);
		err = ep->conn_err;
		spin_lock(&ep->lock);
		ep->conn_async_state = ASYNC_CONN_IDLE;
		spin_unlock(&ep->lock);
	} else {
		err = scif_conn_func(ep);
	}
	return err;
}

int scif_connect(scif_epd_t epd, struct scif_port_id *dst)
{
	return __scif_connect(epd, dst, false);
}
EXPORT_SYMBOL_GPL(scif_connect);
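/*
 * Usage sketch (illustrative only): a blocking connect to a remote
 * listener. The node id and port number are arbitrary example values.
 *
 *	struct scif_port_id dst = { .node = 1, .port = 2000 };
 *	int err;
 *
 *	err = scif_connect(epd, &dst);
 *	if (err < 0)
 *		return err;
 */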
/**
 * scif_accept() - Accept a connection request from the remote node
 *
 * The function accepts a connection request from the remote node. Successful
 * completion is indicated by a new end point being created and passed back
 * to the caller for future reference.
 *
 * Upon successful completion zero will be returned and the peer information
 * will be filled in.
 *
 * If the end point is not in the listening state -EINVAL will be returned.
 *
 * If resource allocation fails during the connection sequence, -ENOMEM will
 * be returned.
 *
 * If the function is called without the SCIF_ACCEPT_SYNC flag and no
 * connection requests are pending it will return -EAGAIN.
 *
 * If the remote side is not sending any connection requests the caller may
 * terminate this function with a signal. If so, -EINTR will be returned.
 */
int scif_accept(scif_epd_t epd, struct scif_port_id *peer,
		scif_epd_t *newepd, int flags)
{
	struct scif_endpt *lep = (struct scif_endpt *)epd;
	struct scif_endpt *cep;
	struct scif_conreq *conreq;
	struct scifmsg msg;
	int err;
	struct device *spdev;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI accept: ep %p %s\n", lep, scif_ep_states[lep->state]);

	if (flags & ~SCIF_ACCEPT_SYNC)
		return -EINVAL;

	if (!peer || !newepd)
		return -EINVAL;

	might_sleep();
	spin_lock(&lep->lock);
	if (lep->state != SCIFEP_LISTENING) {
		spin_unlock(&lep->lock);
		return -EINVAL;
	}

	if (!lep->conreqcnt && !(flags & SCIF_ACCEPT_SYNC)) {
		/* No connection request present and we do not want to wait */
		spin_unlock(&lep->lock);
		return -EAGAIN;
	}

	lep->files = current->files;
retry_connection:
	spin_unlock(&lep->lock);
	/* Wait for the remote node to send us a SCIF_CNCT_REQ */
	err = wait_event_interruptible(lep->conwq,
				       (lep->conreqcnt ||
				       (lep->state != SCIFEP_LISTENING)));
	if (err)
		return err;

	if (lep->state != SCIFEP_LISTENING)
		return -EINTR;

	spin_lock(&lep->lock);

	if (!lep->conreqcnt)
		goto retry_connection;

	/* Get the first connect request off the list */
	conreq = list_first_entry(&lep->conlist, struct scif_conreq, list);
	list_del(&conreq->list);
	lep->conreqcnt--;
	spin_unlock(&lep->lock);

	/* Fill in the peer information */
	peer->node = conreq->msg.src.node;
	peer->port = conreq->msg.src.port;

	cep = kzalloc(sizeof(*cep), GFP_KERNEL);
	if (!cep) {
		err = -ENOMEM;
		goto scif_accept_error_epalloc;
	}
	spin_lock_init(&cep->lock);
	mutex_init(&cep->sendlock);
	mutex_init(&cep->recvlock);
	cep->state = SCIFEP_CONNECTING;
	cep->remote_dev = &scif_dev[peer->node];
	cep->remote_ep = conreq->msg.payload[0];

	cep->qp_info.qp = kzalloc(sizeof(*cep->qp_info.qp), GFP_KERNEL);
	if (!cep->qp_info.qp) {
		err = -ENOMEM;
		goto scif_accept_error_qpalloc;
	}

	cep->qp_info.qp->magic = SCIFEP_MAGIC;
	spdev = scif_get_peer_dev(cep->remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		goto scif_accept_error_map;
	}
	err = scif_setup_qp_accept(cep->qp_info.qp, &cep->qp_info.qp_offset,
				   conreq->msg.payload[1], SCIF_ENDPT_QP_SIZE,
				   cep->remote_dev);
	if (err) {
		dev_dbg(&cep->remote_dev->sdev->dev,
			"SCIFAPI accept: ep %p new %p scif_setup_qp_accept %d qp_offset 0x%llx\n",
			lep, cep, err, cep->qp_info.qp_offset);
		scif_put_peer_dev(spdev);
		goto scif_accept_error_map;
	}

	cep->port.node = lep->port.node;
	cep->port.port = lep->port.port;
	cep->peer.node = peer->node;
	cep->peer.port = peer->port;
	init_waitqueue_head(&cep->sendwq);
	init_waitqueue_head(&cep->recvwq);
	init_waitqueue_head(&cep->conwq);

	msg.uop = SCIF_CNCT_GNT;
	msg.src = cep->port;
	msg.payload[0] = cep->remote_ep;
	msg.payload[1] = cep->qp_info.qp_offset;
	msg.payload[2] = (u64)cep;

	err = _scif_nodeqp_send(cep->remote_dev, &msg);
	scif_put_peer_dev(spdev);
	if (err)
		goto scif_accept_error_map;
retry:
	/* Wait for the remote node to respond with SCIF_CNCT_GNT(N)ACK */
	err = wait_event_timeout(cep->conwq, cep->state != SCIFEP_CONNECTING,
				 SCIF_NODE_ACCEPT_TIMEOUT);
	if (!err && scifdev_alive(cep))
		goto retry;
	err = !err ? -ENODEV : 0;
	if (err)
		goto scif_accept_error_map;
	kfree(conreq);

	spin_lock(&cep->lock);

	if (cep->state == SCIFEP_CLOSING) {
		/*
		 * Remote failed to allocate resources and NAKed the grant.
		 * There is at this point nothing referencing the new end point.
		 */
		spin_unlock(&cep->lock);
		scif_teardown_ep(cep);
		kfree(cep);

		/* If called with the sync flag then go back and wait. */
		if (flags & SCIF_ACCEPT_SYNC) {
			spin_lock(&lep->lock);
			goto retry_connection;
		}
		return -EAGAIN;
	}

	scif_get_port(cep->port.port);
	*newepd = (scif_epd_t)cep;
	spin_unlock(&cep->lock);
	return 0;
scif_accept_error_map:
	scif_teardown_ep(cep);
scif_accept_error_qpalloc:
	kfree(cep);
scif_accept_error_epalloc:
	msg.uop = SCIF_CNCT_REJ;
	msg.dst.node = conreq->msg.src.node;
	msg.dst.port = conreq->msg.src.port;
	msg.payload[0] = conreq->msg.payload[0];
	msg.payload[1] = conreq->msg.payload[1];
	scif_nodeqp_send(&scif_dev[conreq->msg.src.node], &msg);
	kfree(conreq);
	return err;
}
EXPORT_SYMBOL_GPL(scif_accept);
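/*
 * Usage sketch (illustrative only): the accepting side, assuming epd
 * was already bound with scif_bind(). scif_accept() with
 * SCIF_ACCEPT_SYNC blocks until a connection request arrives; newepd
 * then refers to the connected endpoint and is closed separately
 * from epd.
 *
 *	struct scif_port_id peer;
 *	scif_epd_t newepd;
 *	int err;
 *
 *	err = scif_listen(epd, 16);
 *	if (err)
 *		return err;
 *	err = scif_accept(epd, &peer, &newepd, SCIF_ACCEPT_SYNC);
 *	if (err)
 *		return err;
 */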
/*
 * scif_msg_param_check:
 * @epd: The end point returned from scif_open()
 * @len: Length of the message
 * @flags: blocking or non blocking
 *
 * Validate parameters for messaging APIs scif_send(..)/scif_recv(..).
 */
static inline int scif_msg_param_check(scif_epd_t epd, int len, int flags)
{
	int ret = -EINVAL;

	if (len < 0)
		goto err_ret;

	if (flags && (!(flags & SCIF_RECV_BLOCK)))
		goto err_ret;

	ret = 0;
err_ret:
	return ret;
}
static int _scif_send(scif_epd_t epd, void *msg, int len, int flags)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct scifmsg notif_msg;
	int curr_xfer_len = 0, sent_len = 0, write_count;
	int ret = 0;
	struct scif_qp *qp = ep->qp_info.qp;

	if (flags & SCIF_SEND_BLOCK)
		might_sleep();

	spin_lock(&ep->lock);
	while (sent_len != len && SCIFEP_CONNECTED == ep->state) {
		write_count = scif_rb_space(&qp->outbound_q);
		if (write_count) {
			/* Best effort to send as much data as possible */
			curr_xfer_len = min(len - sent_len, write_count);
			ret = scif_rb_write(&qp->outbound_q, msg,
					    curr_xfer_len);
			if (ret < 0)
				break;
			/* Success. Update write pointer */
			scif_rb_commit(&qp->outbound_q);
			/*
			 * Send a notification to the peer about the
			 * produced data message.
			 */
			notif_msg.src = ep->port;
			notif_msg.uop = SCIF_CLIENT_SENT;
			notif_msg.payload[0] = ep->remote_ep;
			ret = _scif_nodeqp_send(ep->remote_dev, &notif_msg);
			if (ret)
				break;
			sent_len += curr_xfer_len;
			msg = msg + curr_xfer_len;
			continue;
		}
		curr_xfer_len = min(len - sent_len, SCIF_ENDPT_QP_SIZE - 1);
		/* Not enough RB space. Return in the non-blocking case */
		if (!(flags & SCIF_SEND_BLOCK))
			break;

		spin_unlock(&ep->lock);
		/* Wait for a SCIF_CLIENT_RCVD message in the blocking case */
		ret =
		wait_event_interruptible(ep->sendwq,
					 (SCIFEP_CONNECTED != ep->state) ||
					 (scif_rb_space(&qp->outbound_q) >=
					 curr_xfer_len));
		spin_lock(&ep->lock);
		if (ret)
			break;
	}
	if (sent_len)
		ret = sent_len;
	else if (!ret && SCIFEP_CONNECTED != ep->state)
		ret = SCIFEP_DISCONNECTED == ep->state ?
			-ECONNRESET : -ENOTCONN;
	spin_unlock(&ep->lock);
	return ret;
}
static int _scif_recv(scif_epd_t epd, void *msg, int len, int flags)
{
	int read_size;
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct scifmsg notif_msg;
	int curr_recv_len = 0, remaining_len = len, read_count;
	int ret = 0;
	struct scif_qp *qp = ep->qp_info.qp;

	if (flags & SCIF_RECV_BLOCK)
		might_sleep();
	spin_lock(&ep->lock);
	while (remaining_len && (SCIFEP_CONNECTED == ep->state ||
				 SCIFEP_DISCONNECTED == ep->state)) {
		read_count = scif_rb_count(&qp->inbound_q, remaining_len);
		if (read_count) {
			/*
			 * Best effort to recv as much data as there
			 * are bytes to read in the RB, particularly
			 * important for the non-blocking case.
			 */
			curr_recv_len = min(remaining_len, read_count);
			read_size = scif_rb_get_next(&qp->inbound_q,
						     msg, curr_recv_len);
			if (ep->state == SCIFEP_CONNECTED) {
				/*
				 * Update the read pointer only if the endpoint
				 * is still connected else the read pointer
				 * might no longer exist since the peer has
				 * freed resources!
				 */
				scif_rb_update_read_ptr(&qp->inbound_q);
				/*
				 * Send a notification to the peer about the
				 * consumed data message only if the EP is in
				 * SCIFEP_CONNECTED state.
				 */
				notif_msg.src = ep->port;
				notif_msg.uop = SCIF_CLIENT_RCVD;
				notif_msg.payload[0] = ep->remote_ep;
				ret = _scif_nodeqp_send(ep->remote_dev,
							&notif_msg);
				if (ret)
					break;
			}
			remaining_len -= curr_recv_len;
			msg = msg + curr_recv_len;
			continue;
		}
		/*
		 * Bail out now if the EP is in SCIFEP_DISCONNECTED state else
		 * we will keep looping forever.
		 */
		if (ep->state == SCIFEP_DISCONNECTED)
			break;
		/*
		 * Return in the non-blocking case if there is no data
		 * to read in this iteration.
		 */
		if (!(flags & SCIF_RECV_BLOCK))
			break;
		curr_recv_len = min(remaining_len, SCIF_ENDPT_QP_SIZE - 1);
		spin_unlock(&ep->lock);
		/*
		 * Wait for a SCIF_CLIENT_SENT message in the blocking case
		 * or until the other side disconnects.
		 */
		ret =
		wait_event_interruptible(ep->recvwq,
					 SCIFEP_CONNECTED != ep->state ||
					 scif_rb_count(&qp->inbound_q,
						       curr_recv_len)
					 >= curr_recv_len);
		spin_lock(&ep->lock);
		if (ret)
			break;
	}
	if (len - remaining_len)
		ret = len - remaining_len;
	else if (!ret && ep->state != SCIFEP_CONNECTED)
		ret = ep->state == SCIFEP_DISCONNECTED ?
			-ECONNRESET : -ENOTCONN;
	spin_unlock(&ep->lock);
	return ret;
}
/**
 * scif_user_send() - Send data to connection queue
 * @epd: The end point returned from scif_open()
 * @msg: Address of data to send
 * @len: Length to send
 * @flags: blocking or non blocking
 *
 * This function is called from the driver IOCTL entry point
 * only and is a wrapper for _scif_send().
 */
int scif_user_send(scif_epd_t epd, void __user *msg, int len, int flags)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;
	int sent_len = 0;
	char *tmp;
	int loop_len;
	int chunk_len = min(len, (1 << (MAX_ORDER + PAGE_SHIFT - 1)));

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI send (U): ep %p %s\n", ep, scif_ep_states[ep->state]);
	if (!len)
		return 0;

	err = scif_msg_param_check(epd, len, flags);
	if (err)
		goto send_err;

	tmp = kmalloc(chunk_len, GFP_KERNEL);
	if (!tmp) {
		err = -ENOMEM;
		goto send_err;
	}
	/*
	 * Grabbing the lock before breaking up the transfer in
	 * multiple chunks is required to ensure that messages do
	 * not get fragmented and reordered.
	 */
	mutex_lock(&ep->sendlock);
	while (sent_len != len) {
		loop_len = len - sent_len;
		loop_len = min(chunk_len, loop_len);
		if (copy_from_user(tmp, msg, loop_len)) {
			err = -EFAULT;
			goto send_free_err;
		}
		err = _scif_send(epd, tmp, loop_len, flags);
		if (err < 0)
			goto send_free_err;
		sent_len += err;
		msg += err;
		if (err != loop_len)
			goto send_free_err;
	}
send_free_err:
	mutex_unlock(&ep->sendlock);
	kfree(tmp);
send_err:
	return err < 0 ? err : sent_len;
}
/**
 * scif_user_recv() - Receive data from connection queue
 * @epd: The end point returned from scif_open()
 * @msg: Address to place data
 * @len: Length to receive
 * @flags: blocking or non blocking
 *
 * This function is called from the driver IOCTL entry point
 * only and is a wrapper for _scif_recv().
 */
int scif_user_recv(scif_epd_t epd, void __user *msg, int len, int flags)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;
	int recv_len = 0;
	char *tmp;
	int loop_len;
	int chunk_len = min(len, (1 << (MAX_ORDER + PAGE_SHIFT - 1)));

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI recv (U): ep %p %s\n", ep, scif_ep_states[ep->state]);
	if (!len)
		return 0;

	err = scif_msg_param_check(epd, len, flags);
	if (err)
		goto recv_err;

	tmp = kmalloc(chunk_len, GFP_KERNEL);
	if (!tmp) {
		err = -ENOMEM;
		goto recv_err;
	}
	/*
	 * Grabbing the lock before breaking up the transfer in
	 * multiple chunks is required to ensure that messages do
	 * not get fragmented and reordered.
	 */
	mutex_lock(&ep->recvlock);
	while (recv_len != len) {
		loop_len = len - recv_len;
		loop_len = min(chunk_len, loop_len);
		err = _scif_recv(epd, tmp, loop_len, flags);
		if (err < 0)
			goto recv_free_err;
		if (copy_to_user(msg, tmp, err)) {
			err = -EFAULT;
			goto recv_free_err;
		}
		recv_len += err;
		msg += err;
		if (err != loop_len)
			goto recv_free_err;
	}
recv_free_err:
	mutex_unlock(&ep->recvlock);
	kfree(tmp);
recv_err:
	return err < 0 ? err : recv_len;
}
/**
 * scif_send() - Send data to connection queue
 * @epd: The end point returned from scif_open()
 * @msg: Address of data to send
 * @len: Length to send
 * @flags: blocking or non blocking
 *
 * This function is called from kernel mode only and is
 * a wrapper for _scif_send().
 */
int scif_send(scif_epd_t epd, void *msg, int len, int flags)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int ret;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI send (K): ep %p %s\n", ep, scif_ep_states[ep->state]);
	if (!len)
		return 0;

	ret = scif_msg_param_check(epd, len, flags);
	if (ret)
		return ret;
	if (!ep->remote_dev)
		return -ENOTCONN;
	/*
	 * Grab the mutex lock in the blocking case only
	 * to ensure messages do not get fragmented/reordered.
	 * The non blocking mode is protected using spin locks
	 * in _scif_send().
	 */
	if (flags & SCIF_SEND_BLOCK)
		mutex_lock(&ep->sendlock);

	ret = _scif_send(epd, msg, len, flags);

	if (flags & SCIF_SEND_BLOCK)
		mutex_unlock(&ep->sendlock);
	return ret;
}
EXPORT_SYMBOL_GPL(scif_send);
/**
 * scif_recv() - Receive data from connection queue
 * @epd: The end point returned from scif_open()
 * @msg: Address to place data
 * @len: Length to receive
 * @flags: blocking or non blocking
 *
 * This function is called from kernel mode only and is
 * a wrapper for _scif_recv().
 */
int scif_recv(scif_epd_t epd, void *msg, int len, int flags)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int ret;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI recv (K): ep %p %s\n", ep, scif_ep_states[ep->state]);
	if (!len)
		return 0;

	ret = scif_msg_param_check(epd, len, flags);
	if (ret)
		return ret;
	/*
	 * Grab the mutex lock in the blocking case only
	 * to ensure messages do not get fragmented/reordered.
	 * The non blocking mode is protected using spin locks
	 * in _scif_recv().
	 */
	if (flags & SCIF_RECV_BLOCK)
		mutex_lock(&ep->recvlock);

	ret = _scif_recv(epd, msg, len, flags);

	if (flags & SCIF_RECV_BLOCK)
		mutex_unlock(&ep->recvlock);
	return ret;
}
EXPORT_SYMBOL_GPL(scif_recv);
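/*
 * Usage sketch (illustrative only): a blocking echo over a connected
 * endpoint. With SCIF_RECV_BLOCK/SCIF_SEND_BLOCK the calls return only
 * once the full length has been transferred or the connection breaks.
 *
 *	char buf[64];
 *	int ret;
 *
 *	ret = scif_recv(epd, buf, sizeof(buf), SCIF_RECV_BLOCK);
 *	if (ret < 0)
 *		return ret;
 *	ret = scif_send(epd, buf, ret, SCIF_SEND_BLOCK);
 *	if (ret < 0)
 *		return ret;
 */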
int scif_get_node_ids(u16 *nodes, int len, u16 *self)
{
	int online = 0;
	int offset = 0;
	int node;

	if (!scif_is_mgmt_node())
		scif_get_node_info();

	*self = scif_info.nodeid;
	mutex_lock(&scif_info.conflock);
	len = min_t(int, len, scif_info.total);
	for (node = 0; node <= scif_info.maxid; node++) {
		if (_scifdev_alive(&scif_dev[node])) {
			online++;
			if (offset < len)
				nodes[offset++] = node;
		}
	}
	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI get_node_ids total %d online %d filled in %d nodes\n",
		scif_info.total, online, offset);
	mutex_unlock(&scif_info.conflock);

	return online;
}
EXPORT_SYMBOL_GPL(scif_get_node_ids);
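/*
 * Usage sketch (illustrative only): enumerating online nodes. The
 * return value is the total number of online nodes, which may exceed
 * the number of ids actually written when the array is too small.
 *
 *	u16 nodes[8], self;
 *	int online;
 *
 *	online = scif_get_node_ids(nodes, ARRAY_SIZE(nodes), &self);
 */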