vudc_tx.c

/*
 * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
 * Copyright (C) 2015-2016 Samsung Electronics
 *               Igor Kotrasinski <i.kotrasinsk@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <net/sock.h>
#include <linux/list.h>
#include <linux/kthread.h>

#include "usbip_common.h"
#include "vudc.h"
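
/*
 * Fill in the common usbip_header_basic fields of a reply PDU;
 * device-side replies carry zeroed devid/ep/direction.
 */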
static inline void setup_base_pdu(struct usbip_header_basic *base,
                                  __u32 command, __u32 seqnum)
{
        base->command = command;
        base->seqnum = seqnum;
        base->devid = 0;
        base->ep = 0;
        base->direction = 0;
}
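
/* Build a USBIP_RET_SUBMIT header for a completed URB. */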
static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urbp *urb_p)
{
        setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, urb_p->seqnum);
        usbip_pack_pdu(rpdu, urb_p->urb, USBIP_RET_SUBMIT, 1);
}
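
/* Build a USBIP_RET_UNLINK header carrying the unlink status. */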
static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
                                 struct v_unlink *unlink)
{
        setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
        rpdu->u.ret_unlink.status = unlink->status;
}
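
/*
 * Send a RET_UNLINK reply over the usbip TCP socket.  On success the
 * v_unlink descriptor is freed and the number of bytes sent is returned;
 * a short or failed send raises VUDC_EVENT_ERROR_TCP and returns a
 * negative errno.
 */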
static int v_send_ret_unlink(struct vudc *udc, struct v_unlink *unlink)
{
        struct msghdr msg;
        struct kvec iov[1];
        size_t txsize;
        int ret;
        struct usbip_header pdu_header;

        txsize = 0;
        memset(&pdu_header, 0, sizeof(pdu_header));
        memset(&msg, 0, sizeof(msg));
        memset(&iov, 0, sizeof(iov));

        /* 1. setup usbip_header */
        setup_ret_unlink_pdu(&pdu_header, unlink);
        usbip_header_correct_endian(&pdu_header, 1);

        iov[0].iov_base = &pdu_header;
        iov[0].iov_len = sizeof(pdu_header);
        txsize += sizeof(pdu_header);

        ret = kernel_sendmsg(udc->ud.tcp_socket, &msg, iov,
                             1, txsize);
        if (ret != txsize) {
                usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
                if (ret >= 0)
                        return -EPIPE;
                return ret;
        }
        kfree(unlink);

        return txsize;
}
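
/*
 * Send a RET_SUBMIT reply for a completed URB: the usbip header, the IN
 * transfer data (per-packet for isochronous transfers) and, for
 * isochronous URBs, the packed iso_packet_descriptor array are sent in a
 * single kernel_sendmsg() call.  The urbp and its URB are always freed
 * before returning.
 */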
static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
        struct urb *urb = urb_p->urb;
        struct usbip_header pdu_header;
        struct usbip_iso_packet_descriptor *iso_buffer = NULL;
        struct kvec *iov = NULL;
        int iovnum = 0;
        int ret = 0;
        size_t txsize;
        struct msghdr msg;

        txsize = 0;
        memset(&pdu_header, 0, sizeof(pdu_header));
        memset(&msg, 0, sizeof(msg));

        if (urb->actual_length > 0 && !urb->transfer_buffer) {
                dev_err(&udc->gadget.dev,
                        "urb: actual_length %d transfer_buffer null\n",
                        urb->actual_length);
                return -1;
        }

        if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
                iovnum = 2 + urb->number_of_packets;
        else
                iovnum = 2;

        iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
        if (!iov) {
                usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
                ret = -ENOMEM;
                goto out;
        }
        iovnum = 0;

        /* 1. setup usbip_header */
        setup_ret_submit_pdu(&pdu_header, urb_p);
        usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
                          pdu_header.base.seqnum);
        usbip_header_correct_endian(&pdu_header, 1);

        iov[iovnum].iov_base = &pdu_header;
        iov[iovnum].iov_len = sizeof(pdu_header);
        iovnum++;
        txsize += sizeof(pdu_header);

        /* 2. setup transfer buffer */
        if (urb_p->type != USB_ENDPOINT_XFER_ISOC &&
            usb_pipein(urb->pipe) && urb->actual_length > 0) {
                iov[iovnum].iov_base = urb->transfer_buffer;
                iov[iovnum].iov_len = urb->actual_length;
                iovnum++;
                txsize += urb->actual_length;
        } else if (urb_p->type == USB_ENDPOINT_XFER_ISOC &&
                   usb_pipein(urb->pipe)) {
                /* FIXME - copypasted from stub_tx, refactor */
                int i;

                for (i = 0; i < urb->number_of_packets; i++) {
                        iov[iovnum].iov_base = urb->transfer_buffer +
                                urb->iso_frame_desc[i].offset;
                        iov[iovnum].iov_len =
                                urb->iso_frame_desc[i].actual_length;
                        iovnum++;
                        txsize += urb->iso_frame_desc[i].actual_length;
                }

                if (txsize != sizeof(pdu_header) + urb->actual_length) {
                        usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
                        ret = -EPIPE;
                        goto out;
                }
        }
        /* else - no buffer to send */

        /* 3. setup iso_packet_descriptor */
        if (urb_p->type == USB_ENDPOINT_XFER_ISOC) {
                ssize_t len = 0;

                iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
                if (!iso_buffer) {
                        usbip_event_add(&udc->ud,
                                        VUDC_EVENT_ERROR_MALLOC);
                        ret = -ENOMEM;
                        goto out;
                }
                iov[iovnum].iov_base = iso_buffer;
                iov[iovnum].iov_len = len;
                txsize += len;
                iovnum++;
        }

        ret = kernel_sendmsg(udc->ud.tcp_socket, &msg,
                             iov, iovnum, txsize);
        if (ret != txsize) {
                usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
                if (ret >= 0)
                        ret = -EPIPE;
                goto out;
        }

out:
        kfree(iov);
        kfree(iso_buffer);
        free_urbp_and_urb(urb_p);
        if (ret < 0)
                return ret;
        return txsize;
}
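
/*
 * Drain udc->tx_queue, sending one reply per queued tx_item.  The tx lock
 * is dropped around each send so completion paths can keep queueing;
 * returns the total number of bytes sent or a negative errno.
 */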
static int v_send_ret(struct vudc *udc)
{
        unsigned long flags;
        struct tx_item *txi;
        size_t total_size = 0;
        int ret = 0;

        spin_lock_irqsave(&udc->lock_tx, flags);
        while (!list_empty(&udc->tx_queue)) {
                txi = list_first_entry(&udc->tx_queue, struct tx_item,
                                       tx_entry);
                list_del(&txi->tx_entry);
                spin_unlock_irqrestore(&udc->lock_tx, flags);

                switch (txi->type) {
                case TX_SUBMIT:
                        ret = v_send_ret_submit(udc, txi->s);
                        break;
                case TX_UNLINK:
                        ret = v_send_ret_unlink(udc, txi->u);
                        break;
                }
                kfree(txi);

                if (ret < 0)
                        return ret;

                total_size += ret;

                spin_lock_irqsave(&udc->lock_tx, flags);
        }

        spin_unlock_irqrestore(&udc->lock_tx, flags);
        return total_size;
}
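
/*
 * Transmit thread: sleep on tx_waitq until replies are queued, send them,
 * and exit on a usbip event, a send error or kthread_stop().
 */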
int v_tx_loop(void *data)
{
        struct usbip_device *ud = (struct usbip_device *) data;
        struct vudc *udc = container_of(ud, struct vudc, ud);
        int ret;

        while (!kthread_should_stop()) {
                if (usbip_event_happened(&udc->ud))
                        break;
                ret = v_send_ret(udc);
                if (ret < 0) {
                        pr_warn("v_tx exit with error %d", ret);
                        break;
                }
                wait_event_interruptible(udc->tx_waitq,
                                         (!list_empty(&udc->tx_queue) ||
                                          kthread_should_stop()));
        }

        return 0;
}
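
/* Queue a RET_UNLINK reply (seqnum, status) for the tx thread. */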
/* called with spinlocks held */
void v_enqueue_ret_unlink(struct vudc *udc, __u32 seqnum, __u32 status)
{
        struct tx_item *txi;
        struct v_unlink *unlink;

        txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
        if (!txi) {
                usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
                return;
        }
        unlink = kzalloc(sizeof(*unlink), GFP_ATOMIC);
        if (!unlink) {
                kfree(txi);
                usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
                return;
        }

        unlink->seqnum = seqnum;
        unlink->status = status;
        txi->type = TX_UNLINK;
        txi->u = unlink;

        list_add_tail(&txi->tx_entry, &udc->tx_queue);
}
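
/* Queue a RET_SUBMIT reply for a completed urbp for the tx thread. */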
/* called with spinlocks held */
void v_enqueue_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
        struct tx_item *txi;

        txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
        if (!txi) {
                usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
                return;
        }

        txi->type = TX_SUBMIT;
        txi->s = urb_p;

        list_add_tail(&txi->tx_entry, &udc->tx_queue);
}