f_ccid.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999
  1. /*
  2. * f_ccid.c -- CCID function Driver
  3. *
  4. * Copyright (c) 2011, The Linux Foundation. All rights reserved.
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 and
  7. * only version 2 as published by the Free Software Foundation.
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details
  12. */
  13. #include <linux/slab.h>
  14. #include <linux/kernel.h>
  15. #include <linux/device.h>
  16. #include <linux/fs.h>
  17. #include <linux/usb/ccid_desc.h>
  18. #include <linux/miscdevice.h>
  19. #include "f_ccid.h"
  20. #define BULK_IN_BUFFER_SIZE sizeof(struct ccid_bulk_in_header)
  21. #define BULK_OUT_BUFFER_SIZE sizeof(struct ccid_bulk_out_header)
  22. #define CTRL_BUF_SIZE 4
  23. #define FUNCTION_NAME "ccid"
  24. #define CCID_NOTIFY_INTERVAL 5
  25. #define CCID_NOTIFY_MAXPACKET 4
  26. /* number of tx requests to allocate */
  27. #define TX_REQ_MAX 4
/*
 * Per-function state backing the /dev/ccid_ctrl misc device.
 * Class control requests received on ep0 (ABORT) are staged in @buf
 * and handed to the userspace reader via the wait queue.
 */
struct ccid_ctrl_dev {
	atomic_t opened;		/* single-open guard for the char dev */
	struct list_head tx_q;		/* initialized in init; not otherwise used in this file */
	wait_queue_head_t tx_wait_q;	/* readers sleep here until tx_ctrl_done */
	unsigned char buf[CTRL_BUF_SIZE];	/* staged 4-byte control payload */
	int tx_ctrl_done;		/* flag: @buf holds a fresh request */
};
/*
 * Per-function state backing the /dev/ccid_bulk misc device, which
 * bridges the bulk IN/OUT endpoints to userspace read()/write().
 */
struct ccid_bulk_dev {
	atomic_t error;			/* latched I/O error; cleared on open */
	atomic_t opened;		/* single-open guard for the char dev */
	atomic_t rx_req_busy;		/* rx_req buffer in use by copy_to_user */
	wait_queue_head_t read_wq;	/* readers wait for rx completion */
	wait_queue_head_t write_wq;	/* writers wait for an idle tx request */
	struct usb_request *rx_req;	/* single reusable OUT request */
	int rx_done;			/* set by the OUT completion handler */
	struct list_head tx_idle;	/* pool of idle IN requests (TX_REQ_MAX) */
};
/* Top-level CCID function instance (one global instance, see _ccid_dev). */
struct f_ccid {
	struct usb_function function;	/* composite-framework function */
	struct usb_composite_dev *cdev;
	int ifc_id;			/* interface number from usb_interface_id() */
	spinlock_t lock;		/* protects req lists and rx state */
	atomic_t online;		/* set in set_alt, cleared in disable */
	/* usb eps*/
	struct usb_ep *notify;		/* interrupt IN endpoint */
	struct usb_ep *in;		/* bulk IN endpoint */
	struct usb_ep *out;		/* bulk OUT endpoint */
	struct usb_request *notify_req;
	struct ccid_ctrl_dev ctrl_dev;
	struct ccid_bulk_dev bulk_dev;
	int dtr_state;			/* 1 while configured; read via CCID_READ_DTR */
};
/* Singleton function instance, allocated in ccid_setup(). */
static struct f_ccid *_ccid_dev;
/* Forward declarations; the devices are defined below with their fops. */
static struct miscdevice ccid_bulk_device;
static struct miscdevice ccid_ctrl_device;
  63. /* Interface Descriptor: */
/* Interface Descriptor: one CCID interface with notify + bulk IN/OUT. */
static struct usb_interface_descriptor ccid_interface_desc = {
	.bLength = USB_DT_INTERFACE_SIZE,
	.bDescriptorType = USB_DT_INTERFACE,
	.bNumEndpoints = 3,
	.bInterfaceClass = USB_CLASS_CSCID,	/* smart-card class */
	.bInterfaceSubClass = 0,
	.bInterfaceProtocol = 0,
};
  72. /* CCID Class Descriptor */
/* CCID Class Descriptor (functional descriptor placed after the interface) */
static struct usb_ccid_class_descriptor ccid_class_desc = {
	.bLength = sizeof(ccid_class_desc),
	.bDescriptorType = CCID_DECRIPTOR_TYPE,
	.bcdCCID = CCID1_10,
	.bMaxSlotIndex = 0,	/* single slot */
	/* This value indicates what voltages the CCID can supply to slots */
	.bVoltageSupport = VOLTS_3_0,
	.dwProtocols = PROTOCOL_TO,
	/* Default ICC clock frequency in KHz */
	.dwDefaultClock = 3580,
	/* Maximum supported ICC clock frequency in KHz */
	.dwMaximumClock = 3580,
	.bNumClockSupported = 0,
	/* Default ICC I/O data rate in bps */
	.dwDataRate = 9600,
	/* Maximum supported ICC I/O data rate in bps */
	.dwMaxDataRate = 9600,
	.bNumDataRatesSupported = 0,
	.dwMaxIFSD = 0,
	.dwSynchProtocols = 0,
	.dwMechanical = 0,
	/* This value indicates what intelligent features the CCID has */
	.dwFeatures = CCID_FEATURES_EXC_SAPDU |
		CCID_FEATURES_AUTO_PNEGO |
		CCID_FEATURES_AUTO_BAUD |
		CCID_FEATURES_AUTO_CLOCK |
		CCID_FEATURES_AUTO_VOLT |
		CCID_FEATURES_AUTO_ACTIV |
		CCID_FEATURES_AUTO_PCONF,
	/* extended APDU level Message Length */
	.dwMaxCCIDMessageLength = 0x200,
	.bClassGetResponse = 0x0,
	.bClassEnvelope = 0x0,
	.wLcdLayout = 0,	/* no LCD */
	.bPINSupport = 0,	/* no PIN pad */
	.bMaxCCIDBusySlots = 1
};
  110. /* Full speed support: */
/* Full speed support: */
static struct usb_endpoint_descriptor ccid_fs_notify_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize = __constant_cpu_to_le16(CCID_NOTIFY_MAXPACKET),
	.bInterval = 1 << CCID_NOTIFY_INTERVAL,	/* 32 — FS interval is in frames */
};
static struct usb_endpoint_descriptor ccid_fs_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(64),	/* FS bulk max */
};
static struct usb_endpoint_descriptor ccid_fs_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(64),	/* FS bulk max */
};
/* Descriptor list handed to usb_copy_descriptors() in bind */
static struct usb_descriptor_header *ccid_fs_descs[] = {
	(struct usb_descriptor_header *) &ccid_interface_desc,
	(struct usb_descriptor_header *) &ccid_class_desc,
	(struct usb_descriptor_header *) &ccid_fs_notify_desc,
	(struct usb_descriptor_header *) &ccid_fs_in_desc,
	(struct usb_descriptor_header *) &ccid_fs_out_desc,
	NULL,
};
  141. /* High speed support: */
/* High speed support: */
static struct usb_endpoint_descriptor ccid_hs_notify_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize = __constant_cpu_to_le16(CCID_NOTIFY_MAXPACKET),
	.bInterval = CCID_NOTIFY_INTERVAL + 4,	/* 9 — HS interval is 2^(n-1) microframes */
};
static struct usb_endpoint_descriptor ccid_hs_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(512),	/* HS bulk max */
};
static struct usb_endpoint_descriptor ccid_hs_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(512),	/* HS bulk max */
};
/* Descriptor list handed to usb_copy_descriptors() in bind */
static struct usb_descriptor_header *ccid_hs_descs[] = {
	(struct usb_descriptor_header *) &ccid_interface_desc,
	(struct usb_descriptor_header *) &ccid_class_desc,
	(struct usb_descriptor_header *) &ccid_hs_notify_desc,
	(struct usb_descriptor_header *) &ccid_hs_in_desc,
	(struct usb_descriptor_header *) &ccid_hs_out_desc,
	NULL,
};
  172. static inline struct f_ccid *func_to_ccid(struct usb_function *f)
  173. {
  174. return container_of(f, struct f_ccid, function);
  175. }
  176. static void ccid_req_put(struct f_ccid *ccid_dev, struct list_head *head,
  177. struct usb_request *req)
  178. {
  179. unsigned long flags;
  180. spin_lock_irqsave(&ccid_dev->lock, flags);
  181. list_add_tail(&req->list, head);
  182. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  183. }
  184. static struct usb_request *ccid_req_get(struct f_ccid *ccid_dev,
  185. struct list_head *head)
  186. {
  187. unsigned long flags;
  188. struct usb_request *req = NULL;
  189. spin_lock_irqsave(&ccid_dev->lock, flags);
  190. if (!list_empty(head)) {
  191. req = list_first_entry(head, struct usb_request, list);
  192. list_del(&req->list);
  193. }
  194. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  195. return req;
  196. }
  197. static void ccid_notify_complete(struct usb_ep *ep, struct usb_request *req)
  198. {
  199. switch (req->status) {
  200. case -ECONNRESET:
  201. case -ESHUTDOWN:
  202. case 0:
  203. break;
  204. default:
  205. pr_err("CCID notify ep error %d\n", req->status);
  206. }
  207. }
/*
 * Completion handler for bulk IN requests: latch any error, return the
 * request to the idle pool, and wake a waiting writer.
 */
static void ccid_bulk_complete_in(struct usb_ep *ep, struct usb_request *req)
{
	struct f_ccid *ccid_dev = _ccid_dev;
	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;

	if (req->status != 0)
		atomic_set(&bulk_dev->error, 1);
	ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
	wake_up(&bulk_dev->write_wq);
}
/*
 * Completion handler for the single bulk OUT request: latch any error,
 * flag completion, and wake the blocked reader.
 */
static void ccid_bulk_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct f_ccid *ccid_dev = _ccid_dev;
	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;

	if (req->status != 0)
		atomic_set(&bulk_dev->error, 1);
	bulk_dev->rx_done = 1;
	wake_up(&bulk_dev->read_wq);
}
  226. static struct usb_request *
  227. ccid_request_alloc(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
  228. {
  229. struct usb_request *req;
  230. req = usb_ep_alloc_request(ep, kmalloc_flags);
  231. if (req != NULL) {
  232. req->length = len;
  233. req->buf = kmalloc(len, kmalloc_flags);
  234. if (req->buf == NULL) {
  235. usb_ep_free_request(ep, req);
  236. req = NULL;
  237. }
  238. }
  239. return req ? req : ERR_PTR(-ENOMEM);
  240. }
  241. static void ccid_request_free(struct usb_request *req, struct usb_ep *ep)
  242. {
  243. if (req) {
  244. kfree(req->buf);
  245. usb_ep_free_request(ep, req);
  246. }
  247. }
/*
 * Handle CCID class-specific control requests on ep0.
 *
 * ABORT (host->device) is forwarded to userspace through ctrl_dev->buf
 * and the tx_wait_q; GET_CLOCK_FREQUENCIES / GET_DATA_RATES are
 * answered directly from the class descriptor.  Returns the number of
 * bytes queued on ep0, 0 for ABORT, or a negative errno.
 */
static int
ccid_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_ccid *ccid_dev = container_of(f, struct f_ccid, function);
	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request *req = cdev->req;
	int ret = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);

	if (!atomic_read(&ccid_dev->online))
		return -ENOTCONN;

	/* Dispatch on (bRequestType, bRequest) packed into one value */
	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| CCIDGENERICREQ_ABORT:
		if (w_length != 0)
			goto invalid;
		/* Stage ABORT (with slot/seq from wValue) for the ctrl reader */
		ctrl_dev->buf[0] = CCIDGENERICREQ_ABORT;
		ctrl_dev->buf[1] = w_value & 0xFF;
		ctrl_dev->buf[2] = (w_value >> 8) & 0xFF;
		ctrl_dev->buf[3] = 0x00;
		ctrl_dev->tx_ctrl_done = 1;
		wake_up(&ctrl_dev->tx_wait_q);
		return 0;
	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| CCIDGENERICREQ_GET_CLOCK_FREQUENCIES:
		if (w_length > req->length)
			goto invalid;
		*(u32 *) req->buf =
				cpu_to_le32(ccid_class_desc.dwDefaultClock);
		ret = min_t(u32, w_length,
				sizeof(ccid_class_desc.dwDefaultClock));
		break;
	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| CCIDGENERICREQ_GET_DATA_RATES:
		if (w_length > req->length)
			goto invalid;
		*(u32 *) req->buf = cpu_to_le32(ccid_class_desc.dwDataRate);
		ret = min_t(u32, w_length, sizeof(ccid_class_desc.dwDataRate));
		break;
	default:
invalid:
		pr_debug("invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}
	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		pr_debug("ccid req%02x.%02x v%04x i%04x l%d\n",
				ctrl->bRequestType, ctrl->bRequest,
				w_value, w_index, w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			pr_err("ccid ep0 enqueue err %d\n", ret);
	}
	return ret;
}
/*
 * Composite "disable" callback: tear down endpoints, free the requests
 * allocated in set_alt, mark the function offline, and wake any
 * sleeping readers/writers so they can observe the disconnect.
 */
static void ccid_function_disable(struct usb_function *f)
{
	struct f_ccid *ccid_dev = func_to_ccid(f);
	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
	struct usb_request *req;

	/* Disable endpoints */
	usb_ep_disable(ccid_dev->notify);
	usb_ep_disable(ccid_dev->in);
	usb_ep_disable(ccid_dev->out);
	/* Free endpoint related requests */
	ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
	/* rx_req is freed by the reader instead if it is mid-copy */
	if (!atomic_read(&bulk_dev->rx_req_busy))
		ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
	while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
		ccid_request_free(req, ccid_dev->in);
	ccid_dev->dtr_state = 0;
	atomic_set(&ccid_dev->online, 0);
	/* Wake up threads */
	wake_up(&bulk_dev->write_wq);
	wake_up(&bulk_dev->read_wq);
	wake_up(&ctrl_dev->tx_wait_q);
}
  330. static int
  331. ccid_function_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
  332. {
  333. struct f_ccid *ccid_dev = func_to_ccid(f);
  334. struct usb_composite_dev *cdev = ccid_dev->cdev;
  335. struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
  336. struct usb_request *req;
  337. int ret = 0;
  338. int i;
  339. ccid_dev->notify_req = ccid_request_alloc(ccid_dev->notify,
  340. sizeof(struct usb_ccid_notification), GFP_ATOMIC);
  341. if (IS_ERR(ccid_dev->notify_req)) {
  342. pr_err("%s: unable to allocate memory for notify req\n",
  343. __func__);
  344. return PTR_ERR(ccid_dev->notify_req);
  345. }
  346. ccid_dev->notify_req->complete = ccid_notify_complete;
  347. ccid_dev->notify_req->context = ccid_dev;
  348. /* now allocate requests for our endpoints */
  349. req = ccid_request_alloc(ccid_dev->out, BULK_OUT_BUFFER_SIZE,
  350. GFP_ATOMIC);
  351. if (IS_ERR(req)) {
  352. pr_err("%s: unable to allocate memory for out req\n",
  353. __func__);
  354. ret = PTR_ERR(req);
  355. goto free_notify;
  356. }
  357. req->complete = ccid_bulk_complete_out;
  358. req->context = ccid_dev;
  359. bulk_dev->rx_req = req;
  360. for (i = 0; i < TX_REQ_MAX; i++) {
  361. req = ccid_request_alloc(ccid_dev->in, BULK_IN_BUFFER_SIZE,
  362. GFP_ATOMIC);
  363. if (IS_ERR(req)) {
  364. pr_err("%s: unable to allocate memory for in req\n",
  365. __func__);
  366. ret = PTR_ERR(req);
  367. goto free_bulk_out;
  368. }
  369. req->complete = ccid_bulk_complete_in;
  370. req->context = ccid_dev;
  371. ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
  372. }
  373. /* choose the descriptors and enable endpoints */
  374. ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->notify);
  375. if (ret) {
  376. ccid_dev->notify->desc = NULL;
  377. pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
  378. __func__, ccid_dev->notify->name, ret);
  379. goto free_bulk_in;
  380. }
  381. ret = usb_ep_enable(ccid_dev->notify);
  382. if (ret) {
  383. pr_err("%s: usb ep#%s enable failed, err#%d\n",
  384. __func__, ccid_dev->notify->name, ret);
  385. goto free_bulk_in;
  386. }
  387. ccid_dev->notify->driver_data = ccid_dev;
  388. ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->in);
  389. if (ret) {
  390. ccid_dev->in->desc = NULL;
  391. pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
  392. __func__, ccid_dev->in->name, ret);
  393. goto disable_ep_notify;
  394. }
  395. ret = usb_ep_enable(ccid_dev->in);
  396. if (ret) {
  397. pr_err("%s: usb ep#%s enable failed, err#%d\n",
  398. __func__, ccid_dev->in->name, ret);
  399. goto disable_ep_notify;
  400. }
  401. ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->out);
  402. if (ret) {
  403. ccid_dev->out->desc = NULL;
  404. pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
  405. __func__, ccid_dev->out->name, ret);
  406. goto disable_ep_in;
  407. }
  408. ret = usb_ep_enable(ccid_dev->out);
  409. if (ret) {
  410. pr_err("%s: usb ep#%s enable failed, err#%d\n",
  411. __func__, ccid_dev->out->name, ret);
  412. goto disable_ep_in;
  413. }
  414. ccid_dev->dtr_state = 1;
  415. atomic_set(&ccid_dev->online, 1);
  416. return ret;
  417. disable_ep_in:
  418. usb_ep_disable(ccid_dev->in);
  419. disable_ep_notify:
  420. usb_ep_disable(ccid_dev->notify);
  421. ccid_dev->notify->driver_data = NULL;
  422. free_bulk_in:
  423. while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
  424. ccid_request_free(req, ccid_dev->in);
  425. free_bulk_out:
  426. ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
  427. free_notify:
  428. ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
  429. return ret;
  430. }
/*
 * Composite "unbind" callback: release the descriptor copies made in
 * ccid_function_bind().
 */
static void ccid_function_unbind(struct usb_configuration *c,
		struct usb_function *f)
{
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->fs_descriptors);
}
  438. static int ccid_function_bind(struct usb_configuration *c,
  439. struct usb_function *f)
  440. {
  441. struct f_ccid *ccid_dev = func_to_ccid(f);
  442. struct usb_ep *ep;
  443. struct usb_composite_dev *cdev = c->cdev;
  444. int ret = -ENODEV;
  445. ccid_dev->ifc_id = usb_interface_id(c, f);
  446. if (ccid_dev->ifc_id < 0) {
  447. pr_err("%s: unable to allocate ifc id, err:%d",
  448. __func__, ccid_dev->ifc_id);
  449. return ccid_dev->ifc_id;
  450. }
  451. ccid_interface_desc.bInterfaceNumber = ccid_dev->ifc_id;
  452. ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_notify_desc);
  453. if (!ep) {
  454. pr_err("%s: usb epnotify autoconfig failed\n", __func__);
  455. return -ENODEV;
  456. }
  457. ccid_dev->notify = ep;
  458. ep->driver_data = cdev;
  459. ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_in_desc);
  460. if (!ep) {
  461. pr_err("%s: usb epin autoconfig failed\n", __func__);
  462. ret = -ENODEV;
  463. goto ep_auto_in_fail;
  464. }
  465. ccid_dev->in = ep;
  466. ep->driver_data = cdev;
  467. ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_out_desc);
  468. if (!ep) {
  469. pr_err("%s: usb epout autoconfig failed\n", __func__);
  470. ret = -ENODEV;
  471. goto ep_auto_out_fail;
  472. }
  473. ccid_dev->out = ep;
  474. ep->driver_data = cdev;
  475. f->fs_descriptors = usb_copy_descriptors(ccid_fs_descs);
  476. if (!f->fs_descriptors)
  477. goto ep_auto_out_fail;
  478. if (gadget_is_dualspeed(cdev->gadget)) {
  479. ccid_hs_in_desc.bEndpointAddress =
  480. ccid_fs_in_desc.bEndpointAddress;
  481. ccid_hs_out_desc.bEndpointAddress =
  482. ccid_fs_out_desc.bEndpointAddress;
  483. ccid_hs_notify_desc.bEndpointAddress =
  484. ccid_fs_notify_desc.bEndpointAddress;
  485. /* copy descriptors, and track endpoint copies */
  486. f->hs_descriptors = usb_copy_descriptors(ccid_hs_descs);
  487. if (!f->hs_descriptors)
  488. goto ep_auto_out_fail;
  489. }
  490. pr_debug("%s: CCID %s Speed, IN:%s OUT:%s\n", __func__,
  491. gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
  492. ccid_dev->in->name, ccid_dev->out->name);
  493. return 0;
  494. ep_auto_out_fail:
  495. ccid_dev->out->driver_data = NULL;
  496. ccid_dev->out = NULL;
  497. ep_auto_in_fail:
  498. ccid_dev->in->driver_data = NULL;
  499. ccid_dev->in = NULL;
  500. return ret;
  501. }
/*
 * open() for /dev/ccid_bulk: allow a single opener while the cable is
 * connected and reset the latched error state.
 *
 * NOTE(review): the read-then-set on bulk_dev->opened is not atomic as
 * a pair, so two simultaneous open()s could in principle both succeed —
 * TODO confirm whether the misc-device layer serializes open here.
 */
static int ccid_bulk_open(struct inode *ip, struct file *fp)
{
	struct f_ccid *ccid_dev = _ccid_dev;
	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
	unsigned long flags;

	pr_debug("ccid_bulk_open\n");
	if (!atomic_read(&ccid_dev->online)) {
		pr_debug("%s: USB cable not connected\n", __func__);
		return -ENODEV;
	}
	if (atomic_read(&bulk_dev->opened)) {
		pr_debug("%s: bulk device is already opened\n", __func__);
		return -EBUSY;
	}
	atomic_set(&bulk_dev->opened, 1);
	/* clear the error latch */
	atomic_set(&bulk_dev->error, 0);
	spin_lock_irqsave(&ccid_dev->lock, flags);
	fp->private_data = ccid_dev;
	spin_unlock_irqrestore(&ccid_dev->lock, flags);
	return 0;
}
  524. static int ccid_bulk_release(struct inode *ip, struct file *fp)
  525. {
  526. struct f_ccid *ccid_dev = fp->private_data;
  527. struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
  528. pr_debug("ccid_bulk_release\n");
  529. atomic_set(&bulk_dev->opened, 0);
  530. return 0;
  531. }
  532. static ssize_t ccid_bulk_read(struct file *fp, char __user *buf,
  533. size_t count, loff_t *pos)
  534. {
  535. struct f_ccid *ccid_dev = fp->private_data;
  536. struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
  537. struct usb_request *req;
  538. int r = count, xfer;
  539. int ret;
  540. unsigned long flags;
  541. pr_debug("ccid_bulk_read(%d)\n", count);
  542. if (count > BULK_OUT_BUFFER_SIZE) {
  543. pr_err("%s: max_buffer_size:%d given_pkt_size:%d\n",
  544. __func__, BULK_OUT_BUFFER_SIZE, count);
  545. return -ENOMEM;
  546. }
  547. if (atomic_read(&bulk_dev->error)) {
  548. r = -EIO;
  549. pr_err("%s bulk_dev_error\n", __func__);
  550. goto done;
  551. }
  552. requeue_req:
  553. spin_lock_irqsave(&ccid_dev->lock, flags);
  554. if (!atomic_read(&ccid_dev->online)) {
  555. pr_debug("%s: USB cable not connected\n", __func__);
  556. return -ENODEV;
  557. }
  558. /* queue a request */
  559. req = bulk_dev->rx_req;
  560. req->length = count;
  561. bulk_dev->rx_done = 0;
  562. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  563. ret = usb_ep_queue(ccid_dev->out, req, GFP_KERNEL);
  564. if (ret < 0) {
  565. r = -EIO;
  566. pr_err("%s usb ep queue failed\n", __func__);
  567. atomic_set(&bulk_dev->error, 1);
  568. goto done;
  569. }
  570. /* wait for a request to complete */
  571. ret = wait_event_interruptible(bulk_dev->read_wq, bulk_dev->rx_done ||
  572. atomic_read(&bulk_dev->error) ||
  573. !atomic_read(&ccid_dev->online));
  574. if (ret < 0) {
  575. atomic_set(&bulk_dev->error, 1);
  576. r = ret;
  577. usb_ep_dequeue(ccid_dev->out, req);
  578. goto done;
  579. }
  580. if (!atomic_read(&bulk_dev->error)) {
  581. spin_lock_irqsave(&ccid_dev->lock, flags);
  582. if (!atomic_read(&ccid_dev->online)) {
  583. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  584. pr_debug("%s: USB cable not connected\n", __func__);
  585. r = -ENODEV;
  586. goto done;
  587. }
  588. /* If we got a 0-len packet, throw it back and try again. */
  589. if (req->actual == 0) {
  590. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  591. goto requeue_req;
  592. }
  593. xfer = (req->actual < count) ? req->actual : count;
  594. atomic_set(&bulk_dev->rx_req_busy, 1);
  595. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  596. if (copy_to_user(buf, req->buf, xfer))
  597. r = -EFAULT;
  598. spin_lock_irqsave(&ccid_dev->lock, flags);
  599. atomic_set(&bulk_dev->rx_req_busy, 0);
  600. if (!atomic_read(&ccid_dev->online)) {
  601. ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
  602. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  603. pr_debug("%s: USB cable not connected\n", __func__);
  604. r = -ENODEV;
  605. goto done;
  606. }
  607. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  608. } else {
  609. r = -EIO;
  610. }
  611. done:
  612. pr_debug("ccid_bulk_read returning %d\n", r);
  613. return r;
  614. }
  615. static ssize_t ccid_bulk_write(struct file *fp, const char __user *buf,
  616. size_t count, loff_t *pos)
  617. {
  618. struct f_ccid *ccid_dev = fp->private_data;
  619. struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
  620. struct usb_request *req = 0;
  621. int r = count;
  622. int ret;
  623. unsigned long flags;
  624. pr_debug("ccid_bulk_write(%d)\n", count);
  625. if (!atomic_read(&ccid_dev->online)) {
  626. pr_debug("%s: USB cable not connected\n", __func__);
  627. return -ENODEV;
  628. }
  629. if (!count) {
  630. pr_err("%s: zero length ctrl pkt\n", __func__);
  631. return -ENODEV;
  632. }
  633. if (count > BULK_IN_BUFFER_SIZE) {
  634. pr_err("%s: max_buffer_size:%d given_pkt_size:%d\n",
  635. __func__, BULK_IN_BUFFER_SIZE, count);
  636. return -ENOMEM;
  637. }
  638. /* get an idle tx request to use */
  639. ret = wait_event_interruptible(bulk_dev->write_wq,
  640. ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)) ||
  641. atomic_read(&bulk_dev->error)));
  642. if (ret < 0) {
  643. r = ret;
  644. goto done;
  645. }
  646. if (atomic_read(&bulk_dev->error)) {
  647. pr_err(" %s dev->error\n", __func__);
  648. r = -EIO;
  649. goto done;
  650. }
  651. if (copy_from_user(req->buf, buf, count)) {
  652. if (!atomic_read(&ccid_dev->online)) {
  653. pr_debug("%s: USB cable not connected\n",
  654. __func__);
  655. ccid_request_free(req, ccid_dev->in);
  656. r = -ENODEV;
  657. } else {
  658. ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
  659. r = -EFAULT;
  660. }
  661. goto done;
  662. }
  663. req->length = count;
  664. ret = usb_ep_queue(ccid_dev->in, req, GFP_KERNEL);
  665. if (ret < 0) {
  666. pr_debug("ccid_bulk_write: xfer error %d\n", ret);
  667. atomic_set(&bulk_dev->error, 1);
  668. ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
  669. r = -EIO;
  670. spin_lock_irqsave(&ccid_dev->lock, flags);
  671. if (!atomic_read(&ccid_dev->online)) {
  672. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  673. pr_debug("%s: USB cable not connected\n",
  674. __func__);
  675. while ((req = ccid_req_get(ccid_dev,
  676. &bulk_dev->tx_idle)))
  677. ccid_request_free(req, ccid_dev->in);
  678. r = -ENODEV;
  679. }
  680. spin_unlock_irqrestore(&ccid_dev->lock, flags);
  681. goto done;
  682. }
  683. done:
  684. pr_debug("ccid_bulk_write returning %d\n", r);
  685. return r;
  686. }
/* File operations for the /dev/ccid_bulk data path. */
static const struct file_operations ccid_bulk_fops = {
	.owner = THIS_MODULE,
	.read = ccid_bulk_read,
	.write = ccid_bulk_write,
	.open = ccid_bulk_open,
	.release = ccid_bulk_release,
};
/* Misc device exposing the bulk endpoints to userspace. */
static struct miscdevice ccid_bulk_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ccid_bulk",
	.fops = &ccid_bulk_fops,
};
  699. static int ccid_bulk_device_init(struct f_ccid *dev)
  700. {
  701. int ret;
  702. struct ccid_bulk_dev *bulk_dev = &dev->bulk_dev;
  703. init_waitqueue_head(&bulk_dev->read_wq);
  704. init_waitqueue_head(&bulk_dev->write_wq);
  705. INIT_LIST_HEAD(&bulk_dev->tx_idle);
  706. ret = misc_register(&ccid_bulk_device);
  707. if (ret) {
  708. pr_err("%s: failed to register misc device\n", __func__);
  709. return ret;
  710. }
  711. return 0;
  712. }
/*
 * open() for /dev/ccid_ctrl: allow a single opener while the cable is
 * connected.
 *
 * NOTE(review): as with ccid_bulk_open, the opened read-then-set pair
 * is not atomic — TODO confirm serialization at the caller.
 */
static int ccid_ctrl_open(struct inode *inode, struct file *fp)
{
	struct f_ccid *ccid_dev = _ccid_dev;
	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
	unsigned long flags;

	if (!atomic_read(&ccid_dev->online)) {
		pr_debug("%s: USB cable not connected\n", __func__);
		return -ENODEV;
	}
	if (atomic_read(&ctrl_dev->opened)) {
		pr_debug("%s: ctrl device is already opened\n", __func__);
		return -EBUSY;
	}
	atomic_set(&ctrl_dev->opened, 1);
	spin_lock_irqsave(&ccid_dev->lock, flags);
	fp->private_data = ccid_dev;
	spin_unlock_irqrestore(&ccid_dev->lock, flags);
	return 0;
}
  732. static int ccid_ctrl_release(struct inode *inode, struct file *fp)
  733. {
  734. struct f_ccid *ccid_dev = fp->private_data;
  735. struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
  736. atomic_set(&ctrl_dev->opened, 0);
  737. return 0;
  738. }
/*
 * read() for /dev/ccid_ctrl: block until a class control request (set
 * by ccid_function_setup) is staged, then copy up to CTRL_BUF_SIZE
 * bytes of it to userspace.  Returns bytes copied or a negative errno.
 */
static ssize_t ccid_ctrl_read(struct file *fp, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct f_ccid *ccid_dev = fp->private_data;
	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
	int ret = 0;

	if (!atomic_read(&ccid_dev->online)) {
		pr_debug("%s: USB cable not connected\n", __func__);
		return -ENODEV;
	}
	/* Clamp to the staged buffer size */
	if (count > CTRL_BUF_SIZE)
		count = CTRL_BUF_SIZE;

	ret = wait_event_interruptible(ctrl_dev->tx_wait_q,
					 ctrl_dev->tx_ctrl_done);
	if (ret < 0)
		return ret;
	ctrl_dev->tx_ctrl_done = 0;

	/* We may also have been woken by a disconnect */
	if (!atomic_read(&ccid_dev->online)) {
		pr_debug("%s: USB cable not connected\n", __func__);
		return -ENODEV;
	}
	ret = copy_to_user(buf, ctrl_dev->buf, count);
	if (ret)
		return -EFAULT;

	return count;
}
  765. static long
  766. ccid_ctrl_ioctl(struct file *fp, unsigned cmd, u_long arg)
  767. {
  768. struct f_ccid *ccid_dev = fp->private_data;
  769. struct usb_request *req = ccid_dev->notify_req;
  770. struct usb_ccid_notification *ccid_notify = req->buf;
  771. void __user *argp = (void __user *)arg;
  772. int ret = 0;
  773. switch (cmd) {
  774. case CCID_NOTIFY_CARD:
  775. if (copy_from_user(ccid_notify, argp,
  776. sizeof(struct usb_ccid_notification)))
  777. return -EFAULT;
  778. req->length = 2;
  779. break;
  780. case CCID_NOTIFY_HWERROR:
  781. if (copy_from_user(ccid_notify, argp,
  782. sizeof(struct usb_ccid_notification)))
  783. return -EFAULT;
  784. req->length = 4;
  785. break;
  786. case CCID_READ_DTR:
  787. if (copy_to_user((int *)arg, &ccid_dev->dtr_state, sizeof(int)))
  788. return -EFAULT;
  789. return 0;
  790. }
  791. ret = usb_ep_queue(ccid_dev->notify, ccid_dev->notify_req, GFP_KERNEL);
  792. if (ret < 0) {
  793. pr_err("ccid notify ep enqueue error %d\n", ret);
  794. return ret;
  795. }
  796. return 0;
  797. }
/* File operations for the /dev/ccid_ctrl control path. */
static const struct file_operations ccid_ctrl_fops = {
	.owner = THIS_MODULE,
	.open = ccid_ctrl_open,
	.release = ccid_ctrl_release,
	.read = ccid_ctrl_read,
	.unlocked_ioctl = ccid_ctrl_ioctl,
};
/* Misc device exposing class control events to userspace. */
static struct miscdevice ccid_ctrl_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ccid_ctrl",
	.fops = &ccid_ctrl_fops,
};
  810. static int ccid_ctrl_device_init(struct f_ccid *dev)
  811. {
  812. int ret;
  813. struct ccid_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
  814. INIT_LIST_HEAD(&ctrl_dev->tx_q);
  815. init_waitqueue_head(&ctrl_dev->tx_wait_q);
  816. ret = misc_register(&ccid_ctrl_device);
  817. if (ret) {
  818. pr_err("%s: failed to register misc device\n", __func__);
  819. return ret;
  820. }
  821. return 0;
  822. }
/*
 * Hook the CCID function into a composite configuration: fill in the
 * usb_function callbacks and add it to @c.
 */
static int ccid_bind_config(struct usb_configuration *c)
{
	struct f_ccid *ccid_dev = _ccid_dev;

	pr_debug("ccid_bind_config\n");
	ccid_dev->cdev = c->cdev;
	ccid_dev->function.name = FUNCTION_NAME;
	/* Replaced by usb_copy_descriptors() copies during bind */
	ccid_dev->function.fs_descriptors = ccid_fs_descs;
	ccid_dev->function.hs_descriptors = ccid_hs_descs;
	ccid_dev->function.bind = ccid_function_bind;
	ccid_dev->function.unbind = ccid_function_unbind;
	ccid_dev->function.set_alt = ccid_function_set_alt;
	ccid_dev->function.setup = ccid_function_setup;
	ccid_dev->function.disable = ccid_function_disable;

	return usb_add_function(c, &ccid_dev->function);
}
  838. static int ccid_setup(void)
  839. {
  840. struct f_ccid *ccid_dev;
  841. int ret;
  842. ccid_dev = kzalloc(sizeof(*ccid_dev), GFP_KERNEL);
  843. if (!ccid_dev)
  844. return -ENOMEM;
  845. _ccid_dev = ccid_dev;
  846. spin_lock_init(&ccid_dev->lock);
  847. ret = ccid_ctrl_device_init(ccid_dev);
  848. if (ret) {
  849. pr_err("%s: ccid_ctrl_device_init failed, err:%d\n",
  850. __func__, ret);
  851. goto err_ctrl_init;
  852. }
  853. ret = ccid_bulk_device_init(ccid_dev);
  854. if (ret) {
  855. pr_err("%s: ccid_bulk_device_init failed, err:%d\n",
  856. __func__, ret);
  857. goto err_bulk_init;
  858. }
  859. return 0;
  860. err_bulk_init:
  861. misc_deregister(&ccid_ctrl_device);
  862. err_ctrl_init:
  863. kfree(ccid_dev);
  864. pr_err("ccid gadget driver failed to initialize\n");
  865. return ret;
  866. }
  867. static void ccid_cleanup(void)
  868. {
  869. misc_deregister(&ccid_bulk_device);
  870. misc_deregister(&ccid_ctrl_device);
  871. kfree(_ccid_dev);
  872. }