usb-skeleton.c

// SPDX-License-Identifier: GPL-2.0
/*
 * USB Skeleton driver - 2.2
 *
 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
 *
 * This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
 * but has been rewritten to be easier to read and use.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>

/* Define these values to match your devices */
#define USB_SKEL_VENDOR_ID	0xfff0
#define USB_SKEL_PRODUCT_ID	0xfff0

/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
	{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
	{ }					/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_table);

/* Get a minor range for your devices from the usb maintainer */
#define USB_SKEL_MINOR_BASE	192

/* our private defines. if this grows any larger, use your own .h file */
#define MAX_TRANSFER		(PAGE_SIZE - 512)
/*
 * MAX_TRANSFER is chosen so that the VM is not stressed by
 * allocations > PAGE_SIZE and the number of packets in a page
 * is an integer. 512 is the largest possible packet on EHCI.
 */
#define WRITES_IN_FLIGHT	8
/* arbitrarily chosen */

/* Structure to hold all of our device specific stuff */
struct usb_skel {
	struct usb_device	*udev;			/* the usb device for this device */
	struct usb_interface	*interface;		/* the interface for this device */
	struct semaphore	limit_sem;		/* limiting the number of writes in progress */
	struct usb_anchor	submitted;		/* in case we need to retract our submissions */
	struct urb		*bulk_in_urb;		/* the urb to read data with */
	unsigned char		*bulk_in_buffer;	/* the buffer to receive data */
	size_t			bulk_in_size;		/* the size of the receive buffer */
	size_t			bulk_in_filled;		/* number of bytes in the buffer */
	size_t			bulk_in_copied;		/* already copied to user space */
	__u8			bulk_in_endpointAddr;	/* the address of the bulk in endpoint */
	__u8			bulk_out_endpointAddr;	/* the address of the bulk out endpoint */
	int			errors;			/* the last request tanked */
	bool			ongoing_read;		/* a read is going on */
	spinlock_t		err_lock;		/* lock for errors */
	struct kref		kref;
	struct mutex		io_mutex;		/* synchronize I/O with disconnect */
	unsigned long		disconnected:1;
	wait_queue_head_t	bulk_in_wait;		/* to wait for an ongoing read */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)

static struct usb_driver skel_driver;
static void skel_draw_down(struct usb_skel *dev);

static void skel_delete(struct kref *kref)
{
	struct usb_skel *dev = to_skel_dev(kref);

	usb_free_urb(dev->bulk_in_urb);
	usb_put_intf(dev->interface);
	usb_put_dev(dev->udev);
	kfree(dev->bulk_in_buffer);
	kfree(dev);
}

static int skel_open(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;
	struct usb_interface *interface;
	int subminor;
	int retval = 0;

	subminor = iminor(inode);

	interface = usb_find_interface(&skel_driver, subminor);
	if (!interface) {
		pr_err("%s - error, can't find device for minor %d\n",
		       __func__, subminor);
		retval = -ENODEV;
		goto exit;
	}

	dev = usb_get_intfdata(interface);
	if (!dev) {
		retval = -ENODEV;
		goto exit;
	}

	retval = usb_autopm_get_interface(interface);
	if (retval)
		goto exit;

	/* increment our usage count for the device */
	kref_get(&dev->kref);

	/* save our object in the file's private structure */
	file->private_data = dev;

exit:
	return retval;
}

static int skel_release(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* allow the device to be autosuspended */
	usb_autopm_put_interface(dev->interface);

	/* decrement the count on our device */
	kref_put(&dev->kref, skel_delete);
	return 0;
}

static int skel_flush(struct file *file, fl_owner_t id)
{
	struct usb_skel *dev;
	int res;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* wait for io to stop */
	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	/* read out errors, leave subsequent opens a clean slate */
	spin_lock_irq(&dev->err_lock);
	res = dev->errors ? (dev->errors == -EPIPE ? -EPIPE : -EIO) : 0;
	dev->errors = 0;
	spin_unlock_irq(&dev->err_lock);

	mutex_unlock(&dev->io_mutex);

	return res;
}

static void skel_read_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;
	unsigned long flags;

	dev = urb->context;

	spin_lock_irqsave(&dev->err_lock, flags);
	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero read bulk status received: %d\n",
				__func__, urb->status);

		dev->errors = urb->status;
	} else {
		dev->bulk_in_filled = urb->actual_length;
	}
	dev->ongoing_read = 0;
	spin_unlock_irqrestore(&dev->err_lock, flags);

	wake_up_interruptible(&dev->bulk_in_wait);
}

static int skel_do_read_io(struct usb_skel *dev, size_t count)
{
	int rv;

	/* prepare a read */
	usb_fill_bulk_urb(dev->bulk_in_urb,
			  dev->udev,
			  usb_rcvbulkpipe(dev->udev,
					  dev->bulk_in_endpointAddr),
			  dev->bulk_in_buffer,
			  min(dev->bulk_in_size, count),
			  skel_read_bulk_callback,
			  dev);
	/* tell everybody to leave the URB alone */
	spin_lock_irq(&dev->err_lock);
	dev->ongoing_read = 1;
	spin_unlock_irq(&dev->err_lock);

	/* submit bulk in urb, which means no data to deliver */
	dev->bulk_in_filled = 0;
	dev->bulk_in_copied = 0;

	/* do it */
	rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
	if (rv < 0) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting read urb, error %d\n",
			__func__, rv);
		rv = (rv == -ENOMEM) ? rv : -EIO;
		spin_lock_irq(&dev->err_lock);
		dev->ongoing_read = 0;
		spin_unlock_irq(&dev->err_lock);
	}

	return rv;
}
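
/*
 * Reads are served from bulk_in_buffer when it still holds unread data;
 * otherwise skel_do_read_io() submits a bulk-in URB and, unless the file
 * was opened O_NONBLOCK, we sleep on bulk_in_wait until the completion
 * handler has filled the buffer and then retry.
 */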
static ssize_t skel_read(struct file *file, char *buffer, size_t count,
			 loff_t *ppos)
{
	struct usb_skel *dev;
	int rv;
	bool ongoing_io;

	dev = file->private_data;

	/* if we cannot read at all, return EOF */
	if (!dev->bulk_in_urb || !count)
		return 0;

	/* no concurrent readers */
	rv = mutex_lock_interruptible(&dev->io_mutex);
	if (rv < 0)
		return rv;

	if (dev->disconnected) {		/* disconnect() was called */
		rv = -ENODEV;
		goto exit;
	}

	/* if IO is under way, we must not touch things */
retry:
	spin_lock_irq(&dev->err_lock);
	ongoing_io = dev->ongoing_read;
	spin_unlock_irq(&dev->err_lock);

	if (ongoing_io) {
		/* nonblocking IO shall not wait */
		if (file->f_flags & O_NONBLOCK) {
			rv = -EAGAIN;
			goto exit;
		}
		/*
		 * IO may take forever
		 * hence wait in an interruptible state
		 */
		rv = wait_event_interruptible(dev->bulk_in_wait, (!dev->ongoing_read));
		if (rv < 0)
			goto exit;
	}

	/* errors must be reported */
	rv = dev->errors;
	if (rv < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		rv = (rv == -EPIPE) ? rv : -EIO;
		/* report it */
		goto exit;
	}

	/*
	 * if the buffer is filled we may satisfy the read
	 * else we need to start IO
	 */
	if (dev->bulk_in_filled) {
		/* we had read data */
		size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
		size_t chunk = min(available, count);

		if (!available) {
			/*
			 * all data has been used
			 * actual IO needs to be done
			 */
			rv = skel_do_read_io(dev, count);
			if (rv < 0)
				goto exit;
			else
				goto retry;
		}
		/*
		 * data is available
		 * chunk tells us how much shall be copied
		 */
		if (copy_to_user(buffer,
				 dev->bulk_in_buffer + dev->bulk_in_copied,
				 chunk))
			rv = -EFAULT;
		else
			rv = chunk;

		dev->bulk_in_copied += chunk;

		/*
		 * if we are asked for more than we have,
		 * we start IO but don't wait
		 */
		if (available < count)
			skel_do_read_io(dev, count - chunk);
	} else {
		/* no data in the buffer */
		rv = skel_do_read_io(dev, count);
		if (rv < 0)
			goto exit;
		else
			goto retry;
	}
exit:
	mutex_unlock(&dev->io_mutex);
	return rv;
}

static void skel_write_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;
	unsigned long flags;

	dev = urb->context;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);

		spin_lock_irqsave(&dev->err_lock, flags);
		dev->errors = urb->status;
		spin_unlock_irqrestore(&dev->err_lock, flags);
	}

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);
	up(&dev->limit_sem);
}

static ssize_t skel_write(struct file *file, const char *user_buffer,
			  size_t count, loff_t *ppos)
{
	struct usb_skel *dev;
	int retval = 0;
	struct urb *urb = NULL;
	char *buf = NULL;
	size_t writesize = min(count, (size_t)MAX_TRANSFER);

	dev = file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto exit;

	/*
	 * limit the number of URBs in flight to stop a user from using up all
	 * RAM
	 */
	if (!(file->f_flags & O_NONBLOCK)) {
		if (down_interruptible(&dev->limit_sem)) {
			retval = -ERESTARTSYS;
			goto exit;
		}
	} else {
		if (down_trylock(&dev->limit_sem)) {
			retval = -EAGAIN;
			goto exit;
		}
	}

	spin_lock_irq(&dev->err_lock);
	retval = dev->errors;
	if (retval < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		retval = (retval == -EPIPE) ? retval : -EIO;
	}
	spin_unlock_irq(&dev->err_lock);
	if (retval < 0)
		goto error;

	/* create a urb, and a buffer for it, and copy the data to the urb */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		retval = -ENOMEM;
		goto error;
	}

	buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
				 &urb->transfer_dma);
	if (!buf) {
		retval = -ENOMEM;
		goto error;
	}

	if (copy_from_user(buf, user_buffer, writesize)) {
		retval = -EFAULT;
		goto error;
	}

	/* this lock makes sure we don't submit URBs to gone devices */
	mutex_lock(&dev->io_mutex);
	if (dev->disconnected) {		/* disconnect() was called */
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}

	/* initialize the urb properly */
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
			  buf, writesize, skel_write_bulk_callback, dev);
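	/*
	 * The buffer was obtained from usb_alloc_coherent() and its bus
	 * address is already stored in urb->transfer_dma, so tell the USB
	 * core not to set up another DMA mapping for it.
	 */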
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->submitted);

	/* send the data out the bulk port */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	mutex_unlock(&dev->io_mutex);
	if (retval) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting write urb, error %d\n",
			__func__, retval);
		goto error_unanchor;
	}

	/*
	 * release our reference to this urb, the USB core will eventually free
	 * it entirely
	 */
	usb_free_urb(urb);

	return writesize;

error_unanchor:
	usb_unanchor_urb(urb);
error:
	if (urb) {
		usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
		usb_free_urb(urb);
	}
	up(&dev->limit_sem);

exit:
	return retval;
}

static const struct file_operations skel_fops = {
	.owner =	THIS_MODULE,
	.read =		skel_read,
	.write =	skel_write,
	.open =		skel_open,
	.release =	skel_release,
	.flush =	skel_flush,
	.llseek =	noop_llseek,
};

/*
 * usb class driver info in order to get a minor number from the usb core,
 * and to have the device registered with the driver core
 */
static struct usb_class_driver skel_class = {
	.name =		"skel%d",
	.fops =		&skel_fops,
	.minor_base =	USB_SKEL_MINOR_BASE,
};

static int skel_probe(struct usb_interface *interface,
		      const struct usb_device_id *id)
{
	struct usb_skel *dev;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	int retval;

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kref_init(&dev->kref);
	sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
	mutex_init(&dev->io_mutex);
	spin_lock_init(&dev->err_lock);
	init_usb_anchor(&dev->submitted);
	init_waitqueue_head(&dev->bulk_in_wait);

	dev->udev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = usb_get_intf(interface);

	/* set up the endpoint information */
	/* use only the first bulk-in and bulk-out endpoints */
	retval = usb_find_common_endpoints(interface->cur_altsetting,
					   &bulk_in, &bulk_out, NULL, NULL);
	if (retval) {
		dev_err(&interface->dev,
			"Could not find both bulk-in and bulk-out endpoints\n");
		goto error;
	}

	dev->bulk_in_size = usb_endpoint_maxp(bulk_in);
	dev->bulk_in_endpointAddr = bulk_in->bEndpointAddress;
	dev->bulk_in_buffer = kmalloc(dev->bulk_in_size, GFP_KERNEL);
	if (!dev->bulk_in_buffer) {
		retval = -ENOMEM;
		goto error;
	}
	dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->bulk_in_urb) {
		retval = -ENOMEM;
		goto error;
	}

	dev->bulk_out_endpointAddr = bulk_out->bEndpointAddress;

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* we can register the device now, as it is ready */
	retval = usb_register_dev(interface, &skel_class);
	if (retval) {
		/* something prevented us from registering this driver */
		dev_err(&interface->dev,
			"Not able to get a minor for this device.\n");
		usb_set_intfdata(interface, NULL);
		goto error;
	}

	/* let the user know what node this device is now attached to */
	dev_info(&interface->dev,
		 "USB Skeleton device now attached to USBSkel-%d",
		 interface->minor);
	return 0;

error:
	/* this frees allocated memory */
	kref_put(&dev->kref, skel_delete);

	return retval;
}

static void skel_disconnect(struct usb_interface *interface)
{
	struct usb_skel *dev;
	int minor = interface->minor;

	dev = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);

	/* give back our minor */
	usb_deregister_dev(interface, &skel_class);

	/* prevent more I/O from starting */
	mutex_lock(&dev->io_mutex);
	dev->disconnected = 1;
	mutex_unlock(&dev->io_mutex);

	usb_kill_anchored_urbs(&dev->submitted);

	/* decrement our usage count */
	kref_put(&dev->kref, skel_delete);

	dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
}
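
/*
 * Wait up to a second for the anchored (write) URBs to complete, kill
 * whatever is still outstanding, and then cancel any bulk-in URB as well.
 */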
static void skel_draw_down(struct usb_skel *dev)
{
	int time;

	time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
	if (!time)
		usb_kill_anchored_urbs(&dev->submitted);
	usb_kill_urb(dev->bulk_in_urb);
}

static int skel_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	if (!dev)
		return 0;
	skel_draw_down(dev);
	return 0;
}

static int skel_resume(struct usb_interface *intf)
{
	return 0;
}
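
/*
 * io_mutex is taken in pre_reset and released again in post_reset, so no
 * new I/O can be submitted while the device is being reset.
 */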
static int skel_pre_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	return 0;
}

static int skel_post_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	/* we are sure no URBs are active - no locking needed */
	dev->errors = -EPIPE;
	mutex_unlock(&dev->io_mutex);

	return 0;
}

static struct usb_driver skel_driver = {
	.name =		"skeleton",
	.probe =	skel_probe,
	.disconnect =	skel_disconnect,
	.suspend =	skel_suspend,
	.resume =	skel_resume,
	.pre_reset =	skel_pre_reset,
	.post_reset =	skel_post_reset,
	.id_table =	skel_table,
	.supports_autosuspend = 1,
};

module_usb_driver(skel_driver);

MODULE_LICENSE("GPL v2");