  1. /*
  2. * Parallel-port resource manager code.
  3. *
  4. * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
  5. * Tim Waugh <tim@cyberelk.demon.co.uk>
  6. * Jose Renau <renau@acm.org>
  7. * Philip Blundell <philb@gnu.org>
  8. * Andrea Arcangeli
  9. *
  10. * based on work by Grant Guenther <grant@torque.net>
  11. * and Philip Blundell
  12. *
  13. * Any part of this program may be used in documents licensed under
  14. * the GNU Free Documentation License, Version 1.1 or any later version
  15. * published by the Free Software Foundation.
  16. */
  17. #undef PARPORT_DEBUG_SHARING /* undef for production */
  18. #include <linux/module.h>
  19. #include <linux/string.h>
  20. #include <linux/threads.h>
  21. #include <linux/parport.h>
  22. #include <linux/delay.h>
  23. #include <linux/errno.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/ioport.h>
  26. #include <linux/kernel.h>
  27. #include <linux/slab.h>
  28. #include <linux/sched/signal.h>
  29. #include <linux/kmod.h>
  30. #include <linux/device.h>
  31. #include <linux/spinlock.h>
  32. #include <linux/mutex.h>
  33. #include <asm/irq.h>
  34. #undef PARPORT_PARANOID
#define PARPORT_DEFAULT_TIMESLICE (HZ/5)

/* Timeslice (jiffies) copied into each new pardevice's ->timeslice. */
unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
/* Spin-wait budget copied into each new port's ->spintime. */
int parport_default_spintime = DEFAULT_SPIN_TIME;

/* Ports that have been announced to drivers (parport_announce_port). */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* list of all allocated ports, sorted by ->number */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* Old-model drivers registered via parport_register_driver(). */
static LIST_HEAD(drivers);

/* Serialises driver attach/detach against port arrival/removal. */
static DEFINE_MUTEX(registration_lock);
  45. /* What you can do to a port that's gone away.. */
  46. static void dead_write_lines(struct parport *p, unsigned char b){}
  47. static unsigned char dead_read_lines(struct parport *p) { return 0; }
  48. static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
  49. unsigned char c) { return 0; }
  50. static void dead_onearg(struct parport *p){}
  51. static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
  52. static void dead_state(struct parport *p, struct parport_state *s) { }
  53. static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
  54. { return 0; }
  55. static size_t dead_read(struct parport *p, void *b, size_t l, int f)
  56. { return 0; }
  57. static struct parport_operations dead_ops = {
  58. .write_data = dead_write_lines, /* data */
  59. .read_data = dead_read_lines,
  60. .write_control = dead_write_lines, /* control */
  61. .read_control = dead_read_lines,
  62. .frob_control = dead_frob_lines,
  63. .read_status = dead_read_lines, /* status */
  64. .enable_irq = dead_onearg, /* enable_irq */
  65. .disable_irq = dead_onearg, /* disable_irq */
  66. .data_forward = dead_onearg, /* data_forward */
  67. .data_reverse = dead_onearg, /* data_reverse */
  68. .init_state = dead_initstate, /* init_state */
  69. .save_state = dead_state,
  70. .restore_state = dead_state,
  71. .epp_write_data = dead_write, /* epp */
  72. .epp_read_data = dead_read,
  73. .epp_write_addr = dead_write,
  74. .epp_read_addr = dead_read,
  75. .ecp_write_data = dead_write, /* ecp */
  76. .ecp_read_data = dead_read,
  77. .ecp_write_addr = dead_write,
  78. .compat_write_data = dead_write, /* compat */
  79. .nibble_read_data = dead_read, /* nibble */
  80. .byte_read_data = dead_read, /* byte */
  81. .owner = NULL,
  82. };
/* Device type that marks a struct device as a parallel port itself. */
static struct device_type parport_device_type = {
	.name = "parport",
};
  86. static int is_parport(struct device *dev)
  87. {
  88. return dev->type == &parport_device_type;
  89. }
  90. static int parport_probe(struct device *dev)
  91. {
  92. struct parport_driver *drv;
  93. if (is_parport(dev))
  94. return -ENODEV;
  95. drv = to_parport_driver(dev->driver);
  96. if (!drv->probe) {
  97. /* if driver has not defined a custom probe */
  98. struct pardevice *par_dev = to_pardevice(dev);
  99. if (strcmp(par_dev->name, drv->name))
  100. return -ENODEV;
  101. return 0;
  102. }
  103. /* if driver defined its own probe */
  104. return drv->probe(to_pardevice(dev));
  105. }
/* The bus that all parallel ports and their devices hang off. */
static struct bus_type parport_bus_type = {
	.name = "parport",
	.probe = parport_probe,
};
/* Register the parport bus type with the driver core. */
int parport_bus_init(void)
{
	return bus_register(&parport_bus_type);
}

/* Unregister the parport bus type again. */
void parport_bus_exit(void)
{
	bus_unregister(&parport_bus_type);
}
  118. /*
  119. * iterates through all the drivers registered with the bus and sends the port
  120. * details to the match_port callback of the driver, so that the driver can
  121. * know about the new port that just registered with the bus and decide if it
  122. * wants to use this new port.
  123. */
  124. static int driver_check(struct device_driver *dev_drv, void *_port)
  125. {
  126. struct parport *port = _port;
  127. struct parport_driver *drv = to_parport_driver(dev_drv);
  128. if (drv->match_port)
  129. drv->match_port(port);
  130. return 0;
  131. }
  132. /* Call attach(port) for each registered driver. */
  133. static void attach_driver_chain(struct parport *port)
  134. {
  135. /* caller has exclusive registration_lock */
  136. struct parport_driver *drv;
  137. list_for_each_entry(drv, &drivers, list)
  138. drv->attach(port);
  139. /*
  140. * call the driver_check function of the drivers registered in
  141. * new device model
  142. */
  143. bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
  144. }
  145. static int driver_detach(struct device_driver *_drv, void *_port)
  146. {
  147. struct parport *port = _port;
  148. struct parport_driver *drv = to_parport_driver(_drv);
  149. if (drv->detach)
  150. drv->detach(port);
  151. return 0;
  152. }
  153. /* Call detach(port) for each registered driver. */
  154. static void detach_driver_chain(struct parport *port)
  155. {
  156. struct parport_driver *drv;
  157. /* caller has exclusive registration_lock */
  158. list_for_each_entry(drv, &drivers, list)
  159. drv->detach(port);
  160. /*
  161. * call the detach function of the drivers registered in
  162. * new device model
  163. */
  164. bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
  165. }
  166. /* Ask kmod for some lowlevel drivers. */
/* Ask kmod for some lowlevel drivers. */
static void get_lowlevel_driver(void)
{
	/*
	 * There is no actual module called this: you should set
	 * up an alias for modutils so that the request resolves to
	 * a real hardware driver.
	 */
	request_module("parport_lowlevel");
}
  175. /*
  176. * iterates through all the devices connected to the bus and sends the device
  177. * details to the match_port callback of the driver, so that the driver can
  178. * know what are all the ports that are connected to the bus and choose the
  179. * port to which it wants to register its device.
  180. */
  181. static int port_check(struct device *dev, void *dev_drv)
  182. {
  183. struct parport_driver *drv = dev_drv;
  184. /* only send ports, do not send other devices connected to bus */
  185. if (is_parport(dev))
  186. drv->match_port(to_parport_dev(dev));
  187. return 0;
  188. }
  189. /*
  190. * Iterates through all the devices connected to the bus and return 1
  191. * if the device is a parallel port.
  192. */
  193. static int port_detect(struct device *dev, void *dev_drv)
  194. {
  195. if (is_parport(dev))
  196. return 1;
  197. return 0;
  198. }
  199. /**
  200. * parport_register_driver - register a parallel port device driver
  201. * @drv: structure describing the driver
  202. * @owner: owner module of drv
  203. * @mod_name: module name string
  204. *
  205. * This can be called by a parallel port device driver in order
  206. * to receive notifications about ports being found in the
  207. * system, as well as ports no longer available.
  208. *
  209. * If devmodel is true then the new device model is used
  210. * for registration.
  211. *
  212. * The @drv structure is allocated by the caller and must not be
  213. * deallocated until after calling parport_unregister_driver().
  214. *
  215. * If using the non device model:
  216. * The driver's attach() function may block. The port that
  217. * attach() is given will be valid for the duration of the
  218. * callback, but if the driver wants to take a copy of the
  219. * pointer it must call parport_get_port() to do so. Calling
  220. * parport_register_device() on that port will do this for you.
  221. *
  222. * The driver's detach() function may block. The port that
  223. * detach() is given will be valid for the duration of the
  224. * callback, but if the driver wants to take a copy of the
  225. * pointer it must call parport_get_port() to do so.
  226. *
  227. *
 * Returns 0 on success. The non device model always succeeds,
 * but the new device model can fail and will return the error code.
  230. **/
int __parport_register_driver(struct parport_driver *drv, struct module *owner,
			      const char *mod_name)
{
	/* No ports known at all yet: try to load a lowlevel driver. */
	if (list_empty(&portlist))
		get_lowlevel_driver();

	if (drv->devmodel) {
		/* using device model */
		int ret;

		/* initialize common driver fields */
		drv->driver.name = drv->name;
		drv->driver.bus = &parport_bus_type;
		drv->driver.owner = owner;
		drv->driver.mod_name = mod_name;
		ret = driver_register(&drv->driver);
		if (ret)
			return ret;

		/*
		 * check if bus has any parallel port registered, if
		 * none is found then load the lowlevel driver.
		 */
		ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
				       port_detect);
		if (!ret)
			get_lowlevel_driver();

		/* Offer every existing port to this driver's match hook. */
		mutex_lock(&registration_lock);
		if (drv->match_port)
			bus_for_each_dev(&parport_bus_type, NULL, drv,
					 port_check);
		mutex_unlock(&registration_lock);
	} else {
		struct parport *port;

		drv->devmodel = false;
		/* Old model: attach to each known port, then join the list. */
		mutex_lock(&registration_lock);
		list_for_each_entry(port, &portlist, list)
			drv->attach(port);
		list_add(&drv->list, &drivers);
		mutex_unlock(&registration_lock);
	}

	return 0;
}
EXPORT_SYMBOL(__parport_register_driver);
  272. static int port_detach(struct device *dev, void *_drv)
  273. {
  274. struct parport_driver *drv = _drv;
  275. if (is_parport(dev) && drv->detach)
  276. drv->detach(to_parport_dev(dev));
  277. return 0;
  278. }
  279. /**
  280. * parport_unregister_driver - deregister a parallel port device driver
  281. * @drv: structure describing the driver that was given to
  282. * parport_register_driver()
  283. *
  284. * This should be called by a parallel port device driver that
  285. * has registered itself using parport_register_driver() when it
  286. * is about to be unloaded.
  287. *
  288. * When it returns, the driver's attach() routine will no longer
  289. * be called, and for each port that attach() was called for, the
  290. * detach() routine will have been called.
  291. *
  292. * All the driver's attach() and detach() calls are guaranteed to have
  293. * finished by the time this function returns.
  294. **/
void parport_unregister_driver(struct parport_driver *drv)
{
	struct parport *port;

	mutex_lock(&registration_lock);
	if (drv->devmodel) {
		/* Device model: detach from every port, then drop driver. */
		bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
		driver_unregister(&drv->driver);
	} else {
		/*
		 * Old model: leave the driver list first so no further
		 * attach() happens, then detach from each known port.
		 */
		list_del_init(&drv->list);
		list_for_each_entry(port, &portlist, list)
			drv->detach(port);
	}
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_unregister_driver);
  310. static void free_port(struct device *dev)
  311. {
  312. int d;
  313. struct parport *port = to_parport_dev(dev);
  314. spin_lock(&full_list_lock);
  315. list_del(&port->full_list);
  316. spin_unlock(&full_list_lock);
  317. for (d = 0; d < 5; d++) {
  318. kfree(port->probe_info[d].class_name);
  319. kfree(port->probe_info[d].mfr);
  320. kfree(port->probe_info[d].model);
  321. kfree(port->probe_info[d].cmdset);
  322. kfree(port->probe_info[d].description);
  323. }
  324. kfree(port->name);
  325. kfree(port);
  326. }
  327. /**
  328. * parport_get_port - increment a port's reference count
  329. * @port: the port
  330. *
  331. * This ensures that a struct parport pointer remains valid
  332. * until the matching parport_put_port() call.
  333. **/
  334. struct parport *parport_get_port(struct parport *port)
  335. {
  336. struct device *dev = get_device(&port->bus_dev);
  337. return to_parport_dev(dev);
  338. }
  339. EXPORT_SYMBOL(parport_get_port);
/*
 * parport_del_port - unregister a port's device from the parport bus
 * @port: the port
 *
 * When the last reference is dropped, free_port() runs via the
 * bus_dev release callback.
 */
void parport_del_port(struct parport *port)
{
	device_unregister(&port->bus_dev);
}
EXPORT_SYMBOL(parport_del_port);
  345. /**
  346. * parport_put_port - decrement a port's reference count
  347. * @port: the port
  348. *
  349. * This should be called once for each call to parport_get_port(),
  350. * once the port is no longer needed. When the reference count reaches
  351. * zero (port is no longer used), free_port is called.
  352. **/
void parport_put_port(struct parport *port)
{
	/* The final put triggers the bus_dev release callback (free_port). */
	put_device(&port->bus_dev);
}
EXPORT_SYMBOL(parport_put_port);
  358. /**
  359. * parport_register_port - register a parallel port
  360. * @base: base I/O address
  361. * @irq: IRQ line
  362. * @dma: DMA channel
  363. * @ops: pointer to the port driver's port operations structure
  364. *
  365. * When a parallel port (lowlevel) driver finds a port that
  366. * should be made available to parallel port device drivers, it
  367. * should call parport_register_port(). The @base, @irq, and
  368. * @dma parameters are for the convenience of port drivers, and
  369. * for ports where they aren't meaningful needn't be set to
  370. * anything special. They can be altered afterwards by adjusting
  371. * the relevant members of the parport structure that is returned
  372. * and represents the port. They should not be tampered with
  373. * after calling parport_announce_port, however.
  374. *
  375. * If there are parallel port device drivers in the system that
  376. * have registered themselves using parport_register_driver(),
  377. * they are not told about the port at this time; that is done by
  378. * parport_announce_port().
  379. *
  380. * The @ops structure is allocated by the caller, and must not be
  381. * deallocated before calling parport_remove_port().
  382. *
  383. * If there is no memory to allocate a new parport structure,
  384. * this function will return %NULL.
  385. **/
  386. struct parport *parport_register_port(unsigned long base, int irq, int dma,
  387. struct parport_operations *ops)
  388. {
  389. struct list_head *l;
  390. struct parport *tmp;
  391. int num;
  392. int device;
  393. char *name;
  394. int ret;
  395. tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
  396. if (!tmp)
  397. return NULL;
  398. /* Init our structure */
  399. tmp->base = base;
  400. tmp->irq = irq;
  401. tmp->dma = dma;
  402. tmp->muxport = tmp->daisy = tmp->muxsel = -1;
  403. tmp->modes = 0;
  404. INIT_LIST_HEAD(&tmp->list);
  405. tmp->devices = tmp->cad = NULL;
  406. tmp->flags = 0;
  407. tmp->ops = ops;
  408. tmp->physport = tmp;
  409. memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info));
  410. rwlock_init(&tmp->cad_lock);
  411. spin_lock_init(&tmp->waitlist_lock);
  412. spin_lock_init(&tmp->pardevice_lock);
  413. tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
  414. tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
  415. sema_init(&tmp->ieee1284.irq, 0);
  416. tmp->spintime = parport_default_spintime;
  417. atomic_set(&tmp->ref_count, 1);
  418. INIT_LIST_HEAD(&tmp->full_list);
  419. name = kmalloc(15, GFP_KERNEL);
  420. if (!name) {
  421. kfree(tmp);
  422. return NULL;
  423. }
  424. /* Search for the lowest free parport number. */
  425. spin_lock(&full_list_lock);
  426. for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
  427. struct parport *p = list_entry(l, struct parport, full_list);
  428. if (p->number != num)
  429. break;
  430. }
  431. tmp->portnum = tmp->number = num;
  432. list_add_tail(&tmp->full_list, l);
  433. spin_unlock(&full_list_lock);
  434. /*
  435. * Now that the portnum is known finish doing the Init.
  436. */
  437. sprintf(name, "parport%d", tmp->portnum = tmp->number);
  438. tmp->name = name;
  439. tmp->bus_dev.bus = &parport_bus_type;
  440. tmp->bus_dev.release = free_port;
  441. dev_set_name(&tmp->bus_dev, name);
  442. tmp->bus_dev.type = &parport_device_type;
  443. for (device = 0; device < 5; device++)
  444. /* assume the worst */
  445. tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
  446. tmp->waithead = tmp->waittail = NULL;
  447. ret = device_register(&tmp->bus_dev);
  448. if (ret) {
  449. put_device(&tmp->bus_dev);
  450. return NULL;
  451. }
  452. return tmp;
  453. }
  454. EXPORT_SYMBOL(parport_register_port);
  455. /**
  456. * parport_announce_port - tell device drivers about a parallel port
  457. * @port: parallel port to announce
  458. *
  459. * After a port driver has registered a parallel port with
  460. * parport_register_port, and performed any necessary
  461. * initialisation or adjustments, it should call
  462. * parport_announce_port() in order to notify all device drivers
  463. * that have called parport_register_driver(). Their attach()
  464. * functions will be called, with @port as the parameter.
  465. **/
void parport_announce_port(struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		printk(KERN_WARNING "%s: fix this legacy no-device port driver!\n",
		       port->name);

	parport_proc_register(port);

	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* Any slave ports (slots 1 and 2) join the announced list too. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived. */
	attach_driver_chain(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_announce_port);
  496. /**
  497. * parport_remove_port - deregister a parallel port
  498. * @port: parallel port to deregister
  499. *
  500. * When a parallel port driver is forcibly unloaded, or a
  501. * parallel port becomes inaccessible, the port driver must call
  502. * this function in order to deal with device drivers that still
  503. * want to use it.
  504. *
  505. * The parport structure associated with the port has its
  506. * operations structure replaced with one containing 'null'
  507. * operations that return errors or just don't do anything.
  508. *
  509. * Any drivers that have registered themselves using
  510. * parport_register_driver() are notified that the port is no
  511. * longer accessible by having their detach() routines called
  512. * with @port as the parameter.
  513. **/
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain(port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* From here on, every operation on the port is a harmless no-op. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* Drop a reference to each slave; the last put frees it. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
EXPORT_SYMBOL(parport_remove_port);
  549. /**
  550. * parport_register_device - register a device on a parallel port
  551. * @port: port to which the device is attached
  552. * @name: a name to refer to the device
  553. * @pf: preemption callback
  554. * @kf: kick callback (wake-up)
  555. * @irq_func: interrupt handler
  556. * @flags: registration flags
  557. * @handle: data for callback functions
  558. *
  559. * This function, called by parallel port device drivers,
  560. * declares that a device is connected to a port, and tells the
  561. * system all it needs to know.
  562. *
  563. * The @name is allocated by the caller and must not be
  564. * deallocated until the caller calls @parport_unregister_device
  565. * for that device.
  566. *
  567. * The preemption callback function, @pf, is called when this
  568. * device driver has claimed access to the port but another
  569. * device driver wants to use it. It is given @handle as its
  570. * parameter, and should return zero if it is willing for the
  571. * system to release the port to another driver on its behalf.
  572. * If it wants to keep control of the port it should return
  573. * non-zero, and no action will be taken. It is good manners for
  574. * the driver to try to release the port at the earliest
  575. * opportunity after its preemption callback rejects a preemption
  576. * attempt. Note that if a preemption callback is happy for
  577. * preemption to go ahead, there is no need to release the port;
  578. * it is done automatically. This function may not block, as it
  579. * may be called from interrupt context. If the device driver
  580. * does not support preemption, @pf can be %NULL.
  581. *
  582. * The wake-up ("kick") callback function, @kf, is called when
  583. * the port is available to be claimed for exclusive access; that
  584. * is, parport_claim() is guaranteed to succeed when called from
  585. * inside the wake-up callback function. If the driver wants to
  586. * claim the port it should do so; otherwise, it need not take
  587. * any action. This function may not block, as it may be called
  588. * from interrupt context. If the device driver does not want to
  589. * be explicitly invited to claim the port in this way, @kf can
  590. * be %NULL.
  591. *
  592. * The interrupt handler, @irq_func, is called when an interrupt
  593. * arrives from the parallel port. Note that if a device driver
  594. * wants to use interrupts it should use parport_enable_irq(),
  595. * and can also check the irq member of the parport structure
  596. * representing the port.
  597. *
  598. * The parallel port (lowlevel) driver is the one that has called
  599. * request_irq() and whose interrupt handler is called first.
  600. * This handler does whatever needs to be done to the hardware to
  601. * acknowledge the interrupt (for PC-style ports there is nothing
  602. * special to be done). It then tells the IEEE 1284 code about
  603. * the interrupt, which may involve reacting to an IEEE 1284
  604. * event depending on the current IEEE 1284 phase. After this,
  605. * it calls @irq_func. Needless to say, @irq_func will be called
  606. * from interrupt context, and may not block.
  607. *
  608. * The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
  609. * so should only be used when sharing the port with other device
  610. * drivers is impossible and would lead to incorrect behaviour.
  611. * Use it sparingly! Normally, @flags will be zero.
  612. *
  613. * This function returns a pointer to a structure that represents
  614. * the device on the port, or %NULL if there is not enough memory
  615. * to allocate space for that structure.
  616. **/
struct pardevice *
parport_register_device(struct parport *port, const char *name,
			int (*pf)(void *), void (*kf)(void *),
			void (*irq_func)(void *),
			int flags, void *handle)
{
	struct pardevice *tmp;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		printk(KERN_DEBUG "%s: no more devices allowed\n",
		       port->name);
		return NULL;
	}

	if (flags & PARPORT_DEV_LURK) {
		/* A lurking device must be preemptible and wakeable. */
		if (!pf || !kf) {
			printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
			return NULL;
		}
	}

	if (flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	/*
	 * We up our own module reference count, and that of the port
	 * on which a device is to be registered, to ensure that
	 * neither of us gets unloaded while we sleep in (e.g.)
	 * kmalloc.
	 */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
	if (!tmp)
		goto out;

	tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
	if (!tmp->state)
		goto out_free_pardevice;

	tmp->name = name;
	tmp->port = port;
	tmp->daisy = -1;
	tmp->preempt = pf;
	tmp->wakeup = kf;
	tmp->private = handle;
	tmp->flags = flags;
	tmp->irq_func = irq_func;
	tmp->waiting = 0;
	tmp->timeout = 5 * HZ;
	tmp->devmodel = false;

	/* Chain this onto the list */
	tmp->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don't need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	if (flags & PARPORT_DEV_EXCL) {
		/* Re-check under the lock: another device may have raced in. */
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			printk(KERN_DEBUG
			       "%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			goto out_free_all;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	tmp->next = port->physport->devices;
	wmb(); /*
		* Make sure that tmp->next is written before it's
		* added to the list; see comments marked 'no locking
		* required'
		*/
	if (port->physport->devices)
		port->physport->devices->prev = tmp;
	port->physport->devices = tmp;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&tmp->wait_q);
	tmp->timeslice = parport_default_timeslice;
	tmp->waitnext = tmp->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(tmp, tmp->state);

	/* First device on the port also becomes its /proc representative. */
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = tmp;
		parport_device_proc_register(tmp);
	}
	return tmp;

out_free_all:
	kfree(tmp->state);
out_free_pardevice:
	kfree(tmp);
out:
	parport_put_port(port);
	module_put(port->ops->owner);
	return NULL;
}
EXPORT_SYMBOL(parport_register_device);
  725. static void free_pardevice(struct device *dev)
  726. {
  727. struct pardevice *par_dev = to_pardevice(dev);
  728. kfree(par_dev->name);
  729. kfree(par_dev);
  730. }
/**
 * parport_register_dev_model - register a device on a parallel port
 * @port: port to which the device is attached
 * @name: name of the device (copied with kstrdup())
 * @par_dev_cb: callbacks and flags for the device
 * @id: device number, used to build the device-model name "%s.%d"
 *
 * Device-model counterpart of parport_register_device(): allocates a
 * struct pardevice, registers it with the parport bus via
 * device_register(), and links it onto @port's physical port's device
 * list.  Returns the new device, or %NULL on any failure.
 */
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
			   const struct pardev_cb *par_dev_cb, int id)
{
	struct pardevice *par_dev;
	int ret;
	char *devname;

	/* An exclusive device already owns the port: refuse everyone else. */
	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		pr_err("%s: no more devices allowed\n", port->name);
		return NULL;
	}

	/* A lurking device must supply both callbacks to be woken/preempted. */
	if (par_dev_cb->flags & PARPORT_DEV_LURK) {
		if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
			pr_info("%s: refused to register lurking device (%s) without callbacks\n",
				port->name, name);
			return NULL;
		}
	}

	/*
	 * Early unlocked check for the exclusive-access request; repeated
	 * below under pardevice_lock, which is the authoritative test.
	 */
	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	/*
	 * Pin the lowlevel driver module and the port so neither goes
	 * away while we sleep in the allocations below.
	 */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
	if (!par_dev)
		goto err_put_port;

	par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
	if (!par_dev->state)
		goto err_put_par_dev;

	devname = kstrdup(name, GFP_KERNEL);
	if (!devname)
		goto err_free_par_dev;

	par_dev->name = devname;
	par_dev->port = port;
	par_dev->daisy = -1;	/* not a daisy-chain device (yet) */
	par_dev->preempt = par_dev_cb->preempt;
	par_dev->wakeup = par_dev_cb->wakeup;
	par_dev->private = par_dev_cb->private;
	par_dev->flags = par_dev_cb->flags;
	par_dev->irq_func = par_dev_cb->irq_func;
	par_dev->waiting = 0;
	par_dev->timeout = 5 * HZ;

	par_dev->dev.parent = &port->bus_dev;
	par_dev->dev.bus = &parport_bus_type;
	ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
	if (ret)
		goto err_free_devname;
	par_dev->dev.release = free_pardevice;
	par_dev->devmodel = true;
	ret = device_register(&par_dev->dev);
	if (ret) {
		/*
		 * device_register() failed after initialization: the
		 * release callback (free_pardevice) now owns name and
		 * par_dev, so only state is freed here before put_device().
		 */
		kfree(par_dev->state);
		put_device(&par_dev->dev);
		goto err_put_port;
	}

	/* Chain this onto the list */
	par_dev->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don't need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	/* Authoritative exclusive-access check, now under the lock. */
	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			pr_debug("%s: cannot grant exclusive access for device %s\n",
				 port->name, name);
			kfree(par_dev->state);
			/* device_unregister() drops the final ref; release frees the rest. */
			device_unregister(&par_dev->dev);
			goto err_put_port;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	par_dev->next = port->physport->devices;
	wmb();	/*
		 * Make sure that tmp->next is written before it's
		 * added to the list; see comments marked 'no locking
		 * required'
		 */
	if (port->physport->devices)
		port->physport->devices->prev = par_dev;
	port->physport->devices = par_dev;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&par_dev->wait_q);
	par_dev->timeslice = parport_default_timeslice;
	par_dev->waitnext = NULL;
	par_dev->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(par_dev, par_dev->state);

	/* First device on the port gets the /proc entry. */
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = par_dev;
		parport_device_proc_register(par_dev);
	}

	return par_dev;

err_free_devname:
	kfree(devname);
err_free_par_dev:
	kfree(par_dev->state);
err_put_par_dev:
	/* devmodel is still false on these paths, so we free par_dev directly. */
	if (!par_dev->devmodel)
		kfree(par_dev);
err_put_port:
	parport_put_port(port);
	module_put(port->ops->owner);

	return NULL;
}
EXPORT_SYMBOL(parport_register_dev_model);
/**
 * parport_unregister_device - deregister a device on a parallel port
 * @dev: pointer to structure representing device
 *
 * This undoes the effect of parport_register_device().
 **/
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (!dev) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	/* Always operate on the physical port (mux ports share one physport). */
	port = dev->port->physport;

	/* Drop the /proc entry if this device owned it. */
	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	/* Drivers should release the port first; cope if they forgot. */
	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release(dev);
	}

	/* Unlink from the port's device list. */
	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/*
	 * Make sure we haven't left any pointers around in the wait
	 * list.
	 */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;

		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	/*
	 * Device-model devices are freed by free_pardevice() via the
	 * struct device release callback; legacy ones are freed here.
	 */
	if (dev->devmodel)
		device_unregister(&dev->dev);
	else
		kfree(dev);

	module_put(port->ops->owner);
	parport_put_port(port);
}
EXPORT_SYMBOL(parport_unregister_device);
  914. /**
  915. * parport_find_number - find a parallel port by number
  916. * @number: parallel port number
  917. *
  918. * This returns the parallel port with the specified number, or
  919. * %NULL if there is none.
  920. *
  921. * There is an implicit parport_get_port() done already; to throw
  922. * away the reference to the port that parport_find_number()
  923. * gives you, use parport_put_port().
  924. */
  925. struct parport *parport_find_number(int number)
  926. {
  927. struct parport *port, *result = NULL;
  928. if (list_empty(&portlist))
  929. get_lowlevel_driver();
  930. spin_lock(&parportlist_lock);
  931. list_for_each_entry(port, &portlist, list) {
  932. if (port->number == number) {
  933. result = parport_get_port(port);
  934. break;
  935. }
  936. }
  937. spin_unlock(&parportlist_lock);
  938. return result;
  939. }
  940. EXPORT_SYMBOL(parport_find_number);
  941. /**
  942. * parport_find_base - find a parallel port by base address
  943. * @base: base I/O address
  944. *
  945. * This returns the parallel port with the specified base
  946. * address, or %NULL if there is none.
  947. *
  948. * There is an implicit parport_get_port() done already; to throw
  949. * away the reference to the port that parport_find_base()
  950. * gives you, use parport_put_port().
  951. */
  952. struct parport *parport_find_base(unsigned long base)
  953. {
  954. struct parport *port, *result = NULL;
  955. if (list_empty(&portlist))
  956. get_lowlevel_driver();
  957. spin_lock(&parportlist_lock);
  958. list_for_each_entry(port, &portlist, list) {
  959. if (port->base == base) {
  960. result = parport_get_port(port);
  961. break;
  962. }
  963. }
  964. spin_unlock(&parportlist_lock);
  965. return result;
  966. }
  967. EXPORT_SYMBOL(parport_find_base);
/**
 * parport_claim - claim access to a parallel port device
 * @dev: pointer to structure representing a device on the port
 *
 * This function will not block and so can be used from interrupt
 * context.  If parport_claim() succeeds in claiming access to
 * the port it returns zero and the port is available to use.  It
 * may fail (returning non-zero) if the port is in use by another
 * driver and that driver is not willing to relinquish control of
 * the port.
 **/
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	/* Re-claiming a port we already own is a harmless no-op. */
	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name, dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave(&port->cad_lock, flags);
	oldcad = port->cad;
	if (oldcad) {
		if (oldcad->preempt) {
			/* Non-zero from preempt() means the owner refuses. */
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/*
			 * I think we'll actually deadlock rather than
			 * get here, but just in case..
			 */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting. */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again.  */
		spin_lock_irq(&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq(&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select(port, dev->daisy,
					  IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/*
	 * If this is the first time we tried to claim the port, register an
	 * interest.  This is only allowed for devices sleeping in
	 * parport_claim_or_block(), or those with a wakeup function.
	 */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock(&port->waitlist_lock);
		/* Bit 0 of dev->waiting == "on the wait list"; add once only. */
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock(&port->waitlist_lock);
	}
	write_unlock_irqrestore(&port->cad_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(parport_claim);
/**
 * parport_claim_or_block - claim access to a parallel port device
 * @dev: pointer to structure representing a device on the port
 *
 * This behaves like parport_claim(), but will block if necessary
 * to wait for the port to be free.  A return value of 1
 * indicates that it slept; 0 means that it succeeded without
 * needing to sleep.  A negative error code indicates failure.
 **/
int parport_claim_or_block(struct pardevice *dev)
{
	int r;

	/*
	 * Signal to parport_claim() that we can wait even without a
	 * wakeup function.
	 */
	dev->waiting = 2;

	/* Try to claim the port.  If this fails, we need to sleep.  */
	r = parport_claim(dev);
	if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
#endif
		/*
		 * FIXME!!! Use the proper locking for dev->waiting,
		 * and make this use the "wait_event_interruptible()"
		 * interfaces. The cli/sti that used to be here
		 * did nothing.
		 *
		 * See also parport_release()
		 */

		/*
		 * If dev->waiting is clear now, an interrupt
		 * gave us the port and we would deadlock if we slept.
		 */
		if (dev->waiting) {
			wait_event_interruptible(dev->wait_q,
						 !dev->waiting);
			/* Interrupted by a signal before the port was granted. */
			if (signal_pending(current))
				return -EINTR;
			r = 1;
		} else {
			r = 0;
#ifdef PARPORT_DEBUG_SHARING
			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
			       dev->name);
#endif
		}

#ifdef PARPORT_DEBUG_SHARING
		if (dev->port->physport->cad != dev)
			printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
			       dev->name, dev->port->physport->cad ?
			       dev->port->physport->cad->name : "nobody");
#endif
	}
	/* Clear the "may sleep without wakeup" marker set above. */
	dev->waiting = 0;
	return r;
}
EXPORT_SYMBOL(parport_claim_or_block);
/**
 * parport_release - give up access to a parallel port device
 * @dev: pointer to structure representing parallel port device
 *
 * This function cannot fail, but it should not be called without
 * the port claimed.  Similarly, if the port is already claimed
 * you should not try claiming it again.
 **/
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore(&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport when not owner\n",
		       port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all(port);
		port->daisy = -1;
	}
#endif

	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers */
	port->ops->save_state(port, dev->state);

	/*
	 * If anybody is waiting, find out who's been there longest and
	 * then wake them up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block */
			/* Hand the port over directly, then wake the sleeper. */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			/* Let the waiter claim the port itself from its callback. */
			pd->wakeup(pd->private);
			if (dev->port->cad) /* racy but no matter */
				return;
		} else {
			printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
		}
	}

	/*
	 * Nobody was waiting, so walk the list to see if anyone is
	 * interested in being woken up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->devices; !port->cad && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
EXPORT_SYMBOL(parport_release);
  1199. irqreturn_t parport_irq_handler(int irq, void *dev_id)
  1200. {
  1201. struct parport *port = dev_id;
  1202. parport_generic_irq(port);
  1203. return IRQ_HANDLED;
  1204. }
  1205. EXPORT_SYMBOL(parport_irq_handler);
  1206. MODULE_LICENSE("GPL");