pci_stub.c
  1. /*
  2. * PCI Stub Driver - Grabs devices in backend to be exported later
  3. *
  4. * Ryan Wilson <hap9@epoch.ncsc.mil>
  5. * Chris Bookholt <hap10@epoch.ncsc.mil>
  6. */
  7. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8. #include <linux/module.h>
  9. #include <linux/init.h>
  10. #include <linux/rwsem.h>
  11. #include <linux/list.h>
  12. #include <linux/spinlock.h>
  13. #include <linux/kref.h>
  14. #include <linux/pci.h>
  15. #include <linux/wait.h>
  16. #include <linux/sched.h>
  17. #include <linux/atomic.h>
  18. #include <xen/events.h>
  19. #include <asm/xen/pci.h>
  20. #include <asm/xen/hypervisor.h>
  21. #include <xen/interface/physdev.h>
  22. #include "pciback.h"
  23. #include "conf_space.h"
  24. #include "conf_space_quirks.h"
  25. #define PCISTUB_DRIVER_NAME "pciback"
  26. static char *pci_devs_to_hide;
  27. wait_queue_head_t xen_pcibk_aer_wait_queue;
  28. /* Semaphore to synchronize AER handling with xen_pcibk remove/reconfigure ops;
  29. * we want to avoid a xen_pcibk device being removed in the middle of AER handling.
  30. */
  31. static DECLARE_RWSEM(pcistub_sem);
  32. module_param_named(hide, pci_devs_to_hide, charp, 0444);
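/*
 * The "hide" parameter is parsed in pcistub_init() below: a list of
 * parenthesized hex BDFs, with '*' as a slot/function wildcard and an
 * optional PCI domain. When the driver is built in, it is commonly given
 * on the kernel command line; illustrative values only:
 *
 *   xen-pciback.hide=(0000:03:00.0)(04:00.*)
 */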
  33. struct pcistub_device_id {
  34. struct list_head slot_list;
  35. int domain;
  36. unsigned char bus;
  37. unsigned int devfn;
  38. };
  39. static LIST_HEAD(pcistub_device_ids);
  40. static DEFINE_SPINLOCK(device_ids_lock);
  41. struct pcistub_device {
  42. struct kref kref;
  43. struct list_head dev_list;
  44. spinlock_t lock;
  45. struct pci_dev *dev;
  46. struct xen_pcibk_device *pdev;/* non-NULL if struct pci_dev is in use */
  47. };
  48. /* Access to pcistub_devices & seized_devices lists and the initialize_devices
  49. * flag must be locked with pcistub_devices_lock
  50. */
  51. static DEFINE_SPINLOCK(pcistub_devices_lock);
  52. static LIST_HEAD(pcistub_devices);
  53. /* wait for device_initcall before initializing our devices
  54. * (see pcistub_init_devices_late)
  55. */
  56. static int initialize_devices;
  57. static LIST_HEAD(seized_devices);
  58. static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
  59. {
  60. struct pcistub_device *psdev;
  61. dev_dbg(&dev->dev, "pcistub_device_alloc\n");
  62. psdev = kzalloc(sizeof(*psdev), GFP_KERNEL);
  63. if (!psdev)
  64. return NULL;
  65. psdev->dev = pci_dev_get(dev);
  66. if (!psdev->dev) {
  67. kfree(psdev);
  68. return NULL;
  69. }
  70. kref_init(&psdev->kref);
  71. spin_lock_init(&psdev->lock);
  72. return psdev;
  73. }
  74. /* Don't call this directly as it's called by pcistub_device_put */
  75. static void pcistub_device_release(struct kref *kref)
  76. {
  77. struct pcistub_device *psdev;
  78. struct pci_dev *dev;
  79. struct xen_pcibk_dev_data *dev_data;
  80. psdev = container_of(kref, struct pcistub_device, kref);
  81. dev = psdev->dev;
  82. dev_data = pci_get_drvdata(dev);
  83. dev_dbg(&dev->dev, "pcistub_device_release\n");
  84. xen_unregister_device_domain_owner(dev);
  85. /* Call the reset function that does not take the device lock, as this
  86. * is called from "unbind", which already holds the device_lock mutex.
  87. */
  88. __pci_reset_function_locked(dev);
  89. if (dev_data &&
  90. pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
  91. dev_info(&dev->dev, "Could not reload PCI state\n");
  92. else
  93. pci_restore_state(dev);
  94. if (dev->msix_cap) {
  95. struct physdev_pci_device ppdev = {
  96. .seg = pci_domain_nr(dev->bus),
  97. .bus = dev->bus->number,
  98. .devfn = dev->devfn
  99. };
  100. int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
  101. &ppdev);
  102. if (err && err != -ENOSYS)
  103. dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
  104. err);
  105. }
  106. /* Disable the device */
  107. xen_pcibk_reset_device(dev);
  108. kfree(dev_data);
  109. pci_set_drvdata(dev, NULL);
  110. /* Clean-up the device */
  111. xen_pcibk_config_free_dyn_fields(dev);
  112. xen_pcibk_config_free_dev(dev);
  113. pci_clear_dev_assigned(dev);
  114. pci_dev_put(dev);
  115. kfree(psdev);
  116. }
  117. static inline void pcistub_device_get(struct pcistub_device *psdev)
  118. {
  119. kref_get(&psdev->kref);
  120. }
  121. static inline void pcistub_device_put(struct pcistub_device *psdev)
  122. {
  123. kref_put(&psdev->kref, pcistub_device_release);
  124. }
  125. static struct pcistub_device *pcistub_device_find_locked(int domain, int bus,
  126. int slot, int func)
  127. {
  128. struct pcistub_device *psdev;
  129. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  130. if (psdev->dev != NULL
  131. && domain == pci_domain_nr(psdev->dev->bus)
  132. && bus == psdev->dev->bus->number
  133. && slot == PCI_SLOT(psdev->dev->devfn)
  134. && func == PCI_FUNC(psdev->dev->devfn)) {
  135. return psdev;
  136. }
  137. }
  138. return NULL;
  139. }
  140. static struct pcistub_device *pcistub_device_find(int domain, int bus,
  141. int slot, int func)
  142. {
  143. struct pcistub_device *psdev;
  144. unsigned long flags;
  145. spin_lock_irqsave(&pcistub_devices_lock, flags);
  146. psdev = pcistub_device_find_locked(domain, bus, slot, func);
  147. if (psdev)
  148. pcistub_device_get(psdev);
  149. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  150. return psdev;
  151. }
  152. static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
  153. struct pcistub_device *psdev)
  154. {
  155. struct pci_dev *pci_dev = NULL;
  156. unsigned long flags;
  157. pcistub_device_get(psdev);
  158. spin_lock_irqsave(&psdev->lock, flags);
  159. if (!psdev->pdev) {
  160. psdev->pdev = pdev;
  161. pci_dev = psdev->dev;
  162. }
  163. spin_unlock_irqrestore(&psdev->lock, flags);
  164. if (!pci_dev)
  165. pcistub_device_put(psdev);
  166. return pci_dev;
  167. }
  168. struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
  169. int domain, int bus,
  170. int slot, int func)
  171. {
  172. struct pcistub_device *psdev;
  173. struct pci_dev *found_dev = NULL;
  174. unsigned long flags;
  175. spin_lock_irqsave(&pcistub_devices_lock, flags);
  176. psdev = pcistub_device_find_locked(domain, bus, slot, func);
  177. if (psdev)
  178. found_dev = pcistub_device_get_pci_dev(pdev, psdev);
  179. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  180. return found_dev;
  181. }
  182. struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
  183. struct pci_dev *dev)
  184. {
  185. struct pcistub_device *psdev;
  186. struct pci_dev *found_dev = NULL;
  187. unsigned long flags;
  188. spin_lock_irqsave(&pcistub_devices_lock, flags);
  189. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  190. if (psdev->dev == dev) {
  191. found_dev = pcistub_device_get_pci_dev(pdev, psdev);
  192. break;
  193. }
  194. }
  195. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  196. return found_dev;
  197. }
  198. /*
  199. * Called when:
  200. * - XenBus state has been reconfigured (PCI unplug). See xen_pcibk_remove_device
  201. * - XenBus state has been disconnected (guest shutdown). See xen_pcibk_xenbus_remove
  202. * - 'echo BDF > unbind' on pciback module with no guest attached. See pcistub_remove
  203. * - 'echo BDF > unbind' with a guest still using it. See pcistub_remove
  204. *
  205. * As such we have to be careful.
  206. *
  207. * To make this easier, the caller has to hold the device lock.
  208. */
  209. void pcistub_put_pci_dev(struct pci_dev *dev)
  210. {
  211. struct pcistub_device *psdev, *found_psdev = NULL;
  212. unsigned long flags;
  213. struct xen_pcibk_dev_data *dev_data;
  214. int ret;
  215. spin_lock_irqsave(&pcistub_devices_lock, flags);
  216. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  217. if (psdev->dev == dev) {
  218. found_psdev = psdev;
  219. break;
  220. }
  221. }
  222. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  223. if (WARN_ON(!found_psdev))
  224. return;
  225. /* Hold this lock to avoid breaking the link between
  226. * pcistub and xen_pcibk while AER handling is in progress.
  227. */
  228. down_write(&pcistub_sem);
  229. /* Clean up our device
  230. * (so it's ready for the next domain)
  231. */
  232. device_lock_assert(&dev->dev);
  233. __pci_reset_function_locked(dev);
  234. dev_data = pci_get_drvdata(dev);
  235. ret = pci_load_saved_state(dev, dev_data->pci_saved_state);
  236. if (!ret) {
  237. /*
  238. * The usual sequence is pci_save_state & pci_restore_state
  239. * but the guest might have messed the configuration space up.
  240. * Use the initial version (when device was bound to us).
  241. */
  242. pci_restore_state(dev);
  243. } else
  244. dev_info(&dev->dev, "Could not reload PCI state\n");
  245. /* This disables the device. */
  246. xen_pcibk_reset_device(dev);
  247. /* And clean up our emulated fields. */
  248. xen_pcibk_config_reset_dev(dev);
  249. xen_pcibk_config_free_dyn_fields(dev);
  250. xen_unregister_device_domain_owner(dev);
  251. spin_lock_irqsave(&found_psdev->lock, flags);
  252. found_psdev->pdev = NULL;
  253. spin_unlock_irqrestore(&found_psdev->lock, flags);
  254. pcistub_device_put(found_psdev);
  255. up_write(&pcistub_sem);
  256. }
  257. static int pcistub_match_one(struct pci_dev *dev,
  258. struct pcistub_device_id *pdev_id)
  259. {
  260. /* Match the specified device by domain, bus, slot, func and also if
  261. * any of the device's parent bridges match.
  262. */
  263. for (; dev != NULL; dev = dev->bus->self) {
  264. if (pci_domain_nr(dev->bus) == pdev_id->domain
  265. && dev->bus->number == pdev_id->bus
  266. && dev->devfn == pdev_id->devfn)
  267. return 1;
  268. /* Sometimes topmost bridge links to itself. */
  269. if (dev == dev->bus->self)
  270. break;
  271. }
  272. return 0;
  273. }
  274. static int pcistub_match(struct pci_dev *dev)
  275. {
  276. struct pcistub_device_id *pdev_id;
  277. unsigned long flags;
  278. int found = 0;
  279. spin_lock_irqsave(&device_ids_lock, flags);
  280. list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
  281. if (pcistub_match_one(dev, pdev_id)) {
  282. found = 1;
  283. break;
  284. }
  285. }
  286. spin_unlock_irqrestore(&device_ids_lock, flags);
  287. return found;
  288. }
  289. static int pcistub_init_device(struct pci_dev *dev)
  290. {
  291. struct xen_pcibk_dev_data *dev_data;
  292. int err = 0;
  293. dev_dbg(&dev->dev, "initializing...\n");
  294. /* The PCI backend is not intended to be a module (or to work with
  295. * removable PCI devices) yet. If it were, xen_pcibk_config_free()
  296. * would need to be called somewhere to free the memory allocated
  297. * here and then to call kfree(pci_get_drvdata(psdev->dev)).
  298. */
  299. dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
  300. + strlen(pci_name(dev)) + 1, GFP_KERNEL);
  301. if (!dev_data) {
  302. err = -ENOMEM;
  303. goto out;
  304. }
  305. pci_set_drvdata(dev, dev_data);
  306. /*
  307. * Setup name for fake IRQ handler. It will only be enabled
  308. * once the device is turned on by the guest.
  309. */
  310. sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));
  311. dev_dbg(&dev->dev, "initializing config\n");
  312. init_waitqueue_head(&xen_pcibk_aer_wait_queue);
  313. err = xen_pcibk_config_init_dev(dev);
  314. if (err)
  315. goto out;
  316. /* HACK: Force device (& ACPI) to determine what IRQ it's on - we
  317. * must do this here because pcibios_enable_device may specify
  318. * the pci device's true irq (and possibly its other resources)
  319. * if they differ from what's in the configuration space.
  320. * This makes the assumption that the device's resources won't
  321. * change after this point (otherwise this code may break!)
  322. */
  323. dev_dbg(&dev->dev, "enabling device\n");
  324. err = pci_enable_device(dev);
  325. if (err)
  326. goto config_release;
  327. if (dev->msix_cap) {
  328. struct physdev_pci_device ppdev = {
  329. .seg = pci_domain_nr(dev->bus),
  330. .bus = dev->bus->number,
  331. .devfn = dev->devfn
  332. };
  333. err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
  334. if (err && err != -ENOSYS)
  335. dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
  336. err);
  337. }
  338. /* We need the device active to save the state. */
  339. dev_dbg(&dev->dev, "save state of device\n");
  340. pci_save_state(dev);
  341. dev_data->pci_saved_state = pci_store_saved_state(dev);
  342. if (!dev_data->pci_saved_state)
  343. dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
  344. else {
  345. dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
  346. __pci_reset_function_locked(dev);
  347. pci_restore_state(dev);
  348. }
  349. /* Now disable the device (this also ensures some private device
  350. * data is set up before we export)
  351. */
  352. dev_dbg(&dev->dev, "reset device\n");
  353. xen_pcibk_reset_device(dev);
  354. pci_set_dev_assigned(dev);
  355. return 0;
  356. config_release:
  357. xen_pcibk_config_free_dev(dev);
  358. out:
  359. pci_set_drvdata(dev, NULL);
  360. kfree(dev_data);
  361. return err;
  362. }
  363. /*
  364. * Because some initialization still happens on
  365. * devices during fs_initcall, we need to defer
  366. * full initialization of our devices until
  367. * device_initcall.
  368. */
  369. static int __init pcistub_init_devices_late(void)
  370. {
  371. struct pcistub_device *psdev;
  372. unsigned long flags;
  373. int err = 0;
  374. spin_lock_irqsave(&pcistub_devices_lock, flags);
  375. while (!list_empty(&seized_devices)) {
  376. psdev = container_of(seized_devices.next,
  377. struct pcistub_device, dev_list);
  378. list_del(&psdev->dev_list);
  379. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  380. err = pcistub_init_device(psdev->dev);
  381. if (err) {
  382. dev_err(&psdev->dev->dev,
  383. "error %d initializing device\n", err);
  384. kfree(psdev);
  385. psdev = NULL;
  386. }
  387. spin_lock_irqsave(&pcistub_devices_lock, flags);
  388. if (psdev)
  389. list_add_tail(&psdev->dev_list, &pcistub_devices);
  390. }
  391. initialize_devices = 1;
  392. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  393. return 0;
  394. }
  395. static void pcistub_device_id_add_list(struct pcistub_device_id *new,
  396. int domain, int bus, unsigned int devfn)
  397. {
  398. struct pcistub_device_id *pci_dev_id;
  399. unsigned long flags;
  400. int found = 0;
  401. spin_lock_irqsave(&device_ids_lock, flags);
  402. list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
  403. if (pci_dev_id->domain == domain && pci_dev_id->bus == bus &&
  404. pci_dev_id->devfn == devfn) {
  405. found = 1;
  406. break;
  407. }
  408. }
  409. if (!found) {
  410. new->domain = domain;
  411. new->bus = bus;
  412. new->devfn = devfn;
  413. list_add_tail(&new->slot_list, &pcistub_device_ids);
  414. }
  415. spin_unlock_irqrestore(&device_ids_lock, flags);
  416. if (found)
  417. kfree(new);
  418. }
  419. static int pcistub_seize(struct pci_dev *dev,
  420. struct pcistub_device_id *pci_dev_id)
  421. {
  422. struct pcistub_device *psdev;
  423. unsigned long flags;
  424. int err = 0;
  425. psdev = pcistub_device_alloc(dev);
  426. if (!psdev) {
  427. kfree(pci_dev_id);
  428. return -ENOMEM;
  429. }
  430. spin_lock_irqsave(&pcistub_devices_lock, flags);
  431. if (initialize_devices) {
  432. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  433. /* don't want irqs disabled when calling pcistub_init_device */
  434. err = pcistub_init_device(psdev->dev);
  435. spin_lock_irqsave(&pcistub_devices_lock, flags);
  436. if (!err)
  437. list_add(&psdev->dev_list, &pcistub_devices);
  438. } else {
  439. dev_dbg(&dev->dev, "deferring initialization\n");
  440. list_add(&psdev->dev_list, &seized_devices);
  441. }
  442. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  443. if (err) {
  444. kfree(pci_dev_id);
  445. pcistub_device_put(psdev);
  446. } else if (pci_dev_id)
  447. pcistub_device_id_add_list(pci_dev_id, pci_domain_nr(dev->bus),
  448. dev->bus->number, dev->devfn);
  449. return err;
  450. }
  451. /* Called on 'bind'. This means we must _NOT_ call pci_reset_function or
  452. * other functions that take the sysfs lock. */
  453. static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
  454. {
  455. int err = 0, match;
  456. struct pcistub_device_id *pci_dev_id = NULL;
  457. dev_dbg(&dev->dev, "probing...\n");
  458. match = pcistub_match(dev);
  459. if ((dev->driver_override &&
  460. !strcmp(dev->driver_override, PCISTUB_DRIVER_NAME)) ||
  461. match) {
  462. if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
  463. && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
  464. dev_err(&dev->dev, "can't export pci devices that "
  465. "don't have a normal (0) or bridge (1) "
  466. "header type!\n");
  467. err = -ENODEV;
  468. goto out;
  469. }
  470. if (!match) {
  471. pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
  472. if (!pci_dev_id) {
  473. err = -ENOMEM;
  474. goto out;
  475. }
  476. }
  477. dev_info(&dev->dev, "seizing device\n");
  478. err = pcistub_seize(dev, pci_dev_id);
  479. } else
  480. /* Didn't find the device */
  481. err = -ENODEV;
  482. out:
  483. return err;
  484. }
  485. /* Called on 'unbind'. This means we must _NOT_ call pci_reset_function or
  486. * other functions that take the sysfs lock. */
  487. static void pcistub_remove(struct pci_dev *dev)
  488. {
  489. struct pcistub_device *psdev, *found_psdev = NULL;
  490. unsigned long flags;
  491. dev_dbg(&dev->dev, "removing\n");
  492. spin_lock_irqsave(&pcistub_devices_lock, flags);
  493. xen_pcibk_config_quirk_release(dev);
  494. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  495. if (psdev->dev == dev) {
  496. found_psdev = psdev;
  497. break;
  498. }
  499. }
  500. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  501. if (found_psdev) {
  502. dev_dbg(&dev->dev, "found device to remove %s\n",
  503. found_psdev->pdev ? "- in-use" : "");
  504. if (found_psdev->pdev) {
  505. int domid = xen_find_device_domain_owner(dev);
  506. pr_warn("****** removing device %s while still in-use by domain %d! ******\n",
  507. pci_name(found_psdev->dev), domid);
  508. pr_warn("****** driver domain may still access this device's i/o resources!\n");
  509. pr_warn("****** shutdown driver domain before binding device\n");
  510. pr_warn("****** to other drivers or domains\n");
  511. /* N.B. This ends up calling pcistub_put_pci_dev which ends up
  512. * doing the FLR. */
  513. xen_pcibk_release_pci_dev(found_psdev->pdev,
  514. found_psdev->dev,
  515. false /* caller holds the lock. */);
  516. }
  517. spin_lock_irqsave(&pcistub_devices_lock, flags);
  518. list_del(&found_psdev->dev_list);
  519. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  520. /* the final put for releasing from the list */
  521. pcistub_device_put(found_psdev);
  522. }
  523. }
  524. static const struct pci_device_id pcistub_ids[] = {
  525. {
  526. .vendor = PCI_ANY_ID,
  527. .device = PCI_ANY_ID,
  528. .subvendor = PCI_ANY_ID,
  529. .subdevice = PCI_ANY_ID,
  530. },
  531. {0,},
  532. };
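/*
 * Note: the single PCI_ANY_ID entry above means pcistub_probe() is offered
 * every PCI device in the system; whether a device is actually seized is
 * decided in pcistub_probe() via pcistub_match() or driver_override.
 */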
  533. #define PCI_NODENAME_MAX 40
  534. static void kill_domain_by_device(struct pcistub_device *psdev)
  535. {
  536. struct xenbus_transaction xbt;
  537. int err;
  538. char nodename[PCI_NODENAME_MAX];
  539. BUG_ON(!psdev);
  540. snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
  541. psdev->pdev->xdev->otherend_id);
  542. again:
  543. err = xenbus_transaction_start(&xbt);
  544. if (err) {
  545. dev_err(&psdev->dev->dev,
  546. "error %d when start xenbus transaction\n", err);
  547. return;
  548. }
  549. /* PV AER handlers will set this flag */
  550. xenbus_printf(xbt, nodename, "aerState", "aerfail");
  551. err = xenbus_transaction_end(xbt, 0);
  552. if (err) {
  553. if (err == -EAGAIN)
  554. goto again;
  555. dev_err(&psdev->dev->dev,
  556. "error %d when end xenbus transaction\n", err);
  557. return;
  558. }
  559. }
  560. /* For each AER recovery step (error_detected, mmio_enabled, etc.) the frontend
  561. * and backend need to cooperate. In xen_pcibk, each of those steps does a similar
  562. * job: send a service request and wait for the frontend's response.
  563. */
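/*
 * Sketch of the handshake implemented by common_process(): the request is
 * written into the shared aer_op area, _XEN_PCIB_active is set and the
 * frontend is notified via the event channel; the backend then waits (up to
 * 300*HZ) for the frontend to clear _XEN_PCIB_active, and finally reads the
 * frontend's verdict back from aer_op->err.
 */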
  564. static pci_ers_result_t common_process(struct pcistub_device *psdev,
  565. pci_channel_state_t state, int aer_cmd,
  566. pci_ers_result_t result)
  567. {
  568. pci_ers_result_t res = result;
  569. struct xen_pcie_aer_op *aer_op;
  570. struct xen_pcibk_device *pdev = psdev->pdev;
  571. struct xen_pci_sharedinfo *sh_info = pdev->sh_info;
  572. int ret;
  573. /*with PV AER drivers*/
  574. aer_op = &(sh_info->aer_op);
  575. aer_op->cmd = aer_cmd;
  576. /*useful for error_detected callback*/
  577. aer_op->err = state;
  578. /*pcifront_end BDF*/
  579. ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev,
  580. &aer_op->domain, &aer_op->bus, &aer_op->devfn);
  581. if (!ret) {
  582. dev_err(&psdev->dev->dev,
  583. DRV_NAME ": failed to get pcifront device\n");
  584. return PCI_ERS_RESULT_NONE;
  585. }
  586. wmb();
  587. dev_dbg(&psdev->dev->dev,
  588. DRV_NAME ": aer_op %x dom %x bus %x devfn %x\n",
  589. aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
  590. /* Local flag to mark that an AER request is pending; the xen_pcibk callback
  591. * uses this flag to decide whether it needs to check for the AER service
  592. * ack signal from pcifront.
  593. */
  594. set_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
  595. /* It is possible that a pcifront conf_read_write op request invokes
  596. * the callback, which causes a spurious execution of wake_up.
  597. * Yet it is harmless and better than a spinlock here.
  598. */
  599. set_bit(_XEN_PCIB_active,
  600. (unsigned long *)&sh_info->flags);
  601. wmb();
  602. notify_remote_via_irq(pdev->evtchn_irq);
  603. ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
  604. !(test_bit(_XEN_PCIB_active, (unsigned long *)
  605. &sh_info->flags)), 300*HZ);
  606. if (!ret) {
  607. if (test_bit(_XEN_PCIB_active,
  608. (unsigned long *)&sh_info->flags)) {
  609. dev_err(&psdev->dev->dev,
  610. "pcifront aer process not responding!\n");
  611. clear_bit(_XEN_PCIB_active,
  612. (unsigned long *)&sh_info->flags);
  613. aer_op->err = PCI_ERS_RESULT_NONE;
  614. return res;
  615. }
  616. }
  617. clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
  618. if (test_bit(_XEN_PCIF_active,
  619. (unsigned long *)&sh_info->flags)) {
  620. dev_dbg(&psdev->dev->dev,
  621. "schedule pci_conf service in " DRV_NAME "\n");
  622. xen_pcibk_test_and_schedule_op(psdev->pdev);
  623. }
  624. res = (pci_ers_result_t)aer_op->err;
  625. return res;
  626. }
  627. /*
  628. * xen_pcibk_slot_reset: send the slot_reset request to pcifront in case
  629. * the device driver can provide this service, and then wait for the
  630. * pcifront ack.
  631. * @dev: pointer to the PCI device
  632. * return value is used by the AER core do_recovery policy
  633. */
  634. static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
  635. {
  636. struct pcistub_device *psdev;
  637. pci_ers_result_t result;
  638. result = PCI_ERS_RESULT_RECOVERED;
  639. dev_dbg(&dev->dev, "xen_pcibk_slot_reset(bus:%x,devfn:%x)\n",
  640. dev->bus->number, dev->devfn);
  641. down_write(&pcistub_sem);
  642. psdev = pcistub_device_find(pci_domain_nr(dev->bus),
  643. dev->bus->number,
  644. PCI_SLOT(dev->devfn),
  645. PCI_FUNC(dev->devfn));
  646. if (!psdev || !psdev->pdev) {
  647. dev_err(&dev->dev,
  648. DRV_NAME " device is not found/assigned\n");
  649. goto end;
  650. }
  651. if (!psdev->pdev->sh_info) {
  652. dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
  653. " by HVM, kill it\n");
  654. kill_domain_by_device(psdev);
  655. goto end;
  656. }
  657. if (!test_bit(_XEN_PCIB_AERHANDLER,
  658. (unsigned long *)&psdev->pdev->sh_info->flags)) {
  659. dev_err(&dev->dev,
  660. "guest with no AER driver should have been killed\n");
  661. goto end;
  662. }
  663. result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);
  664. if (result == PCI_ERS_RESULT_NONE ||
  665. result == PCI_ERS_RESULT_DISCONNECT) {
  666. dev_dbg(&dev->dev,
  667. "No AER slot_reset service or disconnected!\n");
  668. kill_domain_by_device(psdev);
  669. }
  670. end:
  671. if (psdev)
  672. pcistub_device_put(psdev);
  673. up_write(&pcistub_sem);
  674. return result;
  675. }
  676. /* xen_pcibk_mmio_enabled: send the mmio_enabled request to pcifront
  677. * in case the device driver can provide this service, and then wait
  678. * for the pcifront ack.
  679. * @dev: pointer to the PCI device
  680. * return value is used by the AER core do_recovery policy
  681. */
  682. static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
  683. {
  684. struct pcistub_device *psdev;
  685. pci_ers_result_t result;
  686. result = PCI_ERS_RESULT_RECOVERED;
  687. dev_dbg(&dev->dev, "xen_pcibk_mmio_enabled(bus:%x,devfn:%x)\n",
  688. dev->bus->number, dev->devfn);
  689. down_write(&pcistub_sem);
  690. psdev = pcistub_device_find(pci_domain_nr(dev->bus),
  691. dev->bus->number,
  692. PCI_SLOT(dev->devfn),
  693. PCI_FUNC(dev->devfn));
  694. if (!psdev || !psdev->pdev) {
  695. dev_err(&dev->dev,
  696. DRV_NAME " device is not found/assigned\n");
  697. goto end;
  698. }
  699. if (!psdev->pdev->sh_info) {
  700. dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
  701. " by HVM, kill it\n");
  702. kill_domain_by_device(psdev);
  703. goto end;
  704. }
  705. if (!test_bit(_XEN_PCIB_AERHANDLER,
  706. (unsigned long *)&psdev->pdev->sh_info->flags)) {
  707. dev_err(&dev->dev,
  708. "guest with no AER driver should have been killed\n");
  709. goto end;
  710. }
  711. result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);
  712. if (result == PCI_ERS_RESULT_NONE ||
  713. result == PCI_ERS_RESULT_DISCONNECT) {
  714. dev_dbg(&dev->dev,
  715. "No AER mmio_enabled service or disconnected!\n");
  716. kill_domain_by_device(psdev);
  717. }
  718. end:
  719. if (psdev)
  720. pcistub_device_put(psdev);
  721. up_write(&pcistub_sem);
  722. return result;
  723. }
  724. /* xen_pcibk_error_detected: send the error_detected request to pcifront
  725. * in case the device driver can provide this service, and then wait
  726. * for the pcifront ack.
  727. * @dev: pointer to the PCI device
  728. * @error: the current PCI connection state
  729. * return value is used by the AER core do_recovery policy
  730. */
  731. static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
  732. pci_channel_state_t error)
  733. {
  734. struct pcistub_device *psdev;
  735. pci_ers_result_t result;
  736. result = PCI_ERS_RESULT_CAN_RECOVER;
  737. dev_dbg(&dev->dev, "xen_pcibk_error_detected(bus:%x,devfn:%x)\n",
  738. dev->bus->number, dev->devfn);
  739. down_write(&pcistub_sem);
  740. psdev = pcistub_device_find(pci_domain_nr(dev->bus),
  741. dev->bus->number,
  742. PCI_SLOT(dev->devfn),
  743. PCI_FUNC(dev->devfn));
  744. if (!psdev || !psdev->pdev) {
  745. dev_err(&dev->dev,
  746. DRV_NAME " device is not found/assigned\n");
  747. goto end;
  748. }
  749. if (!psdev->pdev->sh_info) {
  750. dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
  751. " by HVM, kill it\n");
  752. kill_domain_by_device(psdev);
  753. goto end;
  754. }
  755. /* Guest owns the device yet no AER handler is registered; kill the guest. */
  756. if (!test_bit(_XEN_PCIB_AERHANDLER,
  757. (unsigned long *)&psdev->pdev->sh_info->flags)) {
  758. dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
  759. kill_domain_by_device(psdev);
  760. goto end;
  761. }
  762. result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);
  763. if (result == PCI_ERS_RESULT_NONE ||
  764. result == PCI_ERS_RESULT_DISCONNECT) {
  765. dev_dbg(&dev->dev,
  766. "No AER error_detected service or disconnected!\n");
  767. kill_domain_by_device(psdev);
  768. }
  769. end:
  770. if (psdev)
  771. pcistub_device_put(psdev);
  772. up_write(&pcistub_sem);
  773. return result;
  774. }
  775. /* xen_pcibk_error_resume: send the error_resume request to pcifront
  776. * in case the device driver can provide this service, and then wait
  777. * for the pcifront ack.
  778. * @dev: pointer to the PCI device
  779. */
  780. static void xen_pcibk_error_resume(struct pci_dev *dev)
  781. {
  782. struct pcistub_device *psdev;
  783. dev_dbg(&dev->dev, "xen_pcibk_error_resume(bus:%x,devfn:%x)\n",
  784. dev->bus->number, dev->devfn);
  785. down_write(&pcistub_sem);
  786. psdev = pcistub_device_find(pci_domain_nr(dev->bus),
  787. dev->bus->number,
  788. PCI_SLOT(dev->devfn),
  789. PCI_FUNC(dev->devfn));
  790. if (!psdev || !psdev->pdev) {
  791. dev_err(&dev->dev,
  792. DRV_NAME " device is not found/assigned\n");
  793. goto end;
  794. }
  795. if (!psdev->pdev->sh_info) {
  796. dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
  797. " by HVM, kill it\n");
  798. kill_domain_by_device(psdev);
  799. goto end;
  800. }
  801. if (!test_bit(_XEN_PCIB_AERHANDLER,
  802. (unsigned long *)&psdev->pdev->sh_info->flags)) {
  803. dev_err(&dev->dev,
  804. "guest with no AER driver should have been killed\n");
  805. kill_domain_by_device(psdev);
  806. goto end;
  807. }
  808. common_process(psdev, 1, XEN_PCI_OP_aer_resume,
  809. PCI_ERS_RESULT_RECOVERED);
  810. end:
  811. if (psdev)
  812. pcistub_device_put(psdev);
  813. up_write(&pcistub_sem);
  814. return;
  815. }
  816. /* xen_pcibk AER error handlers */
  817. static const struct pci_error_handlers xen_pcibk_error_handler = {
  818. .error_detected = xen_pcibk_error_detected,
  819. .mmio_enabled = xen_pcibk_mmio_enabled,
  820. .slot_reset = xen_pcibk_slot_reset,
  821. .resume = xen_pcibk_error_resume,
  822. };
  823. /*
  824. * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
  825. * for a normal device. I don't want it to be loaded automatically.
  826. */
  827. static struct pci_driver xen_pcibk_pci_driver = {
  828. /* The name should be xen_pciback, but until the tools are updated
  829. * we will keep it as pciback. */
  830. .name = PCISTUB_DRIVER_NAME,
  831. .id_table = pcistub_ids,
  832. .probe = pcistub_probe,
  833. .remove = pcistub_remove,
  834. .err_handler = &xen_pcibk_error_handler,
  835. };
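/*
 * Besides the "hide" list, an individual device can be steered to this driver
 * through the generic PCI driver_override mechanism, for example (illustrative
 * BDF, assuming the usual sysfs layout for PCI devices and drivers):
 *
 *   echo pciback > /sys/bus/pci/devices/0000:03:00.0/driver_override
 *   echo 0000:03:00.0 > /sys/bus/pci/devices/0000:03:00.0/driver/unbind
 *   echo 0000:03:00.0 > /sys/bus/pci/drivers/pciback/bind
 */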
  836. static inline int str_to_slot(const char *buf, int *domain, int *bus,
  837. int *slot, int *func)
  838. {
  839. int parsed = 0;
  840. switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func,
  841. &parsed)) {
  842. case 3:
  843. *func = -1;
  844. sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed);
  845. break;
  846. case 2:
  847. *slot = *func = -1;
  848. sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed);
  849. break;
  850. }
  851. if (parsed && !buf[parsed])
  852. return 0;
  853. /* try again without domain */
  854. *domain = 0;
  855. switch (sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed)) {
  856. case 2:
  857. *func = -1;
  858. sscanf(buf, " %x:%x.* %n", bus, slot, &parsed);
  859. break;
  860. case 1:
  861. *slot = *func = -1;
  862. sscanf(buf, " %x:*.* %n", bus, &parsed);
  863. break;
  864. }
  865. if (parsed && !buf[parsed])
  866. return 0;
  867. return -EINVAL;
  868. }
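/*
 * str_to_slot() accepts hex BDF strings with or without a domain, where '*'
 * selects all slots/functions (returned as -1), e.g.:
 *
 *   "0000:01:02.3"   "0000:01:02.*"   "0000:01:*.*"
 *   "01:02.3"        "01:02.*"        "01:*.*"
 */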
  869. static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
  870. *slot, int *func, int *reg, int *size, int *mask)
  871. {
  872. int parsed = 0;
  873. sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func,
  874. reg, size, mask, &parsed);
  875. if (parsed && !buf[parsed])
  876. return 0;
  877. /* try again without domain */
  878. *domain = 0;
  879. sscanf(buf, " %x:%x.%x-%x:%x:%x %n", bus, slot, func, reg, size,
  880. mask, &parsed);
  881. if (parsed && !buf[parsed])
  882. return 0;
  883. return -EINVAL;
  884. }
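/*
 * str_to_quirk() expects "<bdf>-<reg>:<size>:<mask>" (all hex), again with an
 * optional domain, e.g. "0000:01:02.3-4:2:ffff" for a 2-byte field at config
 * offset 0x4 with mask 0xffff (illustrative values only).
 */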
  885. static int pcistub_device_id_add(int domain, int bus, int slot, int func)
  886. {
  887. struct pcistub_device_id *pci_dev_id;
  888. int rc = 0, devfn = PCI_DEVFN(slot, func);
  889. if (slot < 0) {
  890. for (slot = 0; !rc && slot < 32; ++slot)
  891. rc = pcistub_device_id_add(domain, bus, slot, func);
  892. return rc;
  893. }
  894. if (func < 0) {
  895. for (func = 0; !rc && func < 8; ++func)
  896. rc = pcistub_device_id_add(domain, bus, slot, func);
  897. return rc;
  898. }
  899. if ((
  900. #if !defined(MODULE) /* pci_domains_supported is not being exported */ \
  901. || !defined(CONFIG_PCI_DOMAINS)
  902. !pci_domains_supported ? domain :
  903. #endif
  904. domain < 0 || domain > 0xffff)
  905. || bus < 0 || bus > 0xff
  906. || PCI_SLOT(devfn) != slot
  907. || PCI_FUNC(devfn) != func)
  908. return -EINVAL;
  909. pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
  910. if (!pci_dev_id)
  911. return -ENOMEM;
  912. pr_debug("wants to seize %04x:%02x:%02x.%d\n",
  913. domain, bus, slot, func);
  914. pcistub_device_id_add_list(pci_dev_id, domain, bus, devfn);
  915. return 0;
  916. }
  917. static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
  918. {
  919. struct pcistub_device_id *pci_dev_id, *t;
  920. int err = -ENOENT;
  921. unsigned long flags;
  922. spin_lock_irqsave(&device_ids_lock, flags);
  923. list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
  924. slot_list) {
  925. if (pci_dev_id->domain == domain && pci_dev_id->bus == bus
  926. && (slot < 0 || PCI_SLOT(pci_dev_id->devfn) == slot)
  927. && (func < 0 || PCI_FUNC(pci_dev_id->devfn) == func)) {
  928. /* Don't break; here because it's possible the same
  929. * slot could be in the list more than once
  930. */
  931. list_del(&pci_dev_id->slot_list);
  932. kfree(pci_dev_id);
  933. err = 0;
  934. pr_debug("removed %04x:%02x:%02x.%d from seize list\n",
  935. domain, bus, slot, func);
  936. }
  937. }
  938. spin_unlock_irqrestore(&device_ids_lock, flags);
  939. return err;
  940. }
  941. static int pcistub_reg_add(int domain, int bus, int slot, int func,
  942. unsigned int reg, unsigned int size,
  943. unsigned int mask)
  944. {
  945. int err = 0;
  946. struct pcistub_device *psdev;
  947. struct pci_dev *dev;
  948. struct config_field *field;
  949. if (reg > 0xfff || (size < 4 && (mask >> (size * 8))))
  950. return -EINVAL;
  951. psdev = pcistub_device_find(domain, bus, slot, func);
  952. if (!psdev) {
  953. err = -ENODEV;
  954. goto out;
  955. }
  956. dev = psdev->dev;
  957. field = kzalloc(sizeof(*field), GFP_KERNEL);
  958. if (!field) {
  959. err = -ENOMEM;
  960. goto out;
  961. }
  962. field->offset = reg;
  963. field->size = size;
  964. field->mask = mask;
  965. field->init = NULL;
  966. field->reset = NULL;
  967. field->release = NULL;
  968. field->clean = xen_pcibk_config_field_free;
  969. err = xen_pcibk_config_quirks_add_field(dev, field);
  970. if (err)
  971. kfree(field);
  972. out:
  973. if (psdev)
  974. pcistub_device_put(psdev);
  975. return err;
  976. }
  977. static ssize_t new_slot_store(struct device_driver *drv, const char *buf,
  978. size_t count)
  979. {
  980. int domain, bus, slot, func;
  981. int err;
  982. err = str_to_slot(buf, &domain, &bus, &slot, &func);
  983. if (err)
  984. goto out;
  985. err = pcistub_device_id_add(domain, bus, slot, func);
  986. out:
  987. if (!err)
  988. err = count;
  989. return err;
  990. }
  991. static DRIVER_ATTR_WO(new_slot);
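/*
 * Example (illustrative BDF, assuming this driver's attributes appear under
 * /sys/bus/pci/drivers/pciback/ as usual):
 *
 *   echo 0000:03:00.0 > /sys/bus/pci/drivers/pciback/new_slot
 */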
  992. static ssize_t remove_slot_store(struct device_driver *drv, const char *buf,
  993. size_t count)
  994. {
  995. int domain, bus, slot, func;
  996. int err;
  997. err = str_to_slot(buf, &domain, &bus, &slot, &func);
  998. if (err)
  999. goto out;
  1000. err = pcistub_device_id_remove(domain, bus, slot, func);
  1001. out:
  1002. if (!err)
  1003. err = count;
  1004. return err;
  1005. }
  1006. static DRIVER_ATTR_WO(remove_slot);
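/*
 * The counterpart of new_slot; e.g. (illustrative BDF):
 *
 *   echo 0000:03:00.0 > /sys/bus/pci/drivers/pciback/remove_slot
 */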
  1007. static ssize_t slots_show(struct device_driver *drv, char *buf)
  1008. {
  1009. struct pcistub_device_id *pci_dev_id;
  1010. size_t count = 0;
  1011. unsigned long flags;
  1012. spin_lock_irqsave(&device_ids_lock, flags);
  1013. list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
  1014. if (count >= PAGE_SIZE)
  1015. break;
  1016. count += scnprintf(buf + count, PAGE_SIZE - count,
  1017. "%04x:%02x:%02x.%d\n",
  1018. pci_dev_id->domain, pci_dev_id->bus,
  1019. PCI_SLOT(pci_dev_id->devfn),
  1020. PCI_FUNC(pci_dev_id->devfn));
  1021. }
  1022. spin_unlock_irqrestore(&device_ids_lock, flags);
  1023. return count;
  1024. }
  1025. static DRIVER_ATTR_RO(slots);
  1026. static ssize_t irq_handlers_show(struct device_driver *drv, char *buf)
  1027. {
  1028. struct pcistub_device *psdev;
  1029. struct xen_pcibk_dev_data *dev_data;
  1030. size_t count = 0;
  1031. unsigned long flags;
  1032. spin_lock_irqsave(&pcistub_devices_lock, flags);
  1033. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  1034. if (count >= PAGE_SIZE)
  1035. break;
  1036. if (!psdev->dev)
  1037. continue;
  1038. dev_data = pci_get_drvdata(psdev->dev);
  1039. if (!dev_data)
  1040. continue;
  1041. count +=
  1042. scnprintf(buf + count, PAGE_SIZE - count,
  1043. "%s:%s:%sing:%ld\n",
  1044. pci_name(psdev->dev),
  1045. dev_data->isr_on ? "on" : "off",
  1046. dev_data->ack_intr ? "ack" : "not ack",
  1047. dev_data->handled);
  1048. }
  1049. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  1050. return count;
  1051. }
  1052. static DRIVER_ATTR_RO(irq_handlers);
  1053. static ssize_t irq_handler_state_store(struct device_driver *drv,
  1054. const char *buf, size_t count)
  1055. {
  1056. struct pcistub_device *psdev;
  1057. struct xen_pcibk_dev_data *dev_data;
  1058. int domain, bus, slot, func;
  1059. int err;
  1060. err = str_to_slot(buf, &domain, &bus, &slot, &func);
  1061. if (err)
  1062. return err;
  1063. psdev = pcistub_device_find(domain, bus, slot, func);
  1064. if (!psdev) {
  1065. err = -ENOENT;
  1066. goto out;
  1067. }
  1068. dev_data = pci_get_drvdata(psdev->dev);
  1069. if (!dev_data) {
  1070. err = -ENOENT;
  1071. goto out;
  1072. }
  1073. dev_dbg(&psdev->dev->dev, "%s fake irq handler: %d->%d\n",
  1074. dev_data->irq_name, dev_data->isr_on,
  1075. !dev_data->isr_on);
  1076. dev_data->isr_on = !(dev_data->isr_on);
  1077. if (dev_data->isr_on)
  1078. dev_data->ack_intr = 1;
  1079. out:
  1080. if (psdev)
  1081. pcistub_device_put(psdev);
  1082. if (!err)
  1083. err = count;
  1084. return err;
  1085. }
  1086. static DRIVER_ATTR_WO(irq_handler_state);
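/*
 * Writing a slot here toggles the fake IRQ handler state for that device,
 * e.g. (illustrative BDF):
 *
 *   echo 0000:03:00.0 > /sys/bus/pci/drivers/pciback/irq_handler_state
 */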
  1087. static ssize_t quirks_store(struct device_driver *drv, const char *buf,
  1088. size_t count)
  1089. {
  1090. int domain, bus, slot, func, reg, size, mask;
  1091. int err;
  1092. err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
  1093. &mask);
  1094. if (err)
  1095. goto out;
  1096. err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
  1097. out:
  1098. if (!err)
  1099. err = count;
  1100. return err;
  1101. }
  1102. static ssize_t quirks_show(struct device_driver *drv, char *buf)
  1103. {
  1104. int count = 0;
  1105. unsigned long flags;
  1106. struct xen_pcibk_config_quirk *quirk;
  1107. struct xen_pcibk_dev_data *dev_data;
  1108. const struct config_field *field;
  1109. const struct config_field_entry *cfg_entry;
  1110. spin_lock_irqsave(&device_ids_lock, flags);
  1111. list_for_each_entry(quirk, &xen_pcibk_quirks, quirks_list) {
  1112. if (count >= PAGE_SIZE)
  1113. goto out;
  1114. count += scnprintf(buf + count, PAGE_SIZE - count,
  1115. "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
  1116. quirk->pdev->bus->number,
  1117. PCI_SLOT(quirk->pdev->devfn),
  1118. PCI_FUNC(quirk->pdev->devfn),
  1119. quirk->devid.vendor, quirk->devid.device,
  1120. quirk->devid.subvendor,
  1121. quirk->devid.subdevice);
  1122. dev_data = pci_get_drvdata(quirk->pdev);
  1123. list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
  1124. field = cfg_entry->field;
  1125. if (count >= PAGE_SIZE)
  1126. goto out;
  1127. count += scnprintf(buf + count, PAGE_SIZE - count,
  1128. "\t\t%08x:%01x:%08x\n",
  1129. cfg_entry->base_offset +
  1130. field->offset, field->size,
  1131. field->mask);
  1132. }
  1133. }
  1134. out:
  1135. spin_unlock_irqrestore(&device_ids_lock, flags);
  1136. return count;
  1137. }
  1138. static DRIVER_ATTR_RW(quirks);
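/*
 * Example of adding a quirk field via sysfs, using the str_to_quirk() format
 * shown above (illustrative values):
 *
 *   echo 0000:03:00.0-4:2:ffff > /sys/bus/pci/drivers/pciback/quirks
 */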
  1139. static ssize_t permissive_store(struct device_driver *drv, const char *buf,
  1140. size_t count)
  1141. {
  1142. int domain, bus, slot, func;
  1143. int err;
  1144. struct pcistub_device *psdev;
  1145. struct xen_pcibk_dev_data *dev_data;
  1146. err = str_to_slot(buf, &domain, &bus, &slot, &func);
  1147. if (err)
  1148. goto out;
  1149. psdev = pcistub_device_find(domain, bus, slot, func);
  1150. if (!psdev) {
  1151. err = -ENODEV;
  1152. goto out;
  1153. }
  1154. dev_data = pci_get_drvdata(psdev->dev);
  1155. /* the driver data for a device should never be null at this point */
  1156. if (!dev_data) {
  1157. err = -ENXIO;
  1158. goto release;
  1159. }
  1160. if (!dev_data->permissive) {
  1161. dev_data->permissive = 1;
  1162. /* Let user know that what they're doing could be unsafe */
  1163. dev_warn(&psdev->dev->dev, "enabling permissive mode "
  1164. "configuration space accesses!\n");
  1165. dev_warn(&psdev->dev->dev,
  1166. "permissive mode is potentially unsafe!\n");
  1167. }
  1168. release:
  1169. pcistub_device_put(psdev);
  1170. out:
  1171. if (!err)
  1172. err = count;
  1173. return err;
  1174. }
  1175. static ssize_t permissive_show(struct device_driver *drv, char *buf)
  1176. {
  1177. struct pcistub_device *psdev;
  1178. struct xen_pcibk_dev_data *dev_data;
  1179. size_t count = 0;
  1180. unsigned long flags;
  1181. spin_lock_irqsave(&pcistub_devices_lock, flags);
  1182. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  1183. if (count >= PAGE_SIZE)
  1184. break;
  1185. if (!psdev->dev)
  1186. continue;
  1187. dev_data = pci_get_drvdata(psdev->dev);
  1188. if (!dev_data || !dev_data->permissive)
  1189. continue;
  1190. count +=
  1191. scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
  1192. pci_name(psdev->dev));
  1193. }
  1194. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  1195. return count;
  1196. }
  1197. static DRIVER_ATTR_RW(permissive);
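/*
 * Example (illustrative BDF): mark a seized device as permissive, relaxing
 * the filtering of its config-space writes (see the warnings above):
 *
 *   echo 0000:03:00.0 > /sys/bus/pci/drivers/pciback/permissive
 */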
  1198. static void pcistub_exit(void)
  1199. {
  1200. driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
  1201. driver_remove_file(&xen_pcibk_pci_driver.driver,
  1202. &driver_attr_remove_slot);
  1203. driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots);
  1204. driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks);
  1205. driver_remove_file(&xen_pcibk_pci_driver.driver,
  1206. &driver_attr_permissive);
  1207. driver_remove_file(&xen_pcibk_pci_driver.driver,
  1208. &driver_attr_irq_handlers);
  1209. driver_remove_file(&xen_pcibk_pci_driver.driver,
  1210. &driver_attr_irq_handler_state);
  1211. pci_unregister_driver(&xen_pcibk_pci_driver);
  1212. }
  1213. static int __init pcistub_init(void)
  1214. {
  1215. int pos = 0;
  1216. int err = 0;
  1217. int domain, bus, slot, func;
  1218. int parsed;
  1219. if (pci_devs_to_hide && *pci_devs_to_hide) {
  1220. do {
  1221. parsed = 0;
  1222. err = sscanf(pci_devs_to_hide + pos,
  1223. " (%x:%x:%x.%x) %n",
  1224. &domain, &bus, &slot, &func, &parsed);
  1225. switch (err) {
  1226. case 3:
  1227. func = -1;
  1228. sscanf(pci_devs_to_hide + pos,
  1229. " (%x:%x:%x.*) %n",
  1230. &domain, &bus, &slot, &parsed);
  1231. break;
  1232. case 2:
  1233. slot = func = -1;
  1234. sscanf(pci_devs_to_hide + pos,
  1235. " (%x:%x:*.*) %n",
  1236. &domain, &bus, &parsed);
  1237. break;
  1238. }
  1239. if (!parsed) {
  1240. domain = 0;
  1241. err = sscanf(pci_devs_to_hide + pos,
  1242. " (%x:%x.%x) %n",
  1243. &bus, &slot, &func, &parsed);
  1244. switch (err) {
  1245. case 2:
  1246. func = -1;
  1247. sscanf(pci_devs_to_hide + pos,
  1248. " (%x:%x.*) %n",
  1249. &bus, &slot, &parsed);
  1250. break;
  1251. case 1:
  1252. slot = func = -1;
  1253. sscanf(pci_devs_to_hide + pos,
  1254. " (%x:*.*) %n",
  1255. &bus, &parsed);
  1256. break;
  1257. }
  1258. }
  1259. if (parsed <= 0)
  1260. goto parse_error;
  1261. err = pcistub_device_id_add(domain, bus, slot, func);
  1262. if (err)
  1263. goto out;
  1264. pos += parsed;
  1265. } while (pci_devs_to_hide[pos]);
  1266. }
  1267. /* If we're the first PCI Device Driver to register, we're the
  1268. * first one to get offered PCI devices as they become
  1269. * available (and thus we can be the first to grab them)
  1270. */
  1271. err = pci_register_driver(&xen_pcibk_pci_driver);
  1272. if (err < 0)
  1273. goto out;
  1274. err = driver_create_file(&xen_pcibk_pci_driver.driver,
  1275. &driver_attr_new_slot);
  1276. if (!err)
  1277. err = driver_create_file(&xen_pcibk_pci_driver.driver,
  1278. &driver_attr_remove_slot);
  1279. if (!err)
  1280. err = driver_create_file(&xen_pcibk_pci_driver.driver,
  1281. &driver_attr_slots);
  1282. if (!err)
  1283. err = driver_create_file(&xen_pcibk_pci_driver.driver,
  1284. &driver_attr_quirks);
  1285. if (!err)
  1286. err = driver_create_file(&xen_pcibk_pci_driver.driver,
  1287. &driver_attr_permissive);
  1288. if (!err)
  1289. err = driver_create_file(&xen_pcibk_pci_driver.driver,
  1290. &driver_attr_irq_handlers);
  1291. if (!err)
  1292. err = driver_create_file(&xen_pcibk_pci_driver.driver,
  1293. &driver_attr_irq_handler_state);
  1294. if (err)
  1295. pcistub_exit();
  1296. out:
  1297. return err;
  1298. parse_error:
  1299. pr_err("Error parsing pci_devs_to_hide at \"%s\"\n",
  1300. pci_devs_to_hide + pos);
  1301. return -EINVAL;
  1302. }
  1303. #ifndef MODULE
  1304. /*
  1305. * fs_initcall happens before device_initcall
  1306. * so xen_pcibk *should* get called first (because we
  1307. * want to grab any device before other drivers
  1308. * get a chance, by being the first PCI device
  1309. * driver to register).
  1310. */
  1311. fs_initcall(pcistub_init);
  1312. #endif
  1313. #ifdef CONFIG_PCI_IOV
  1314. static struct pcistub_device *find_vfs(const struct pci_dev *pdev)
  1315. {
  1316. struct pcistub_device *psdev = NULL;
  1317. unsigned long flags;
  1318. bool found = false;
  1319. spin_lock_irqsave(&pcistub_devices_lock, flags);
  1320. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  1321. if (!psdev->pdev && psdev->dev != pdev
  1322. && pci_physfn(psdev->dev) == pdev) {
  1323. found = true;
  1324. break;
  1325. }
  1326. }
  1327. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  1328. if (found)
  1329. return psdev;
  1330. return NULL;
  1331. }
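/*
 * Bus notifier: when a physical function is about to lose its driver, force
 * the release of any of its virtual functions that are still held by pcistub
 * but not in use by a guest, so the VFs do not outlive their parent PF.
 */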
  1332. static int pci_stub_notifier(struct notifier_block *nb,
  1333. unsigned long action, void *data)
  1334. {
  1335. struct device *dev = data;
  1336. const struct pci_dev *pdev = to_pci_dev(dev);
  1337. if (action != BUS_NOTIFY_UNBIND_DRIVER)
  1338. return NOTIFY_DONE;
  1339. if (!pdev->is_physfn)
  1340. return NOTIFY_DONE;
  1341. for (;;) {
  1342. struct pcistub_device *psdev = find_vfs(pdev);
  1343. if (!psdev)
  1344. break;
  1345. device_release_driver(&psdev->dev->dev);
  1346. }
  1347. return NOTIFY_DONE;
  1348. }
  1349. static struct notifier_block pci_stub_nb = {
  1350. .notifier_call = pci_stub_notifier,
  1351. };
  1352. #endif
  1353. static int __init xen_pcibk_init(void)
  1354. {
  1355. int err;
  1356. if (!xen_initial_domain())
  1357. return -ENODEV;
  1358. err = xen_pcibk_config_init();
  1359. if (err)
  1360. return err;
  1361. #ifdef MODULE
  1362. err = pcistub_init();
  1363. if (err < 0)
  1364. return err;
  1365. #endif
  1366. pcistub_init_devices_late();
  1367. err = xen_pcibk_xenbus_register();
  1368. if (err)
  1369. pcistub_exit();
  1370. #ifdef CONFIG_PCI_IOV
  1371. else
  1372. bus_register_notifier(&pci_bus_type, &pci_stub_nb);
  1373. #endif
  1374. return err;
  1375. }
  1376. static void __exit xen_pcibk_cleanup(void)
  1377. {
  1378. #ifdef CONFIG_PCI_IOV
  1379. bus_unregister_notifier(&pci_bus_type, &pci_stub_nb);
  1380. #endif
  1381. xen_pcibk_xenbus_unregister();
  1382. pcistub_exit();
  1383. }
  1384. module_init(xen_pcibk_init);
  1385. module_exit(xen_pcibk_cleanup);
  1386. MODULE_LICENSE("Dual BSD/GPL");
  1387. MODULE_ALIAS("xen-backend:pci");