pci_endpoint_test.c

/**
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>
#define DRV_MODULE_NAME "pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED -1
#define IRQ_TYPE_LEGACY 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2

#define PCI_ENDPOINT_TEST_MAGIC 0x0

#define PCI_ENDPOINT_TEST_COMMAND 0x4
#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)

#define PCI_ENDPOINT_TEST_STATUS 0x8
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
#define STATUS_WRITE_SUCCESS BIT(2)
#define STATUS_WRITE_FAIL BIT(3)
#define STATUS_COPY_SUCCESS BIT(4)
#define STATUS_COPY_FAIL BIT(5)
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18

#define PCI_ENDPOINT_TEST_SIZE 0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM 0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
                                            miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
enum pci_barno {
        BAR_0,
        BAR_1,
        BAR_2,
        BAR_3,
        BAR_4,
        BAR_5,
};

struct pci_endpoint_test {
        struct pci_dev *pdev;
        void __iomem *base;
        void __iomem *bar[6];
        struct completion irq_raised;
        int last_irq;
        int num_irqs;
        int irq_type;
        /* mutex to protect the ioctls */
        struct mutex mutex;
        struct miscdevice miscdev;
        enum pci_barno test_reg_bar;
        size_t alignment;
};

struct pci_endpoint_test_data {
        enum pci_barno test_reg_bar;
        size_t alignment;
        int irq_type;
};
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
                                          u32 offset)
{
        return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
                                            u32 offset, u32 value)
{
        writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
                                              int bar, int offset)
{
        return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
                                                int bar, u32 offset, u32 value)
{
        writel(value, test->bar[bar] + offset);
}
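
/*
 * Shared handler for all allocated vectors: the endpoint test function is
 * expected to set STATUS_IRQ_RAISED before raising the interrupt, so record
 * which vector fired, wake up the waiting test and clear the bit.
 */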
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
        struct pci_endpoint_test *test = dev_id;
        u32 reg;

        reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
        if (reg & STATUS_IRQ_RAISED) {
                test->last_irq = irq;
                complete(&test->irq_raised);
                reg &= ~STATUS_IRQ_RAISED;
        }
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, reg);

        return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
        struct pci_dev *pdev = test->pdev;

        pci_free_irq_vectors(pdev);
        test->irq_type = IRQ_TYPE_UNDEFINED;
}
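
/*
 * Allocate vectors for the requested IRQ type (legacy, MSI or MSI-X).  On
 * failure num_irqs ends up as 0 and false is returned; irq_type is recorded
 * either way.
 */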
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
                                                int type)
{
        int irq = -1;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;
        bool res = true;

        switch (type) {
        case IRQ_TYPE_LEGACY:
                irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
                if (irq < 0)
                        dev_err(dev, "Failed to get Legacy interrupt\n");
                break;
        case IRQ_TYPE_MSI:
                irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
                if (irq < 0)
                        dev_err(dev, "Failed to get MSI interrupts\n");
                break;
        case IRQ_TYPE_MSIX:
                irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
                if (irq < 0)
                        dev_err(dev, "Failed to get MSI-X interrupts\n");
                break;
        default:
                dev_err(dev, "Invalid IRQ type selected\n");
        }

        if (irq < 0) {
                irq = 0;
                res = false;
        }
        test->irq_type = type;
        test->num_irqs = irq;

        return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
        int i;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;

        for (i = 0; i < test->num_irqs; i++)
                devm_free_irq(dev, pci_irq_vector(pdev, i), test);

        test->num_irqs = 0;
}

static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
        int i;
        int err;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;

        for (i = 0; i < test->num_irqs; i++) {
                err = devm_request_irq(dev, pci_irq_vector(pdev, i),
                                       pci_endpoint_test_irqhandler,
                                       IRQF_SHARED, DRV_MODULE_NAME, test);
                if (err)
                        goto fail;
        }

        return true;

fail:
        switch (irq_type) {
        case IRQ_TYPE_LEGACY:
                dev_err(dev, "Failed to request IRQ %d for Legacy\n",
                        pci_irq_vector(pdev, i));
                break;
        case IRQ_TYPE_MSI:
                dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
                        pci_irq_vector(pdev, i), i + 1);
                break;
        case IRQ_TYPE_MSIX:
                dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
                        pci_irq_vector(pdev, i), i + 1);
                break;
        }

        return false;
}
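
/*
 * PCITEST_BAR: write a 0xA0A0A0A0 test pattern across the BAR and read it
 * back.  Only the first dword of the test register BAR is exercised so the
 * test registers themselves are not clobbered.
 */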
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
                                  enum pci_barno barno)
{
        int j;
        u32 val;
        int size;
        struct pci_dev *pdev = test->pdev;

        if (!test->bar[barno])
                return false;

        size = pci_resource_len(pdev, barno);

        if (barno == test->test_reg_bar)
                size = 0x4;

        for (j = 0; j < size; j += 4)
                pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

        for (j = 0; j < size; j += 4) {
                val = pci_endpoint_test_bar_readl(test, barno, j);
                if (val != 0xA0A0A0A0)
                        return false;
        }

        return true;
}
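
/*
 * PCITEST_LEGACY_IRQ: ask the endpoint to raise a legacy interrupt and wait
 * up to one second for the handler to signal completion.
 */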
static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
        u32 val;

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
                                 IRQ_TYPE_LEGACY);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 COMMAND_RAISE_LEGACY_IRQ);
        val = wait_for_completion_timeout(&test->irq_raised,
                                          msecs_to_jiffies(1000));
        if (!val)
                return false;

        return true;
}
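
/*
 * PCITEST_MSI/PCITEST_MSIX: ask the endpoint to raise the given (1-based)
 * MSI or MSI-X vector and check that it arrived on the matching host IRQ.
 */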
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
                                      u16 msi_num, bool msix)
{
        u32 val;
        struct pci_dev *pdev = test->pdev;

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
                                 msix == false ? IRQ_TYPE_MSI :
                                 IRQ_TYPE_MSIX);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 msix == false ? COMMAND_RAISE_MSI_IRQ :
                                 COMMAND_RAISE_MSIX_IRQ);
        val = wait_for_completion_timeout(&test->irq_raised,
                                          msecs_to_jiffies(1000));
        if (!val)
                return false;

        if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
                return true;

        return false;
}
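
/*
 * PCITEST_COPY: fill a source buffer with random data, program the source
 * and destination bus addresses plus the transfer size, and ask the
 * endpoint to copy the data.  The test passes if the CRC32 of both buffers
 * matches afterwards.
 */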
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
{
        bool ret = false;
        void *src_addr;
        void *dst_addr;
        dma_addr_t src_phys_addr;
        dma_addr_t dst_phys_addr;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;
        void *orig_src_addr;
        dma_addr_t orig_src_phys_addr;
        void *orig_dst_addr;
        dma_addr_t orig_dst_phys_addr;
        size_t offset;
        size_t alignment = test->alignment;
        int irq_type = test->irq_type;
        u32 src_crc32;
        u32 dst_crc32;

        if (size > SIZE_MAX - alignment)
                goto err;

        if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                goto err;
        }

        orig_src_addr = dma_alloc_coherent(dev, size + alignment,
                                           &orig_src_phys_addr, GFP_KERNEL);
        if (!orig_src_addr) {
                dev_err(dev, "Failed to allocate source buffer\n");
                ret = false;
                goto err;
        }

        if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
                src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
                offset = src_phys_addr - orig_src_phys_addr;
                src_addr = orig_src_addr + offset;
        } else {
                src_phys_addr = orig_src_phys_addr;
                src_addr = orig_src_addr;
        }

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
                                 lower_32_bits(src_phys_addr));
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
                                 upper_32_bits(src_phys_addr));

        get_random_bytes(src_addr, size);
        src_crc32 = crc32_le(~0, src_addr, size);

        orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
                                           &orig_dst_phys_addr, GFP_KERNEL);
        if (!orig_dst_addr) {
                dev_err(dev, "Failed to allocate destination address\n");
                ret = false;
                goto err_orig_src_addr;
        }

        if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
                dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
                offset = dst_phys_addr - orig_dst_phys_addr;
                dst_addr = orig_dst_addr + offset;
        } else {
                dst_phys_addr = orig_dst_phys_addr;
                dst_addr = orig_dst_addr;
        }

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
                                 lower_32_bits(dst_phys_addr));
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
                                 upper_32_bits(dst_phys_addr));

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 COMMAND_COPY);

        wait_for_completion(&test->irq_raised);

        dst_crc32 = crc32_le(~0, dst_addr, size);
        if (dst_crc32 == src_crc32)
                ret = true;

        dma_free_coherent(dev, size + alignment, orig_dst_addr,
                          orig_dst_phys_addr);

err_orig_src_addr:
        dma_free_coherent(dev, size + alignment, orig_src_addr,
                          orig_src_phys_addr);

err:
        return ret;
}
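
/*
 * PCITEST_WRITE: the host fills a buffer with random data and publishes its
 * CRC32; the endpoint then reads the buffer (COMMAND_READ is named from the
 * endpoint's point of view) and reports the result via STATUS_READ_SUCCESS.
 */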
static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
{
        bool ret = false;
        u32 reg;
        void *addr;
        dma_addr_t phys_addr;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;
        void *orig_addr;
        dma_addr_t orig_phys_addr;
        size_t offset;
        size_t alignment = test->alignment;
        int irq_type = test->irq_type;
        u32 crc32;

        if (size > SIZE_MAX - alignment)
                goto err;

        if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                goto err;
        }

        orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
                                       GFP_KERNEL);
        if (!orig_addr) {
                dev_err(dev, "Failed to allocate address\n");
                ret = false;
                goto err;
        }

        if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
                phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
                offset = phys_addr - orig_phys_addr;
                addr = orig_addr + offset;
        } else {
                phys_addr = orig_phys_addr;
                addr = orig_addr;
        }

        get_random_bytes(addr, size);

        crc32 = crc32_le(~0, addr, size);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM, crc32);

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
                                 lower_32_bits(phys_addr));
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
                                 upper_32_bits(phys_addr));

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 COMMAND_READ);

        wait_for_completion(&test->irq_raised);

        reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
        if (reg & STATUS_READ_SUCCESS)
                ret = true;

        dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);

err:
        return ret;
}
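
/*
 * PCITEST_READ: the endpoint writes data into the host buffer
 * (COMMAND_WRITE is named from the endpoint's point of view); the host then
 * checks the buffer against the checksum exposed by the endpoint.
 */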
static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
{
        bool ret = false;
        void *addr;
        dma_addr_t phys_addr;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;
        void *orig_addr;
        dma_addr_t orig_phys_addr;
        size_t offset;
        size_t alignment = test->alignment;
        int irq_type = test->irq_type;
        u32 crc32;

        if (size > SIZE_MAX - alignment)
                goto err;

        if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                goto err;
        }

        orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
                                       GFP_KERNEL);
        if (!orig_addr) {
                dev_err(dev, "Failed to allocate destination address\n");
                ret = false;
                goto err;
        }

        if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
                phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
                offset = phys_addr - orig_phys_addr;
                addr = orig_addr + offset;
        } else {
                phys_addr = orig_phys_addr;
                addr = orig_addr;
        }

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
                                 lower_32_bits(phys_addr));
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
                                 upper_32_bits(phys_addr));

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 COMMAND_WRITE);

        wait_for_completion(&test->irq_raised);

        crc32 = crc32_le(~0, addr, size);
        if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
                ret = true;

        dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);

err:
        return ret;
}
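
/*
 * PCITEST_SET_IRQTYPE: switch between legacy, MSI and MSI-X at runtime by
 * releasing the current vectors and allocating new ones of the requested
 * type.
 */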
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
                                      int req_irq_type)
{
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;

        if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                return false;
        }

        if (test->irq_type == req_irq_type)
                return true;

        pci_endpoint_test_release_irq(test);
        pci_endpoint_test_free_irq_vectors(test);

        if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
                goto err;

        if (!pci_endpoint_test_request_irq(test))
                goto err;

        return true;

err:
        pci_endpoint_test_free_irq_vectors(test);
        return false;
}
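
/*
 * ioctl interface used from userspace: each PCITEST_* command maps onto one
 * of the helpers above, serialised by the mutex.
 */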
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
                                    unsigned long arg)
{
        int ret = -EINVAL;
        enum pci_barno bar;
        struct pci_endpoint_test *test = to_endpoint_test(file->private_data);

        mutex_lock(&test->mutex);
        switch (cmd) {
        case PCITEST_BAR:
                bar = arg;
                if (bar < 0 || bar > 5)
                        goto ret;
                ret = pci_endpoint_test_bar(test, bar);
                break;
        case PCITEST_LEGACY_IRQ:
                ret = pci_endpoint_test_legacy_irq(test);
                break;
        case PCITEST_MSI:
        case PCITEST_MSIX:
                ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
                break;
        case PCITEST_WRITE:
                ret = pci_endpoint_test_write(test, arg);
                break;
        case PCITEST_READ:
                ret = pci_endpoint_test_read(test, arg);
                break;
        case PCITEST_COPY:
                ret = pci_endpoint_test_copy(test, arg);
                break;
        case PCITEST_SET_IRQTYPE:
                ret = pci_endpoint_test_set_irq(test, arg);
                break;
        case PCITEST_GET_IRQTYPE:
                ret = irq_type;
                break;
        }

ret:
        mutex_unlock(&test->mutex);
        return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = pci_endpoint_test_ioctl,
};
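
/*
 * Userspace reaches this driver through the /dev/pci-endpoint-test.<N> misc
 * device.  A minimal usage sketch (the pcitest utility in the kernel tree
 * wraps the same ioctls); the test ioctls return 1 on pass and 0 on fail:
 *
 *      int fd = open("/dev/pci-endpoint-test.0", O_RDWR);
 *
 *      ioctl(fd, PCITEST_SET_IRQTYPE, 1);      // 1 == MSI
 *      ioctl(fd, PCITEST_WRITE, 1024);         // endpoint reads 1024 bytes
 *      ioctl(fd, PCITEST_COPY, 1024);          // endpoint-side copy of 1024 bytes
 */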
static int pci_endpoint_test_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *ent)
{
        int err;
        int id;
        char name[24];
        enum pci_barno bar;
        void __iomem *base;
        struct device *dev = &pdev->dev;
        struct pci_endpoint_test *test;
        struct pci_endpoint_test_data *data;
        enum pci_barno test_reg_bar = BAR_0;
        struct miscdevice *misc_device;

        if (pci_is_bridge(pdev))
                return -ENODEV;

        test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
        if (!test)
                return -ENOMEM;

        test->test_reg_bar = 0;
        test->alignment = 0;
        test->pdev = pdev;
        test->irq_type = IRQ_TYPE_UNDEFINED;

        if (no_msi)
                irq_type = IRQ_TYPE_LEGACY;

        data = (struct pci_endpoint_test_data *)ent->driver_data;
        if (data) {
                test_reg_bar = data->test_reg_bar;
                test->test_reg_bar = test_reg_bar;
                test->alignment = data->alignment;
                irq_type = data->irq_type;
        }

        init_completion(&test->irq_raised);
        mutex_init(&test->mutex);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Cannot enable PCI device\n");
                return err;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                dev_err(dev, "Cannot obtain PCI resources\n");
                goto err_disable_pdev;
        }

        pci_set_master(pdev);

        if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
                err = -EINVAL;
                goto err_disable_irq;
        }

        if (!pci_endpoint_test_request_irq(test)) {
                err = -EINVAL;
                goto err_disable_irq;
        }

        for (bar = BAR_0; bar <= BAR_5; bar++) {
                if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
                        base = pci_ioremap_bar(pdev, bar);
                        if (!base) {
                                dev_err(dev, "Failed to read BAR%d\n", bar);
                                WARN_ON(bar == test_reg_bar);
                        }
                        test->bar[bar] = base;
                }
        }

        test->base = test->bar[test_reg_bar];
        if (!test->base) {
                err = -ENOMEM;
                dev_err(dev, "Cannot perform PCI test without BAR%d\n",
                        test_reg_bar);
                goto err_iounmap;
        }

        pci_set_drvdata(pdev, test);

        id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
        if (id < 0) {
                err = id;
                dev_err(dev, "Unable to get id\n");
                goto err_iounmap;
        }

        snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
        misc_device = &test->miscdev;
        misc_device->minor = MISC_DYNAMIC_MINOR;
        misc_device->name = kstrdup(name, GFP_KERNEL);
        if (!misc_device->name) {
                err = -ENOMEM;
                goto err_ida_remove;
        }
        misc_device->fops = &pci_endpoint_test_fops;

        err = misc_register(misc_device);
        if (err) {
                dev_err(dev, "Failed to register device\n");
                goto err_kfree_name;
        }

        return 0;

err_kfree_name:
        kfree(misc_device->name);

err_ida_remove:
        ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
        for (bar = BAR_0; bar <= BAR_5; bar++) {
                if (test->bar[bar])
                        pci_iounmap(pdev, test->bar[bar]);
        }
        pci_endpoint_test_release_irq(test);

err_disable_irq:
        pci_endpoint_test_free_irq_vectors(test);
        pci_release_regions(pdev);

err_disable_pdev:
        pci_disable_device(pdev);

        return err;
}

static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
        int id;
        enum pci_barno bar;
        struct pci_endpoint_test *test = pci_get_drvdata(pdev);
        struct miscdevice *misc_device = &test->miscdev;

        if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
                return;
        if (id < 0)
                return;

        misc_deregister(&test->miscdev);
        kfree(misc_device->name);
        ida_simple_remove(&pci_endpoint_test_ida, id);
        for (bar = BAR_0; bar <= BAR_5; bar++) {
                if (test->bar[bar])
                        pci_iounmap(pdev, test->bar[bar]);
        }

        pci_endpoint_test_release_irq(test);
        pci_endpoint_test_free_irq_vectors(test);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

static const struct pci_device_id pci_endpoint_test_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
        { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
        { }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
        .name = DRV_MODULE_NAME,
        .id_table = pci_endpoint_test_tbl,
        .probe = pci_endpoint_test_probe,
        .remove = pci_endpoint_test_remove,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");