isoch.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Setup routines for AGP 3.5 compliant bridges.
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/agp_backend.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "agp.h"

/* Generic AGP 3.5 enabling routines */
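
/*
 * Per-device bookkeeping used while enabling an AGP 3.5 bridge:
 * capndx is the offset of the device's AGP capability in PCI config
 * space, and maxbw caches the ISOCH_MAXBW field read from that
 * device's NISTAT register (see agp_3_5_dev_list_sort() below).
 */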
struct agp_3_5_dev {
	struct list_head list;
	u8 capndx;
	u32 maxbw;
	struct pci_dev *dev;
};

static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new)
{
	struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list);
	struct list_head *pos;

	list_for_each(pos, head) {
		cur = list_entry(pos, struct agp_3_5_dev, list);
		if (cur->maxbw > n->maxbw)
			break;
	}
	list_add_tail(new, pos);
}
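
/*
 * Rebuild dev_list in ascending ISOCH_MAXBW order: each device's maxbw
 * is refreshed from its NISTAT register and the entry is re-inserted
 * into the (initially emptied) list via agp_3_5_dev_list_insert().
 */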
static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs)
{
	struct agp_3_5_dev *cur;
	struct pci_dev *dev;
	struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
	u32 nistat;

	INIT_LIST_HEAD(head);

	for (pos=start; pos!=head; ) {
		cur = list_entry(pos, struct agp_3_5_dev, list);
		dev = cur->dev;

		pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat);
		cur->maxbw = (nistat >> 16) & 0xff;

		tmp = pos;
		pos = pos->next;
		agp_3_5_dev_list_insert(head, tmp);
	}
}

/*
 * Initialize all isochronous transfer parameters for an AGP 3.0
 * node (i.e. a host bridge in combination with the adapters
 * lying behind it...)
 */
static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
		struct agp_3_5_dev *dev_list, unsigned int ndevs)
{
	/*
	 * Convenience structure to make the calculations clearer
	 * here. The field names come straight from the AGP 3.0 spec.
	 */
	struct isoch_data {
		u32 maxbw;
		u32 n;
		u32 y;
		u32 l;
		u32 rq;
		struct agp_3_5_dev *dev;
	};

	struct pci_dev *td = bridge->dev, *dev;
	struct list_head *head = &dev_list->list, *pos;
	struct agp_3_5_dev *cur;
	struct isoch_data *master, target;
	unsigned int cdev = 0;
	u32 mnistat, tnistat, tstatus, mcmd;
	u16 tnicmd, mnicmd;
	u8 mcapndx;
	u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
	u32 step, rem, rem_isoch, rem_async;
	int ret = 0;

	/*
	 * We'll work with an array of isoch_data's (one for each
	 * device in dev_list) throughout this function.
	 */
	master = kmalloc_array(ndevs, sizeof(*master), GFP_KERNEL);
	if (master == NULL) {
		ret = -ENOMEM;
		goto get_out;
	}

	/*
	 * Sort the device list by maxbw. We need to do this because the
	 * spec suggests that the devices with the smallest requirements
	 * have their resources allocated first, with all remaining resources
	 * falling to the device with the largest requirement.
	 *
	 * We don't do exactly this; we divide target resources by ndevs
	 * and split them amongst the AGP 3.0 devices. The remainder of each
	 * such division is given to the last device, roughly as the spec
	 * says it should be done.
	 *
	 * We can't do this sort when we initially construct the dev_list
	 * because we don't know until this function whether isochronous
	 * transfers are enabled and consequently whether maxbw will mean
	 * anything.
	 */
	agp_3_5_dev_list_sort(dev_list, ndevs);

	pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
	pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);

	/* Extract power-on defaults from the target */
	target.maxbw = (tnistat >> 16) & 0xff;
	target.n = (tnistat >> 8) & 0xff;
	target.y = (tnistat >> 6) & 0x3;
	target.l = (tnistat >> 3) & 0x7;
	target.rq = (tstatus >> 24) & 0xff;

	y_max = target.y;

	/*
	 * Extract power-on defaults for each device in dev_list. Along
	 * the way, calculate the total isochronous bandwidth required
	 * by these devices and the largest requested payload size.
	 */
	list_for_each(pos, head) {
		cur = list_entry(pos, struct agp_3_5_dev, list);
		dev = cur->dev;

		mcapndx = cur->capndx;

		pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);

		master[cdev].maxbw = (mnistat >> 16) & 0xff;
		master[cdev].n = (mnistat >> 8) & 0xff;
		master[cdev].y = (mnistat >> 6) & 0x3;
		master[cdev].dev = cur;

		tot_bw += master[cdev].maxbw;
		y_max = max(y_max, master[cdev].y);

		cdev++;
	}

	/* Check if this configuration has any chance of working */
	if (tot_bw > target.maxbw) {
		dev_err(&td->dev, "isochronous bandwidth required "
			"by AGP 3.0 devices exceeds that which is supported by "
			"the AGP 3.0 bridge!\n");
		ret = -ENODEV;
		goto free_and_exit;
	}

	target.y = y_max;

	/*
	 * Write the calculated payload size into the target's NICMD
	 * register. Doing this directly affects the ISOCH_N value
	 * in the target's NISTAT register, so we need to do this now
	 * to get an accurate value for ISOCH_N later.
	 */
	pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd);
	tnicmd &= ~(0x3 << 6);
	tnicmd |= target.y << 6;
	pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd);

	/* Reread the target's ISOCH_N */
	pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
	target.n = (tnistat >> 8) & 0xff;

	/* Calculate the minimum ISOCH_N needed by each master */
	for (cdev=0; cdev<ndevs; cdev++) {
		master[cdev].y = target.y;
		master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);

		tot_n += master[cdev].n;
	}

	/* Exit if the minimal ISOCH_N allocation among the masters is more
	 * than the target can handle. */
	if (tot_n > target.n) {
		dev_err(&td->dev, "number of isochronous "
			"transactions per period required by AGP 3.0 devices "
			"exceeds that which is supported by the AGP 3.0 "
			"bridge!\n");
		ret = -ENODEV;
		goto free_and_exit;
	}

	/* Calculate left over ISOCH_N capability in the target. We'll give
	 * this to the hungriest device (as per the spec) */
	rem = target.n - tot_n;

	/*
	 * Calculate the minimum isochronous RQ depth needed by each master.
	 * Along the way, distribute the extra ISOCH_N capability calculated
	 * above.
	 */
	for (cdev=0; cdev<ndevs; cdev++) {
		/*
		 * This is a little subtle. If the ISOCH_Y payload is larger
		 * than 64B, then each ISOCH_Y-byte isochronous write will be
		 * broken into 64B pieces. This means we need to budget more
		 * RQ depth to account for this kind of write (each
		 * isochronous write is actually many writes on the AGP bus).
		 */
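		/*
		 * For example: with ISOCH_Y = 0x3 each isochronous write is
		 * split into 1 << (3 - 1) = 4 pieces, so a master with
		 * ISOCH_N = 2 ends up needing rq = 2 * 4 = 8 slots below.
		 */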
		master[cdev].rq = master[cdev].n;
		if (master[cdev].y > 0x1)
			master[cdev].rq *= (1 << (master[cdev].y - 1));

		tot_rq += master[cdev].rq;
	}
	master[ndevs-1].n += rem;

	/* Figure the number of isochronous and asynchronous RQ slots the
	 * target is providing. */
	rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
	rq_async = target.rq - rq_isoch;
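
	/*
	 * Worked example with made-up numbers: target.y = 0x3 and
	 * target.n = 5 give rq_isoch = 5 * 4 = 20; with target.rq = 32
	 * that leaves rq_async = 12.
	 */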

	/* Exit if the minimal RQ needs of the masters exceeds what the target
	 * can provide. */
	if (tot_rq > rq_isoch) {
		dev_err(&td->dev, "number of request queue slots "
			"required by the isochronous bandwidth requested by "
			"AGP 3.0 devices exceeds the number provided by the "
			"AGP 3.0 bridge!\n");
		ret = -ENODEV;
		goto free_and_exit;
	}

	/* Calculate asynchronous RQ capability in the target (per master) as
	 * well as the total number of leftover isochronous RQ slots. */
	step = rq_async / ndevs;
	rem_async = step + (rq_async % ndevs);
	rem_isoch = rq_isoch - tot_rq;
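
	/*
	 * Continuing the example above with ndevs = 3 and tot_rq = 16:
	 * step = 12 / 3 = 4, rem_async = 4 + (12 % 3) = 4 and
	 * rem_isoch = 20 - 16 = 4, so each of the first two masters gets
	 * 4 extra slots below and the last one gets 4 + 4 = 8.
	 */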

	/* Distribute the extra RQ slots calculated above and write our
	 * isochronous settings out to the actual devices. */
	for (cdev=0; cdev<ndevs; cdev++) {
		cur = master[cdev].dev;
		dev = cur->dev;

		mcapndx = cur->capndx;

		master[cdev].rq += (cdev == ndevs - 1)
			? (rem_async + rem_isoch) : step;

		pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
		pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);

		mnicmd &= ~(0xff << 8);
		mnicmd &= ~(0x3 << 6);
		mcmd &= ~(0xff << 24);

		mnicmd |= master[cdev].n << 8;
		mnicmd |= master[cdev].y << 6;
		mcmd |= master[cdev].rq << 24;

		pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
		pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
	}

free_and_exit:
	kfree(master);
get_out:
	return ret;
}

/*
 * This function basically allocates request queue slots among the
 * AGP 3.0 systems in nonisochronous nodes. The algorithm is pretty
 * stupid: divide the total number of RQ slots provided by the target
 * by ndevs, hand that many slots to each AGP 3.0 device, and give any
 * leftover slots to the last device in dev_list.
 */
static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge,
		struct agp_3_5_dev *dev_list, unsigned int ndevs)
{
	struct agp_3_5_dev *cur;
	struct list_head *head = &dev_list->list, *pos;
	u32 tstatus, mcmd;
	u32 trq, mrq, rem;
	unsigned int cdev = 0;

	pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus);

	trq = (tstatus >> 24) & 0xff;
	mrq = trq / ndevs;

	rem = mrq + (trq % ndevs);
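
	/*
	 * For instance, trq = 32 and ndevs = 3 give mrq = 10 and
	 * rem = 10 + 2 = 12: the first two masters get an RQ depth of 10
	 * and the last one gets 12.
	 */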
	for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) {
		cur = list_entry(pos, struct agp_3_5_dev, list);

		pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd);
		mcmd &= ~(0xff << 24);
		mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
		pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd);
	}
}

/*
 * Fully configure and enable an AGP 3.0 host bridge and all the devices
 * lying behind it.
 */
int agp_3_5_enable(struct agp_bridge_data *bridge)
{
	struct pci_dev *td = bridge->dev, *dev = NULL;
	u8 mcapndx;
	u32 isoch, arqsz;
	u32 tstatus, mstatus, ncapid;
	u32 mmajor;
	u16 mpstat;
	struct agp_3_5_dev *dev_list, *cur;
	struct list_head *head, *pos;
	unsigned int ndevs = 0;
	int ret = 0;

	/* Extract some power-on defaults from the target */
	pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
	isoch = (tstatus >> 17) & 0x1;
	if (isoch == 0)	/* isoch xfers not available, bail out. */
		return -ENODEV;
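
	/*
	 * The ARQSZ field (bits 15:13 of the AGP status register) is read
	 * below but not used again in this routine.
	 */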
	arqsz = (tstatus >> 13) & 0x7;

	/*
	 * Allocate a head for our AGP 3.5 device list
	 * (multiple AGP v3 devices are allowed behind a single bridge).
	 */
	if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
		ret = -ENOMEM;
		goto get_out;
	}
	head = &dev_list->list;
	INIT_LIST_HEAD(head);

	/* Find all AGP devices, and add them to dev_list. */
	for_each_pci_dev(dev) {
		mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
		if (mcapndx == 0)
			continue;

		switch ((dev->class >>8) & 0xff00) {
			case 0x0600:	/* Bridge */
				/* Skip bridges. We should call this function for each one. */
				continue;

			case 0x0001:	/* Unclassified device */
				/* Don't know what this is, but log it for investigation. */
				if (mcapndx != 0) {
					dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n",
						 pci_name(dev),
						 dev->vendor, dev->device);
				}
				continue;

			case 0x0300:	/* Display controller */
			case 0x0400:	/* Multimedia controller */
				if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
					ret = -ENOMEM;
					goto free_and_exit;
				}
				cur->dev = dev;

				pos = &cur->list;
				list_add(pos, head);
				ndevs++;
				continue;

			default:
				continue;
		}
	}

	/*
	 * Take an initial pass through the devices lying behind our host
	 * bridge. Make sure each one is actually an AGP 3.0 device, otherwise
	 * exit with an error message. Along the way, store the AGP 3.0
	 * cap_ptr for each device.
	 */
	list_for_each(pos, head) {
		cur = list_entry(pos, struct agp_3_5_dev, list);
		dev = cur->dev;

		pci_read_config_word(dev, PCI_STATUS, &mpstat);
		if ((mpstat & PCI_STATUS_CAP_LIST) == 0)
			continue;

		pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
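		/*
		 * Walk the device's PCI capability list by hand, stopping at
		 * the first capability with ID 2 (PCI_CAP_ID_AGP) or at the
		 * end of the list.
		 */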
		if (mcapndx != 0) {
			do {
				pci_read_config_dword(dev, mcapndx, &ncapid);
				if ((ncapid & 0xff) != 2)
					mcapndx = (ncapid >> 8) & 0xff;
			}
			while (((ncapid & 0xff) != 2) && (mcapndx != 0));
		}

		if (mcapndx == 0) {
			dev_err(&td->dev, "woah! Non-AGP device %s on "
				"secondary bus of AGP 3.5 bridge!\n",
				pci_name(dev));
			ret = -ENODEV;
			goto free_and_exit;
		}

		mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
		if (mmajor < 3) {
			dev_err(&td->dev, "woah! AGP 2.0 device %s on "
				"secondary bus of AGP 3.5 bridge operating "
				"with AGP 3.0 electricals!\n", pci_name(dev));
			ret = -ENODEV;
			goto free_and_exit;
		}

		cur->capndx = mcapndx;

		pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);

		if (((mstatus >> 3) & 0x1) == 0) {
			dev_err(&td->dev, "woah! AGP 3.x device %s not "
				"operating in AGP 3.x mode on secondary bus "
				"of AGP 3.5 bridge operating with AGP 3.0 "
				"electricals!\n", pci_name(dev));
			ret = -ENODEV;
			goto free_and_exit;
		}
	}

	/*
	 * Call functions to divide target resources amongst the AGP 3.0
	 * masters. This process is dramatically different depending on
	 * whether isochronous transfers are supported.
	 */
	if (isoch) {
		ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
		if (ret) {
			dev_info(&td->dev, "something bad happened setting "
				 "up isochronous xfers; falling back to "
				 "non-isochronous xfer mode\n");
		} else {
			goto free_and_exit;
		}
	}
	agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs);

free_and_exit:
	/* Be sure to free the dev_list */
	for (pos=head->next; pos!=head; ) {
		cur = list_entry(pos, struct agp_3_5_dev, list);

		pos = pos->next;
		kfree(cur);
	}
	kfree(dev_list);

get_out:
	return ret;
}