sba_iommu.c

/*
** IA64 System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2002-2005 Alex Williamson
** (c) Copyright 2002-2003 Grant Grundler
** (c) Copyright 2002-2005 Hewlett-Packard Company
**
** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
**
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h>		/* hweight64() */
#include <linux/crash_dump.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>

#include <asm/delay.h>		/* ia64_get_itc() */
#include <asm/io.h>
#include <asm/page.h>		/* PAGE_OFFSET */
#include <asm/dma.h>

#include <asm/acpi-ext.h>

extern int swiotlb_late_init_with_default_size (size_t size);

#define PFX "IOC: "

/*
** Enabling timing search of the pdir resource map.  Output in /proc.
** Disabled by default to optimize performance.
*/
#undef PDIR_SEARCH_TIMING

/*
** This option allows cards capable of 64bit DMA to bypass the IOMMU.  If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it.  Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now.  NOTE: BYPASS_SG also needs to be undef'd to
** completely restrict DMA to the IOMMU.
*/
#define ALLOW_IOV_BYPASS

/*
** This option specifically allows/disallows bypassing scatterlists with
** multiple entries.  Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU.  Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
*/
#undef ALLOW_IOV_BYPASS_SG

/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA.  Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues.  If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address.  This could severely impact
** error containment, and is therefore off by default.  The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
*/
#undef FULL_VALID_PDIR

#define ENABLE_MARK_CLEAN

/*
** The number of debug flags is a clue - this code is fragile.  NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync.  The sanity checking code isn't going to
** like that.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_BYPASS

#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif
#define SBA_INLINE	__inline__
/* #define SBA_INLINE */

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_BYPASS
#define DBG_BYPASS(x...)	printk(x)
#else
#define DBG_BYPASS(x...)
#endif

#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
	if(!(expr)) { \
		printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
		panic(#expr); \
	}
#else
#define ASSERT(expr)
#endif
/*
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT	64

#define PCI_DEVICE_ID_HP_SX2000_IOC	0x12ec

#define ZX1_IOC_ID	((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID	((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID	((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID	((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID	((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)

#define ZX1_IOC_OFFSET	0x1000	/* ACPI reports SBA, we want IOC */

#define IOC_FUNC_ID	0x000
#define IOC_FCLASS	0x008	/* function class, bist, header, rev... */
#define IOC_IBASE	0x300	/* IO TLB */
#define IOC_IMASK	0x308
#define IOC_PCOM	0x310
#define IOC_TCNFG	0x318
#define IOC_PDIR_BASE	0x320

#define IOC_ROPE0_CFG	0x500
#define IOC_ROPE_AO	0x10	/* Allow "Relaxed Ordering" */

/* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL
/*
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
**
** Some IOCs (sx1000) can run at the above page sizes, but are
** really only supported using the IOC at a 4k page size.
**
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/
static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;
struct ioc {
	void __iomem	*ioc_hpa;	/* I/O MMU base address */
	char		*res_map;	/* resource map, bit == pdir entry */
	u64		*pdir_base;	/* physical base address */
	unsigned long	ibase;		/* pdir IOV Space base */
	unsigned long	imask;		/* pdir IOV Space mask */

	unsigned long	*res_hint;	/* next avail IOVP - circular search */
	unsigned long	dma_mask;
	spinlock_t	res_lock;	/* protects the resource bitmap, but must be held when */
					/* clearing pdir to prevent races with allocations. */
	unsigned int	res_bitshift;	/* from the RIGHT! */
	unsigned int	res_size;	/* size of resource map in bytes */
#ifdef CONFIG_NUMA
	unsigned int	node;		/* node where this IOC lives */
#endif
#if DELAYED_RESOURCE_CNT > 0
	spinlock_t	saved_lock;	/* may want to try to get this on a separate cacheline */
					/* than res_lock for bigger systems. */
	int		saved_cnt;
	struct sba_dma_pair {
		dma_addr_t	iova;
		size_t		size;
	} saved[DELAYED_RESOURCE_CNT];
#endif

#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE	0x100
	unsigned long avg_search[SBA_SEARCH_SAMPLE];
	unsigned long avg_idx;		/* current index into avg_search */
#endif

	/* Stuff we don't need in performance path */
	struct ioc	*next;		/* list of IOC's in system */
	acpi_handle	handle;		/* for multiple IOC's */
	const char	*name;
	unsigned int	func_id;
	unsigned int	rev;		/* HW revision of chip */
	u32		iov_size;
	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
	struct pci_dev	*sac_only_dev;
};
static struct ioc *ioc_list, *ioc_found;
static int reserve_sba_gart = 1;

static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);

#define sba_sg_address(sg)	sg_virt((sg))

#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
#endif

#ifdef CONFIG_PCI
# define GET_IOC(dev)	((dev_is_pci(dev)) \
			 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
#else
# define GET_IOC(dev)	NULL
#endif

/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of the software/tuning constraint
** rather than the HW. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE	(BITS_PER_LONG*iovp_size)

/* Parenthesize "x" as well, so expression arguments expand safely. */
#define ROUNDUP(x,y)	(((x) + ((y)-1)) & ~((y)-1))
/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
*/
#define READ_REG(addr)       __raw_readq(addr)
#define WRITE_REG(val, addr) __raw_writeq(val, addr)

#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void
sba_dump_tlb(char *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
	DBG_INIT("IOC_IBASE    : %016lx\n", READ_REG(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : %016lx\n", READ_REG(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : %016lx\n", READ_REG(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#endif
#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
	unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
	       msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
		       (rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
		       rcnt, ptr, (unsigned long long) *ptr );
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}
/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify that the resource map and pdir state are consistent.
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
	u64 *rptr = (u64 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;		/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u64 rval;
		int rcnt; /* number of bits we might check */

		rval = *rptr;
		rcnt = 64;

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32)((*pptr >> (63)) & 0x1));
			if ((rval & 0x1) ^ pde)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval >>= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}
/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
		       startsg->dma_address, startsg->dma_length,
		       sba_sg_address(startsg));
		startsg = sg_next(startsg);
	}
}

static void
sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	struct scatterlist *the_sg = startsg;
	int the_nents = nents;

	while (the_nents-- > 0) {
		if (sba_sg_address(the_sg) == 0x0UL)
			sba_dump_sg(NULL, startsg, nents);
		the_sg = sg_next(the_sg);
	}
}

#endif /* ASSERT_PDIR_SANITY */
/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))

#define PDIR_ENTRY_SIZE	sizeof(u64)

#define PDIR_INDEX(iovp)   ((iovp)>>iovp_shift)

#define RESMAP_MASK(n)    ~(~0UL << (n))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
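
/*
** Illustrative example (not part of the driver): assuming a 4KB I/O page
** (iovp_shift == 12) and a firmware-programmed ioc->ibase of 0x40000000,
** a pdir index (pide) of 0x1000 gives iovp = 0x1000 << 12 = 0x1000000.
** With a sub-page offset of 0x234:
**
**   SBA_IOVA(ioc, 0x1000000, 0x234) == 0x40000000 | 0x1000000 | 0x234
**                                   == 0x41000234
**   SBA_IOVP(ioc, 0x41000234)       == 0x1000234  (strips ibase)
**   PDIR_INDEX(0x1000234)           == 0x1000     (back to the pide)
**
** Likewise RESMAP_MASK(3) == 0x7, a run of three resource-map bits.
*/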
/**
 * For most cases the normal get_order is sufficient; however, it limits us
 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
 * It only incurs about 1 clock cycle to use this one with the static variable
 * and makes the code more intuitive.
 */
static SBA_INLINE int
get_iovp_order (unsigned long size)
{
	long double d = size - 1;
	long order;

	order = ia64_getf_exp(d);
	order = order - iovp_shift - 0xffff + 1;
	if (order < 0)
		order = 0;
	return order;
}
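
/*
** Worked example (illustrative): with iovp_shift == 12 and size == 16384,
** d = 16383.0 and ia64_getf_exp() returns the biased exponent 0xffff + 13.
** Then order = (0xffff + 13) - 12 - 0xffff + 1 = 2, so 1 << 2 = 4 I/O
** pages cover the 16KB request.  For size == 4096, the exponent of 4095.0
** is 0xffff + 11 and order comes out 0 (a single page).  In effect this
** computes ceil(log2(size)) - iovp_shift, clamped at 0, using the FP
** exponent field instead of a loop or table.
*/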
static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}
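
/*
** Example (illustrative): if res_ptr points three unsigned longs past
** res_map, the byte offset is 24, i.e. 24 << 3 = 192 bits; with
** bitshiftcnt == 5 the returned pide is 197 (the 198th pdir entry).
*/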
/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 * @use_hint: use res_hint to indicate where to start looking
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted, int use_hint)
{
	unsigned long *res_ptr;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long flags, pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
	ASSERT(res_ptr < res_end);

	boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
	boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;

	BUG_ON(ioc->ibase & ~iovp_mask);
	shift = ioc->ibase >> iovp_shift;

	spin_lock_irqsave(&ioc->res_lock, flags);

	/* Allow caller to force a search through the entire resource space */
	if (likely(use_hint)) {
		res_ptr = ioc->res_hint;
	} else {
		res_ptr = (ulong *)ioc->res_map;
		ioc->res_bitshift = 0;
	}

	/*
	 * N.B.  REO/Grande defect AR2305 can cause TLB fetch timeouts
	 * if a TLB entry is purged while in use.  sba_mark_invalid()
	 * purges IOTLB entries in power-of-two sizes, so we also
	 * allocate IOVA space in power-of-two sizes.
	 */
	bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);
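	/*
	** E.g. (illustrative): a request for 3 pdir entries becomes
	** 1UL << get_iovp_order(3 << iovp_shift) == 4 entries, so the
	** eventual PCOM purge can cover the allocation with a single
	** naturally-aligned power-of-two range.
	*/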
	if (likely(bits_wanted == 1)) {
		unsigned int bitshiftcnt;
		for(; res_ptr < res_end ; res_ptr++) {
			if (likely(*res_ptr != ~0UL)) {
				bitshiftcnt = ffz(*res_ptr);
				*res_ptr |= (1UL << bitshiftcnt);
				pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
				ioc->res_bitshift = bitshiftcnt + bits_wanted;
				goto found_it;
			}
		}
		goto not_found;
	}

	if (likely(bits_wanted <= BITS_PER_LONG/2)) {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
		unsigned long mask, base_mask;

		base_mask = RESMAP_MASK(bits_wanted);
		mask = base_mask << bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		for(; res_ptr < res_end ; res_ptr++)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			ASSERT(0 != mask);
			for (; mask ; mask <<= o, bitshiftcnt += o) {
				tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
				ret = iommu_is_span_boundary(tpide, bits_wanted,
							     shift,
							     boundary_size);
				if ((0 == ((*res_ptr) & mask)) && !ret) {
					*res_ptr |= mask;	/* mark resources busy! */
					pide = tpide;
					ioc->res_bitshift = bitshiftcnt + bits_wanted;
					goto found_it;
				}
			}

			bitshiftcnt = 0;
			mask = base_mask;
		}

	} else {
		int qwords, bits, i;
		unsigned long *end;

		qwords = bits_wanted >> 6; /* /64 */
		bits = bits_wanted - (qwords * BITS_PER_LONG);

		end = res_end - qwords;

		for (; res_ptr < end; res_ptr++) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift, boundary_size);
			if (ret)
				goto next_ptr;
			for (i = 0 ; i < qwords ; i++) {
				if (res_ptr[i] != 0)
					goto next_ptr;
			}
			if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
				continue;

			/* Found it, mark it */
			for (i = 0 ; i < qwords ; i++)
				res_ptr[i] = ~0UL;
			res_ptr[i] |= RESMAP_MASK(bits);

			pide = tpide;
			res_ptr += qwords;
			ioc->res_bitshift = bits;
			goto found_it;
next_ptr:
			;
		}
	}

not_found:
	prefetch(ioc->res_map);
	ioc->res_hint = (unsigned long *) ioc->res_map;
	ioc->res_bitshift = 0;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);

found_it:
	ioc->res_hint = res_ptr;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);
}
/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and then mark those bits in
 * the resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
	unsigned long itc_start;
#endif
	unsigned long pide;

	ASSERT(pages_needed);
	ASSERT(0 == (size & ~iovp_mask));

#ifdef PDIR_SEARCH_TIMING
	itc_start = ia64_get_itc();
#endif
	/*
	** "seek and ye shall find"...praying never hurts either...
	*/
	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
	if (unlikely(pide >= (ioc->res_size << 3))) {
		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
		if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
			unsigned long flags;

			/*
			** With delayed resource freeing, we can give this one more shot.  We're
			** getting close to being in trouble here, so do what we can to make this
			** one count.
			*/
			spin_lock_irqsave(&ioc->saved_lock, flags);
			if (ioc->saved_cnt > 0) {
				struct sba_dma_pair *d;
				int cnt = ioc->saved_cnt;

				d = &(ioc->saved[ioc->saved_cnt - 1]);

				spin_lock(&ioc->res_lock);
				while (cnt--) {
					sba_mark_invalid(ioc, d->iova, d->size);
					sba_free_range(ioc, d->iova, d->size);
					d--;
				}
				ioc->saved_cnt = 0;
				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
				spin_unlock(&ioc->res_lock);
			}
			spin_unlock_irqrestore(&ioc->saved_lock, flags);

			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
			if (unlikely(pide >= (ioc->res_size << 3))) {
				printk(KERN_WARNING "%s: I/O MMU @ %p is "
				       "out of mapping resources, %u %u %lx\n",
				       __func__, ioc->ioc_hpa, ioc->res_size,
				       pages_needed, dma_get_seg_boundary(dev));
				return -1;
			}
#else
			printk(KERN_WARNING "%s: I/O MMU @ %p is "
			       "out of mapping resources, %u %u %lx\n",
			       __func__, ioc->ioc_hpa, ioc->res_size,
			       pages_needed, dma_get_seg_boundary(dev));
			return -1;
#endif
		}
	}

#ifdef PDIR_SEARCH_TIMING
	ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
#endif

	prefetchw(&(ioc->pdir_base[pide]));

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

	return (pide);
}
/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes that were mapped.
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
	int bits_not_wanted = size >> iovp_shift;
	unsigned long m;

	/* Round up to power-of-two size: see AR2305 note above */
	bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
	for (; bits_not_wanted > 0 ; res_ptr++) {

		if (unlikely(bits_not_wanted > BITS_PER_LONG)) {

			/* these mappings start 64bit aligned */
			*res_ptr = 0UL;
			bits_not_wanted -= BITS_PER_LONG;
			pide += BITS_PER_LONG;

		} else {

			/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
			m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
			bits_not_wanted = 0;

			DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
			        bits_not_wanted, m, pide, res_ptr, *res_ptr);

			ASSERT(m != 0);
			ASSERT(bits_not_wanted);
			ASSERT((*res_ptr & m) == m); /* verify same bits are set */
			*res_ptr &= ~m;
		}
	}
}
/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr: pointer to IO PDIR entry
 * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *
 *  63                    40                                 11    7        0
 * +-+---------------------+----------------------------------+----+--------+
 * |V|         U           |            PPN[39:12]            | U  |   FF   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 *  V  == Valid Bit
 *  U  == Unused
 * PPN == Physical Page Number
 *
 * The physical address fields are filled with the results of virt_to_phys()
 * on the vba.
 */

#if 1
#define sba_io_pdir_entry(pdir_ptr, vba)	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
						      | 0x8000000000000000ULL)
#else
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
{
	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
}
#endif
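
/*
** Worked example (illustrative): for an identity-mapped region-7 kernel
** address vba == 0xE000000012345678, the macro computes
**
**   0xE000000012345678 & ~0xE000000000000FFF == 0x0000000012345000
**   | 0x8000000000000000                     == 0x8000000012345000
**
** i.e. it strips the region bits (63:61) and the 4KB page offset, leaving
** the physical page number, and sets the Valid bit.  The 0xFFF mask
** hard-codes a 4KB I/O page; the out-of-line variant also sets the low
** byte corresponding to the "FF" field in the diagram above.
*/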
#ifdef ENABLE_MARK_CLEAN
/**
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page((void *)pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}
#endif
/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO Virtual Address mapped earlier
 * @byte_cnt: number of bytes this mapping covers.
 *
 * Mark the IO PDIR entry(ies) as Invalid and invalidate the
 * corresponding IO TLB entry(ies). The PCOM (Purge Command Register)
 * is used to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);

	int off = PDIR_INDEX(iovp);

	/* Must be non-zero and rounded up */
	ASSERT(byte_cnt > 0);
	ASSERT(0 == (byte_cnt & ~iovp_mask));

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (!(ioc->pdir_base[off] >> 60)) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt <= iovp_size)
	{
		ASSERT(off < ioc->pdir_size);

		iovp |= iovp_shift;     /* set "size" field for PCOM */

#ifndef FULL_VALID_PDIR
		/*
		** clear I/O PDIR entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
		ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
		/*
		** If we want to maintain the PDIR as valid, put in
		** the spill page so devices prefetching won't
		** cause a hard fail.
		*/
		ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
	} else {
		u32 t = get_iovp_order(byte_cnt) + iovp_shift;

		iovp |= t;
		ASSERT(t <= 31);   /* 2GB! Max value of "size" field */

		do {
			/* verify this pdir entry is enabled */
			ASSERT(ioc->pdir_base[off] >> 63);
#ifndef FULL_VALID_PDIR
			/* clear I/O Pdir entry "valid" bit first */
			ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
			ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
			off++;
			byte_cnt -= iovp_size;
		} while (byte_cnt > 0);
	}

	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
}
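
/*
** PCOM encoding, worked example (illustrative, inferred from the "size"
** field handling above): the low bits of the address written to IOC_PCOM
** carry log2 of the purge size in bytes.  With iovp_shift == 12, purging
** a single 4KB page at iovp 0x1000000 writes (0x1000000 | 12) | ibase;
** purging a 16KB range writes (0x1000000 | 14) | ibase, since
** get_iovp_order(16384) + 12 == 14.  This is why allocations are kept as
** naturally-aligned powers of two.
*/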
/**
 * sba_map_page - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @page: page holding the driver buffer to map.
 * @poff: offset of the buffer within @page.
 * @size: number of bytes to map.
 * @dir: R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
			       unsigned long poff, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	struct ioc *ioc;
	void *addr = page_address(page) + poff;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS
	unsigned long pci_addr = virt_to_phys(addr);
#endif

#ifdef ALLOW_IOV_BYPASS
	ASSERT(to_pci_dev(dev)->dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
		/*
		** Device is capable of DMA'ing to the buffer...
		** just return the PCI address of ptr
		*/
		DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
			   "0x%lx/0x%lx\n",
			   to_pci_dev(dev)->dma_mask, pci_addr);
		return pci_addr;
	}
#endif

	ioc = GET_IOC(dev);
	ASSERT(ioc);

	prefetch(ioc->res_hint);

	ASSERT(size > 0);
	ASSERT(size <= DMA_CHUNK_SIZE);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~iovp_mask;

	/* round up to nearest iovp_size */
	size = (size + offset + ~iovp_mask) & iovp_mask;
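	/*
	** Example (illustrative): with 4KB I/O pages, an addr ending in
	** 0x...1234 and size 0x1800 give offset = 0x234 and
	** size = (0x1800 + 0x234 + 0xFFF) & ~0xFFF = 0x2000,
	** so the mapping spans two pdir entries.
	*/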
#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
		panic("Sanity check failed");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	pide = sba_alloc_range(ioc, dev, size);
	if (pide < 0)
		return 0;

	iovp = (dma_addr_t) pide << iovp_shift;

	DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
		sba_io_pdir_entry(pdir_start, (unsigned long) addr);

		DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);

		addr += iovp_size;
		size -= iovp_size;
		pdir_start++;
	}
	/* force pdir update */
	wmb();

	/* form complete address */
#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
	return SBA_IOVA(ioc, iovp, offset);
}

static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
				       size_t size, enum dma_data_direction dir,
				       struct dma_attrs *attrs)
{
	return sba_map_page(dev, virt_to_page(addr),
			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
}
#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	u32	iovp = (u32) SBA_IOVP(ioc,iova);
	int	off = PDIR_INDEX(iovp);
	void	*addr;

	if (size <= iovp_size) {
		addr = phys_to_virt(ioc->pdir_base[off] &
		                    ~0xE000000000000FFFULL);
		mark_clean(addr, size);
	} else {
		do {
			addr = phys_to_virt(ioc->pdir_base[off] &
			                    ~0xE000000000000FFFULL);
			mark_clean(addr, min(size, iovp_size));
			off++;
			size -= iovp_size;
		} while (size > 0);
	}
}
#endif
/**
 * sba_unmap_page - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova: IOVA of driver buffer previously mapped.
 * @size: number of bytes mapped in driver buffer.
 * @dir: R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
			   enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS
	if (likely((iova & ioc->imask) != ioc->ibase)) {
		/*
		** Address does not fall w/in IOVA, must be bypassing
		*/
		DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
			   iova);

#ifdef ENABLE_MARK_CLEAN
		if (dir == DMA_FROM_DEVICE) {
			mark_clean(phys_to_virt(iova), size);
		}
#endif
		return;
	}
#endif
	offset = iova & ~iovp_mask;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	iova ^= offset;        /* clear offset bits */
	size += offset;
	size = ROUNDUP(size, iovp_size);

#ifdef ENABLE_MARK_CLEAN
	if (dir == DMA_FROM_DEVICE)
		sba_mark_clean(ioc, iova, size);
#endif

#if DELAYED_RESOURCE_CNT > 0
	spin_lock_irqsave(&ioc->saved_lock, flags);
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
		int cnt = ioc->saved_cnt;
		spin_lock(&ioc->res_lock);
		while (cnt--) {
			sba_mark_invalid(ioc, d->iova, d->size);
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;
		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
		spin_unlock(&ioc->res_lock);
	}
	spin_unlock_irqrestore(&ioc->saved_lock, flags);
#else /* DELAYED_RESOURCE_CNT == 0 */
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_mark_invalid(ioc, iova, size);
	sba_free_range(ioc, iova, size);
	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
}

void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
			    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	sba_unmap_page(dev, iova, size, dir, attrs);
}
/**
 * sba_alloc_coherent - allocate/map shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size: number of bytes to allocate.
 * @dma_handle: IOVA of new buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void *
sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flags, struct dma_attrs *attrs)
{
	struct ioc *ioc;
	void *addr;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef CONFIG_NUMA
	{
		int node = ioc->node;
		struct page *page;

		if (node == NUMA_NO_NODE)
			node = numa_node_id();

		page = alloc_pages_exact_node(node, flags, get_order(size));
		if (unlikely(!page))
			return NULL;

		addr = page_address(page);
	}
#else
	addr = (void *) __get_free_pages(flags, get_order(size));
#endif
	if (unlikely(!addr))
		return NULL;

	memset(addr, 0, size);
	*dma_handle = virt_to_phys(addr);

#ifdef ALLOW_IOV_BYPASS
	ASSERT(dev->coherent_dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
		DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
			   dev->coherent_dma_mask, *dma_handle);

		return addr;
	}
#endif

	/*
	 * If device can't bypass or bypass is disabled, pass the 32bit fake
	 * device to map single to get an iova mapping.
	 */
	*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
					   size, 0, NULL);

	return addr;
}


/**
 * sba_free_coherent - free/unmap shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size: number of bytes mapped in driver buffer.
 * @vaddr: virtual address of "consistent" buffer.
 * @dma_handle: IO virtual address of "consistent" buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
	free_pages((unsigned long) vaddr, get_order(size));
}
/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x1UL

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif
/**
 * sba_fill_pdir - write allocated SG entries into IO PDIR
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * Take preprocessed SG list and write corresponding entries
 * in the IO PDIR.
 */
static SBA_INLINE int
sba_fill_pdir(
	struct ioc *ioc,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	int n_mappings = 0;
	u64 *pdirp = NULL;
	unsigned long dma_offset = 0;

	while (nents-- > 0) {
		int cnt = startsg->dma_length;
		startsg->dma_length = 0;

#ifdef DEBUG_LARGE_SG_ENTRIES
		if (dump_run_sg)
			printk(" %2d : %08lx/%05x %p\n",
				nents, startsg->dma_address, cnt,
				sba_sg_address(startsg));
#else
		DBG_RUN_SG(" %d : %08lx/%05x %p\n",
				nents, startsg->dma_address, cnt,
				sba_sg_address(startsg));
#endif
		/*
		** Look for the start of a new DMA stream
		*/
		if (startsg->dma_address & PIDE_FLAG) {
			u32 pide = startsg->dma_address & ~PIDE_FLAG;
			dma_offset = (unsigned long) pide & ~iovp_mask;
			startsg->dma_address = 0;
			if (n_mappings)
				dma_sg = sg_next(dma_sg);
			dma_sg->dma_address = pide | ioc->ibase;
			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
			n_mappings++;
		}

		/*
		** Look for a VCONTIG chunk
		*/
		if (cnt) {
			unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
			ASSERT(pdirp);

			/* Since multiple Vcontig blocks could make up
			** one DMA stream, *add* cnt to dma_len.
			*/
			dma_sg->dma_length += cnt;
			cnt += dma_offset;
			dma_offset = 0;	/* only want offset on first chunk */
			cnt = ROUNDUP(cnt, iovp_size);
			do {
				sba_io_pdir_entry(pdirp, vaddr);
				vaddr += iovp_size;
				cnt -= iovp_size;
				pdirp++;
			} while (cnt > 0);
		}
		startsg = sg_next(startsg);
	}
	/* force pdir update */
	wmb();

#ifdef DEBUG_LARGE_SG_ENTRIES
	dump_run_sg = 0;
#endif
	return(n_mappings);
}
/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
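
/*
** Example (illustrative): with BITS_PER_LONG == 64 and iovp_shift == 12,
** the OR of the two addresses is shifted left by 52 bits, so the test
** passes only when the low 12 bits of both are zero.
** DMA_CONTIG(0x104000, 0x9000000) is true (both 4KB aligned);
** DMA_CONTIG(0x104800, 0x9000000) is false because the first value has
** page-offset bits set.
*/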
/**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * First pass is to walk the SG list and determine where the breaks are
 * in the DMA stream. Allocates PDIR entries but does not fill them.
 * Returns the number of DMA chunks.
 *
 * Doing the fill separate from the coalescing/allocation keeps the
 * code simpler. Future enhancement could make one pass through
 * the sglist do both.
 */
static SBA_INLINE int
sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
	unsigned long vcontig_len;         /* len of VCONTIG chunk */
	unsigned long vcontig_end;
	struct scatterlist *dma_sg;        /* next DMA stream head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	int n_mappings = 0;
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
	int idx;

	while (nents > 0) {
		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);

		/*
		** Prepare for first/next DMA stream
		*/
		dma_sg = vcontig_sg = startsg;
		dma_len = vcontig_len = vcontig_end = startsg->length;
		vcontig_end += vaddr;
		dma_offset = vaddr & ~iovp_mask;

		/* PARANOID: clear entries */
		startsg->dma_address = startsg->dma_length = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long vaddr;	/* tmp */

			startsg = sg_next(startsg);

			/* PARANOID */
			startsg->dma_address = startsg->dma_length = 0;

			/* catch brokenness in SCSI layer */
			ASSERT(startsg->length <= DMA_CHUNK_SIZE);

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
			    > DMA_CHUNK_SIZE)
				break;

			if (dma_len + startsg->length > max_seg_size)
				break;

			/*
			** Then look for virtually contiguous blocks.
			**
			** append the next transaction?
			*/
			vaddr = (unsigned long) sba_sg_address(startsg);
			if (vcontig_end == vaddr)
			{
				vcontig_len += startsg->length;
				vcontig_end += startsg->length;
				dma_len     += startsg->length;
				continue;
			}

#ifdef DEBUG_LARGE_SG_ENTRIES
			dump_run_sg = (vcontig_len > iovp_size);
#endif

			/*
			** Not virtually contiguous.
			** Terminate prev chunk.
			** Start a new chunk.
			**
			** Once we start a new VCONTIG chunk, dma_offset
			** can't change. And we need the offset from the first
			** chunk - not the last one. Ergo, successive chunks
			** must start on page boundaries and dovetail
			** with their predecessors.
			*/
			vcontig_sg->dma_length = vcontig_len;

			vcontig_sg = startsg;
			vcontig_len = startsg->length;

			/*
			** 3) do the entries end/start on page boundaries?
			**    Don't update vcontig_end until we've checked.
			*/
			if (DMA_CONTIG(vcontig_end, vaddr))
			{
				vcontig_end = vcontig_len + vaddr;
				dma_len += vcontig_len;
				continue;
			} else {
				break;
			}
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		vcontig_sg->dma_length = vcontig_len;
		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
		ASSERT(dma_len <= DMA_CHUNK_SIZE);
		idx = sba_alloc_range(ioc, dev, dma_len);
		if (idx < 0) {
			dma_sg->dma_length = 0;
			return -1;
		}
		dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
						   | dma_offset);
		n_mappings++;
	}

	return n_mappings;
}
  1245. static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
  1246. int nents, enum dma_data_direction dir,
  1247. struct dma_attrs *attrs);
  1248. /**
  1249. * sba_map_sg - map Scatter/Gather list
  1250. * @dev: instance of PCI owned by the driver that's asking.
  1251. * @sglist: array of buffer/length pairs
  1252. * @nents: number of entries in list
  1253. * @dir: R/W or both.
  1254. * @attrs: optional dma attributes
  1255. *
  1256. * See Documentation/DMA-API-HOWTO.txt
  1257. */
static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
			    int nents, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS_SG
	struct scatterlist *sg;
#endif

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS_SG
	ASSERT(to_pci_dev(dev)->dma_mask);
	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
		for_each_sg(sglist, sg, nents, filled) {
			sg->dma_length = sg->length;
			sg->dma_address = virt_to_phys(sba_sg_address(sg));
		}
		return filled;
	}
#endif
	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sglist->dma_length = sglist->length;
		sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
		return 1;
	}

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg_attrs()");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	prefetch(ioc->res_hint);

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
	if (coalesced < 0) {
		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
		return 0;
	}

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = sba_fill_pdir(ioc, sglist, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg_attrs()\n");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	ASSERT(coalesced == filled);
	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}
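/*
 * Editorial sketch (not part of this driver): how a client driver would
 * exercise sba_map_sg_attrs() above through the generic DMA API.  The
 * example_dma() wrapper and use_chunk() helper are hypothetical
 * placeholders, not real kernel symbols.
 */
#if 0
static int example_dma(struct device *dev, struct scatterlist *sglist,
		       int nents)
{
	struct scatterlist *sg;
	int i, count;

	/* may return fewer entries than nents when chunks coalesce */
	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
	if (!count)
		return -ENOMEM;

	for_each_sg(sglist, sg, count, i)
		use_chunk(sg_dma_address(sg), sg_dma_len(sg)); /* program HW */

	/* ... perform the DMA ... */

	/* unmap with the original nents, per Documentation/DMA-API-HOWTO.txt */
	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
	return 0;
}
#endif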
/**
 * sba_unmap_sg_attrs - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @dir: R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
			       int nents, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
#ifdef ASSERT_PDIR_SANITY
	struct ioc *ioc;
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		   __func__, nents, sba_sg_address(sglist), sglist->length);

#ifdef ASSERT_PDIR_SANITY
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (nents && sglist->dma_length) {
		sba_unmap_single_attrs(dev, sglist->dma_address,
				       sglist->dma_length, dir, attrs);
		sglist = sg_next(sglist);
		nents--;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
}
/**************************************************************
*
*   Initialization and claim
*
***************************************************************/
static void
ioc_iova_init(struct ioc *ioc)
{
	int tcnfg;
	int agp_found = 0;
	struct pci_dev *device = NULL;
#ifdef FULL_VALID_PDIR
	unsigned long index;
#endif

	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
	ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;

	ioc->iov_size = ~ioc->imask + 1;

	DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
		__func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
		ioc->iov_size >> 20);

	switch (iovp_size) {
		case  4*1024: tcnfg = 0; break;
		case  8*1024: tcnfg = 1; break;
		case 16*1024: tcnfg = 2; break;
		case 64*1024: tcnfg = 3; break;
		default:
			panic(PFX "Unsupported IOTLB page size %ldK",
				iovp_size >> 10);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic(PFX "Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);

	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA.  Unfortunately we can't know ahead of time
	** whether GART support will actually be used, for now we
	** can just key on an AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	for_each_pci_dev(device)
		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);

	if (agp_found && reserve_sba_gart) {
		printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
		       ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
		ioc->pdir_size /= 2;
		((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
	}
#ifdef FULL_VALID_PDIR
	/*
	** Check to see if the spill page has been allocated, we don't need more than
	** one across multiple SBAs.
	*/
	if (!prefetch_spill_page) {
		char *spill_poison = "SBAIOMMU POISON";
		int poison_size = 16;
		void *poison_addr, *addr;

		addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
		if (!addr)
			panic(PFX "Couldn't allocate PDIR spill page\n");

		poison_addr = addr;
		for ( ; (u64) poison_addr < (u64) addr + iovp_size; poison_addr += poison_size)
			memcpy(poison_addr, spill_poison, poison_size);

		prefetch_spill_page = virt_to_phys(addr);

		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
	}
	/*
	** Set all the PDIR entries valid w/ the spill page as the target
	*/
	for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
		((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
#endif

	/* Clear I/O TLB of any possible entries */
	WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
	READ_REG(ioc->ioc_hpa + IOC_PCOM);

	/* Enable IOVA translation */
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
	READ_REG(ioc->ioc_hpa + IOC_IBASE);
}
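/*
 * Editorial worked example (illustrative register values): if firmware set
 * the low word of IMASK to 0xC0000000, ioc_iova_init() above computes
 *
 *	imask    = 0xFFFFFFFFC0000000
 *	iov_size = ~imask + 1 = 0x40000000	(1 GB of IOVA space)
 *
 * and, assuming 4K IOTLB pages and 8-byte pdir entries, the pdir needs
 * (1 GB / 4 KB) * 8 bytes = 2 MB.
 */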
static void __init
ioc_resource_init(struct ioc *ioc)
{
	spin_lock_init(&ioc->res_lock);
#if DELAYED_RESOURCE_CNT > 0
	spin_lock_init(&ioc->saved_lock);
#endif

	/* resource map size dictated by pdir_size */
	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
	ioc->res_size >>= 3;  /* convert bit count to byte count */
	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
						 get_order(ioc->res_size));
	if (!ioc->res_map)
		panic(PFX "Couldn't allocate resource map\n");

	memset(ioc->res_map, 0, ioc->res_size);
	/* next available IOVP - circular search */
	ioc->res_hint = (unsigned long *) ioc->res_map;

#ifdef ASSERT_PDIR_SANITY
	/* Mark first bit busy - ie no IOVA 0 */
	ioc->res_map[0] = 0x1;
	ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
#endif
#ifdef FULL_VALID_PDIR
	/* Mark the last resource used so we don't prefetch beyond IOVA space */
	ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
	ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
								  | prefetch_spill_page);
#endif

	DBG_INIT("%s() res_map %x %p\n", __func__,
		 ioc->res_size, (void *) ioc->res_map);
}
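/*
 * Editorial note: continuing the illustrative 1 GB / 4 KB example above,
 * pdir_size / PDIR_ENTRY_SIZE = 2 MB / 8 = 262144 pdir entries, one bitmap
 * bit each, so res_size = 262144 >> 3 = 32 KB of resource map.
 */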
static void __init
ioc_sac_init(struct ioc *ioc)
{
	struct pci_dev *sac = NULL;
	struct pci_controller *controller = NULL;

	/*
	 * pci_alloc_coherent() must return a DMA address which is
	 * SAC (single address cycle) addressable, so allocate a
	 * pseudo-device to enforce that.
	 */
	sac = kzalloc(sizeof(*sac), GFP_KERNEL);
	if (!sac)
		panic(PFX "Couldn't allocate struct pci_dev");

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		panic(PFX "Couldn't allocate struct pci_controller");

	controller->iommu = ioc;
	sac->sysdata = controller;
	sac->dma_mask = 0xFFFFFFFFUL;
#ifdef CONFIG_PCI
	sac->dev.bus = &pci_bus_type;
#endif
	ioc->sac_only_dev = sac;
}
static void __init
ioc_zx1_init(struct ioc *ioc)
{
	unsigned long rope_config;
	unsigned int i;

	if (ioc->rev < 0x20)
		panic(PFX "IOC 2.0 or later required for IOMMU support\n");

	/* 38 bit memory controller + extra bit for range displaced by MMIO */
	ioc->dma_mask = (0x1UL << 39) - 1;

	/*
	** Clear ROPE(N)_CONFIG AO bit.
	** Disables "NT Ordering" (~= !"Relaxed Ordering")
	** Overrides bit 1 in DMA Hint Sets.
	** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
	*/
	for (i=0; i<(8*8); i+=8) {
		rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
		rope_config &= ~IOC_ROPE_AO;
		WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
	}
}
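/*
 * Editorial note: the loop above walks eight rope config registers laid out
 * at an 8-byte stride, i.e. offsets IOC_ROPE0_CFG + 0x00, 0x08, ... 0x38,
 * read-modify-writing each to clear the AO bit.
 */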
typedef void (initfunc)(struct ioc *);

struct ioc_iommu {
	u32 func_id;
	char *name;
	initfunc *init;
};

static struct ioc_iommu ioc_iommu_info[] __initdata = {
	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
	{ ZX2_IOC_ID, "zx2", NULL },
	{ SX1000_IOC_ID, "sx1000", NULL },
	{ SX2000_IOC_ID, "sx2000", NULL },
};
static void ioc_init(unsigned long hpa, struct ioc *ioc)
{
	struct ioc_iommu *info;

	ioc->next = ioc_list;
	ioc_list = ioc;

	ioc->ioc_hpa = ioremap(hpa, 0x1000);

	ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
	ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
	ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;	/* conservative */

	for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
		if (ioc->func_id == info->func_id) {
			ioc->name = info->name;
			if (info->init)
				(info->init)(ioc);
		}
	}

	iovp_size = (1 << iovp_shift);
	iovp_mask = ~(iovp_size - 1);

	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
		PAGE_SIZE >> 10, iovp_size >> 10);

	if (!ioc->name) {
		ioc->name = kmalloc(24, GFP_KERNEL);
		if (ioc->name)
			sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
				ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
		else
			ioc->name = "Unknown";
	}

	ioc_iova_init(ioc);
	ioc_resource_init(ioc);
	ioc_sac_init(ioc);

	if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
		ia64_max_iommu_merge_mask = ~iovp_mask;

	printk(KERN_INFO PFX
		"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
		ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
		hpa, ioc->iov_size >> 20, ioc->ibase);
}
/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/
#ifdef CONFIG_PROC_FS
static void *
ioc_start(struct seq_file *s, loff_t *pos)
{
	struct ioc *ioc;
	loff_t n = *pos;

	for (ioc = ioc_list; ioc; ioc = ioc->next)
		if (!n--)
			return ioc;

	return NULL;
}

static void *
ioc_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct ioc *ioc = v;

	++*pos;
	return ioc->next;
}

static void
ioc_stop(struct seq_file *s, void *v)
{
}
static int
ioc_show(struct seq_file *s, void *v)
{
	struct ioc *ioc = v;
	unsigned long *res_ptr = (unsigned long *)ioc->res_map;
	int i, used = 0;

	seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
		   ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
#ifdef CONFIG_NUMA
	if (ioc->node != NUMA_NO_NODE)
		seq_printf(s, "NUMA node       : %d\n", ioc->node);
#endif
	seq_printf(s, "IOVA size       : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
	seq_printf(s, "IOVA page size  : %ld kb\n", iovp_size/1024);

	for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		used += hweight64(*res_ptr);

	seq_printf(s, "PDIR size       : %d entries\n", ioc->pdir_size >> 3);
	seq_printf(s, "PDIR used       : %d entries\n", used);

#ifdef PDIR_SEARCH_TIMING
	{
		unsigned long i = 0, avg = 0, min, max;
		min = max = ioc->avg_search[0];
		for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
			avg += ioc->avg_search[i];
			if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
			if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
		}
		avg /= SBA_SEARCH_SAMPLE;
		seq_printf(s, "Bitmap search   : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
			   min, avg, max);
	}
#endif
#ifndef ALLOW_IOV_BYPASS
	seq_printf(s, "IOVA bypass disabled\n");
#endif
	return 0;
}
static const struct seq_operations ioc_seq_ops = {
	.start = ioc_start,
	.next  = ioc_next,
	.stop  = ioc_stop,
	.show  = ioc_show
};

static int
ioc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ioc_seq_ops);
}

static const struct file_operations ioc_fops = {
	.open    = ioc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

static void __init
ioc_proc_init(void)
{
	struct proc_dir_entry *dir;

	dir = proc_mkdir("bus/mckinley", NULL);
	if (!dir)
		return;

	proc_create(ioc_list->name, 0, dir, &ioc_fops);
}
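/*
 * Editorial note: reading /proc/bus/mckinley/<ioc-name> then produces
 * output shaped by the seq_printf calls above, e.g. (values illustrative,
 * not from a real system):
 *
 *	Hewlett Packard zx1 IOC rev 2.0
 *	IOVA size       : 1024 MB
 *	IOVA page size  : 4 kb
 *	PDIR size       : 262144 entries
 *	PDIR used       : 42 entries
 */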
#endif
static void
sba_connect_bus(struct pci_bus *bus)
{
	acpi_handle handle, parent;
	acpi_status status;
	struct ioc *ioc;

	if (!PCI_CONTROLLER(bus))
		panic(PFX "no sysdata on bus %d!\n", bus->number);

	if (PCI_CONTROLLER(bus)->iommu)
		return;

	handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
	if (!handle)
		return;

	/*
	 * The IOC scope encloses PCI root bridges in the ACPI
	 * namespace, so work our way out until we find an IOC we
	 * claimed previously.
	 */
	do {
		for (ioc = ioc_list; ioc; ioc = ioc->next)
			if (ioc->handle == handle) {
				PCI_CONTROLLER(bus)->iommu = ioc;
				return;
			}

		status = acpi_get_parent(handle, &parent);
		handle = parent;
	} while (ACPI_SUCCESS(status));

	printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
}
static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{
#ifdef CONFIG_NUMA
	unsigned int node;

	node = acpi_get_node(handle);
	if (node != NUMA_NO_NODE && !node_online(node))
		node = NUMA_NO_NODE;

	ioc->node = node;
#endif
}
static void acpi_sba_ioc_add(struct ioc *ioc)
{
	acpi_handle handle = ioc->handle;
	acpi_status status;
	u64 hpa, length;
	struct acpi_device_info *adi;

	ioc_found = ioc->next;
	status = hp_acpi_csr_space(handle, &hpa, &length);
	if (ACPI_FAILURE(status))
		goto err;

	status = acpi_get_object_info(handle, &adi);
	if (ACPI_FAILURE(status))
		goto err;

	/*
	 * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
	 * root bridges, and its CSR space includes the IOC function.
	 */
	if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
		hpa += ZX1_IOC_OFFSET;
		/* zx1 based systems default to kernel page size iommu pages */
		if (!iovp_shift)
			iovp_shift = min(PAGE_SHIFT, 16);
	}
	kfree(adi);

	/*
	 * default anything not caught above or specified on cmdline to 4k
	 * iommu page size
	 */
	if (!iovp_shift)
		iovp_shift = 12;

	ioc_init(hpa, ioc);
	/* setup NUMA node association */
	sba_map_ioc_to_node(ioc, handle);
	return;

 err:
	kfree(ioc);
}
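/*
 * Editorial worked example: on an HWP0001 (zx1) system built with 16 KB
 * kernel pages, PAGE_SHIFT = 14, so iovp_shift above defaults to
 * min(14, 16) = 14, i.e. 16 KB IOMMU pages; anything not caught there
 * falls through to iovp_shift = 12 (4 KB) unless sbapagesize= (see the
 * sba_page_override() handler below) overrode it.
 */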
static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
	{"HWP0001", 0},
	{"HWP0004", 0},
	{"", 0},
};

static int acpi_sba_ioc_attach(struct acpi_device *device,
			       const struct acpi_device_id *not_used)
{
	struct ioc *ioc;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return -ENOMEM;

	ioc->next = ioc_found;
	ioc_found = ioc;
	ioc->handle = device->handle;
	return 1;
}

static struct acpi_scan_handler acpi_sba_ioc_handler = {
	.ids	= hp_ioc_iommu_device_ids,
	.attach	= acpi_sba_ioc_attach,
};

static int __init acpi_sba_ioc_init_acpi(void)
{
	return acpi_scan_add_handler(&acpi_sba_ioc_handler);
}
/* This has to run before acpi_scan_init(). */
arch_initcall(acpi_sba_ioc_init_acpi);
extern struct dma_map_ops swiotlb_dma_ops;

static int __init
sba_init(void)
{
	if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
		return 0;

#if defined(CONFIG_IA64_GENERIC)
	/* If we are booting a kdump kernel, the sba_iommu will
	 * cause devices that were not shutdown properly to MCA
	 * as soon as they are turned back on.  Our only option for
	 * a successful kdump kernel boot is to use the swiotlb.
	 */
	if (is_kdump_kernel()) {
		dma_ops = &swiotlb_dma_ops;
		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
			panic("Unable to initialize software I/O TLB:"
			      " Try machvec=dig boot option");
		machvec_init("dig");
		return 0;
	}
#endif

	/*
	 * ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
	 * routine, but that only happens if acpi_scan_init() has already run.
	 */
	while (ioc_found)
		acpi_sba_ioc_add(ioc_found);

	if (!ioc_list) {
#ifdef CONFIG_IA64_GENERIC
		/*
		 * If we didn't find something sba_iommu can claim, we
		 * need to setup the swiotlb and switch to the dig machvec.
		 */
		dma_ops = &swiotlb_dma_ops;
		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
			panic("Unable to find SBA IOMMU or initialize "
			      "software I/O TLB: Try machvec=dig boot option");
		machvec_init("dig");
#else
		panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
#endif
		return 0;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
	/*
	 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
	 * buffer setup to support devices with smaller DMA masks than
	 * sba_iommu can handle.
	 */
	if (ia64_platform_is("hpzx1_swiotlb")) {
		extern void hwsw_init(void);

		hwsw_init();
	}
#endif

#ifdef CONFIG_PCI
	{
		struct pci_bus *b = NULL;
		while ((b = pci_find_next_bus(b)) != NULL)
			sba_connect_bus(b);
	}
#endif

#ifdef CONFIG_PROC_FS
	ioc_proc_init();
#endif
	return 0;
}

subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
static int __init
nosbagart(char *str)
{
	reserve_sba_gart = 0;
	return 1;
}

static int sba_dma_supported (struct device *dev, u64 mask)
{
	/* make sure it's at least 32bit capable */
	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}

static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

__setup("nosbagart", nosbagart);
static int __init
sba_page_override(char *str)
{
	unsigned long page_size;

	page_size = memparse(str, &str);
	switch (page_size) {
		case 4096:
		case 8192:
		case 16384:
		case 65536:
			iovp_shift = ffs(page_size) - 1;
			break;
		default:
			printk("%s: unknown/unsupported iommu page size %ld\n",
			       __func__, page_size);
	}

	return 1;
}

__setup("sbapagesize=",sba_page_override);
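/*
 * Editorial worked example: booting with "sbapagesize=16384" (memparse also
 * accepts suffixed forms like "16k") gives page_size = 16384 = 1 << 14, so
 * iovp_shift = ffs(16384) - 1 = 14 and the IOMMU uses 16 KB pages.
 */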
struct dma_map_ops sba_dma_ops = {
	.alloc			= sba_alloc_coherent,
	.free			= sba_free_coherent,
	.map_page		= sba_map_page,
	.unmap_page		= sba_unmap_page,
	.map_sg			= sba_map_sg_attrs,
	.unmap_sg		= sba_unmap_sg_attrs,
	.sync_single_for_cpu	= machvec_dma_sync_single,
	.sync_sg_for_cpu	= machvec_dma_sync_sg,
	.sync_single_for_device	= machvec_dma_sync_single,
	.sync_sg_for_device	= machvec_dma_sync_sg,
	.dma_supported		= sba_dma_supported,
	.mapping_error		= sba_dma_mapping_error,
};

void sba_dma_init(void)
{
	dma_ops = &sba_dma_ops;
}