/* $OpenBSD: bus_dma.c,v 1.14 2014/11/16 12:30:58 deraadt Exp $ */

/*
 * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <mips64/cache.h>
#include <machine/cpu.h>
#include <machine/autoconf.h>

#include <machine/bus.h>

/*
 * Common function for DMA map creation. May be called by bus-specific
 * DMA map creation functions.
 */
int
_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
        struct machine_bus_dmamap *map;
        void *mapstore;
        size_t mapsize;

        /*
         * Allocate and initialize the DMA map. The end of the map
         * is a variable-sized array of segments, so we allocate enough
         * room for them in one shot.
         *
         * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
         * of ALLOCNOW notifies others that we've reserved these resources,
         * and they are not to be freed.
         *
         * The bus_dmamap_t includes one bus_dma_segment_t, hence
         * the (nsegments - 1).
         */
        mapsize = sizeof(struct machine_bus_dmamap) +
            (sizeof(bus_dma_segment_t) * (nsegments - 1));
        if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
            (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
                return (ENOMEM);

        map = (struct machine_bus_dmamap *)mapstore;
        map->_dm_size = size;
        map->_dm_segcnt = nsegments;
        map->_dm_maxsegsz = maxsegsz;
        map->_dm_boundary = boundary;
        map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);

        *dmamp = map;
        return (0);
}
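
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * drivers normally reach the function above through the MI
 * bus_dmamap_create() entry point of their DMA tag. `sc->sc_dmat' is a
 * stand-in for the tag a driver obtained at attach time:
 *
 *      bus_dmamap_t map;
 *      int error;
 *
 *      error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 16, MAXPHYS, 0,
 *          BUS_DMA_NOWAIT, &map);
 *      if (error != 0)
 *              return (error);
 */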

/*
 * Common function for DMA map destruction. May be called by bus-specific
 * DMA map destruction functions.
 */
void
_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
        free(map, M_DEVBUF, 0);
}

/*
 * Common function for loading a DMA map with a linear buffer. May
 * be called by bus-specific DMA map load functions.
 */
int
_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct proc *p, int flags)
{
        paddr_t lastaddr;
        int seg, error;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_nsegs = 0;
        map->dm_mapsize = 0;

        if (buflen > map->_dm_size)
                return (EINVAL);

        seg = 0;
        error = (*t->_dmamap_load_buffer)(t, map, buf, buflen, p, flags,
            &lastaddr, &seg, 1);
        if (error == 0) {
                map->dm_nsegs = seg + 1;
                map->dm_mapsize = buflen;
        }
        return (error);
}
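
/*
 * Illustrative sketch (hypothetical driver code): loading a kernel
 * buffer ahead of a transfer in which the device reads from memory.
 * `sc', `buf' and `len' are stand-ins for driver state; the calls go
 * through the MI bus_dmamap_load()/bus_dmamap_sync() entry points:
 *
 *      if (bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *          BUS_DMA_NOWAIT) != 0)
 *              return (EAGAIN);
 *      bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *      ... program the device with map->dm_segs[] ...
 */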

/*
 * Like _dmamap_load(), but for mbufs.
 */
int
_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0, int flags)
{
        paddr_t lastaddr;
        int seg, error, first;
        struct mbuf *m;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_nsegs = 0;
        map->dm_mapsize = 0;

#ifdef DIAGNOSTIC
        if ((m0->m_flags & M_PKTHDR) == 0)
                panic("_dmamap_load_mbuf: no packet header");
#endif

        if (m0->m_pkthdr.len > map->_dm_size)
                return (EINVAL);

        first = 1;
        seg = 0;
        error = 0;
        for (m = m0; m != NULL && error == 0; m = m->m_next) {
                if (m->m_len == 0)
                        continue;
                error = (*t->_dmamap_load_buffer)(t, map, m->m_data, m->m_len,
                    NULL, flags, &lastaddr, &seg, first);
                first = 0;
        }
        if (error == 0) {
                map->dm_nsegs = seg + 1;
                map->dm_mapsize = m0->m_pkthdr.len;
        }
        return (error);
}
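
/*
 * Illustrative sketch (hypothetical network driver code): mapping a
 * packet chain for transmit. On failure (e.g. too many segments), a
 * driver would typically compact the chain and retry, or drop it:
 *
 *      if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT))
 *              ... compact the mbuf chain and retry, or free m0 ...
 */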

/*
 * Like _dmamap_load(), but for uios.
 */
int
_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags)
{
        paddr_t lastaddr;
        int seg, i, error, first;
        bus_size_t minlen, resid;
        struct proc *p = NULL;
        struct iovec *iov;
        void *addr;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_nsegs = 0;
        map->dm_mapsize = 0;

        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                p = uio->uio_procp;
#ifdef DIAGNOSTIC
                if (p == NULL)
                        panic("_dmamap_load_uio: USERSPACE but no proc");
#endif
        }

        first = 1;
        seg = 0;
        error = 0;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
                /*
                 * Now at the first iovec to load. Load each iovec
                 * until we have exhausted the residual count.
                 */
                minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
                addr = (void *)iov[i].iov_base;

                error = (*t->_dmamap_load_buffer)(t, map, addr, minlen,
                    p, flags, &lastaddr, &seg, first);
                first = 0;

                resid -= minlen;
        }
        if (error == 0) {
                map->dm_nsegs = seg + 1;
                map->dm_mapsize = uio->uio_resid;
        }
        return (error);
}

/*
 * Like _dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
        if (nsegs > map->_dm_segcnt || size > map->_dm_size)
                return (EINVAL);

        /*
         * Make sure we don't cross any boundaries.
         */
        if (map->_dm_boundary) {
                bus_addr_t bmask = ~(map->_dm_boundary - 1);
                int i;

                if (t->_dma_mask != 0)
                        bmask &= t->_dma_mask;
                for (i = 0; i < nsegs; i++) {
                        if (segs[i].ds_len > map->_dm_maxsegsz)
                                return (EINVAL);
                        if ((segs[i].ds_addr & bmask) !=
                            ((segs[i].ds_addr + segs[i].ds_len - 1) & bmask))
                                return (EINVAL);
                }
        }

        bcopy(segs, map->dm_segs, nsegs * sizeof(*segs));
        map->dm_nsegs = nsegs;
        map->dm_mapsize = size;
        return (0);
}
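
/*
 * Illustrative sketch (hypothetical driver code): memory obtained from
 * bus_dmamem_alloc() can be loaded directly from its segment list,
 * without requiring a kernel virtual mapping:
 *
 *      error = bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size,
 *          BUS_DMA_NOWAIT);
 */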

/*
 * Common function for unloading a DMA map. May be called by
 * bus-specific DMA map unload functions.
 */
void
_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
        /*
         * No resources to free; just mark the mappings as
         * invalid.
         */
        map->dm_nsegs = 0;
        map->dm_mapsize = 0;
}

/*
 * Common function for DMA map synchronization. May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
    bus_size_t size, int op)
{
        int nsegs;
        int curseg;
        struct cpu_info *ci = curcpu();

        nsegs = map->dm_nsegs;
        curseg = 0;

        while (size && nsegs) {
                paddr_t paddr;
                vaddr_t vaddr;
                bus_size_t ssize;

                ssize = map->dm_segs[curseg].ds_len;
                paddr = map->dm_segs[curseg]._ds_paddr;
                vaddr = map->dm_segs[curseg]._ds_vaddr;

                if (addr != 0) {
                        if (addr >= ssize) {
                                addr -= ssize;
                                ssize = 0;
                        } else {
                                vaddr += addr;
                                paddr += addr;
                                ssize -= addr;
                                addr = 0;
                        }
                }
                if (ssize > size)
                        ssize = size;

                if (IS_XKPHYS(vaddr) && XKPHYS_TO_CCA(vaddr) == CCA_NC) {
                        size -= ssize;
                        ssize = 0;
                }

                if (ssize != 0) {
                        /*
                         * If only PREWRITE is requested, writeback.
                         * PREWRITE with PREREAD writebacks
                         * and invalidates (if noncoherent) *all* cache levels.
                         * Otherwise, just invalidate (if noncoherent).
                         */
                        if (op & BUS_DMASYNC_PREWRITE) {
                                if (op & BUS_DMASYNC_PREREAD)
                                        Mips_IOSyncDCache(ci, vaddr,
                                            ssize, CACHE_SYNC_X);
                                else
                                        Mips_IOSyncDCache(ci, vaddr,
                                            ssize, CACHE_SYNC_W);
                        } else
                        if (op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTREAD)) {
                                Mips_IOSyncDCache(ci, vaddr,
                                    ssize, CACHE_SYNC_R);
                        }
                        size -= ssize;
                }
                curseg++;
                nsegs--;
        }

        if (size != 0) {
                panic("_dmamap_sync: ran off map!");
        }
}
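
/*
 * Illustrative ordering of sync operations around one bidirectional
 * transfer (hypothetical driver code; offset and length are relative
 * to the start of the mapping):
 *
 *      bus_dmamap_sync(sc->sc_dmat, map, 0, len,
 *          BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *      ... start the device, wait for the completion interrupt ...
 *      bus_dmamap_sync(sc->sc_dmat, map, 0, len,
 *          BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 */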

/*
 * Common function for DMA-safe memory allocation. May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
        return _dmamem_alloc_range(t, size, alignment, boundary,
            segs, nsegs, rsegs, flags, (paddr_t)0, (paddr_t)-1);
}

/*
 * Common function for freeing DMA-safe memory. May be called by
 * bus-specific DMA memory free functions.
 */
void
_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
        vm_page_t m;
        bus_addr_t addr;
        struct pglist mlist;
        int curseg;

        /*
         * Build a list of pages to free back to the VM system.
         */
        TAILQ_INIT(&mlist);
        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = segs[curseg].ds_addr;
                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    addr += PAGE_SIZE) {
                        m = PHYS_TO_VM_PAGE((*t->_device_to_pa)(addr));
                        TAILQ_INSERT_TAIL(&mlist, m, pageq);
                }
        }

        uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory. May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
        vaddr_t va, sva;
        size_t ssize;
        paddr_t pa;
        bus_addr_t addr;
        int curseg, error, pmap_flags;
        const struct kmem_dyn_mode *kd;

        if (nsegs == 1) {
                pa = (*t->_device_to_pa)(segs[0].ds_addr);
                if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
                        *kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_NC);
                else
                        *kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
                return (0);
        }

        size = round_page(size);
        kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
        va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
        if (va == 0)
                return (ENOMEM);

        *kvap = (caddr_t)va;
        sva = va;
        ssize = size;
        pmap_flags = PMAP_WIRED | PMAP_CANFAIL;
        if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
                pmap_flags |= PMAP_NOCACHE;
        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = segs[curseg].ds_addr;
                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    addr += NBPG, va += NBPG, size -= NBPG) {
                        if (size == 0)
                                panic("_dmamem_map: size botch");
                        pa = (*t->_device_to_pa)(addr);
                        error = pmap_enter(pmap_kernel(), va, pa,
                            PROT_READ | PROT_WRITE,
                            PROT_READ | PROT_WRITE | pmap_flags);
                        if (error) {
                                pmap_update(pmap_kernel());
                                km_free((void *)sva, ssize, &kv_any, &kp_none);
                                return (error);
                        }

                        /*
                         * This is redundant with what pmap_enter() did
                         * above, but will take care of forcing other
                         * mappings of the same page (if any) to be
                         * uncached.
                         * If there are no multiple mappings of that
                         * page, this amounts to a noop.
                         */
                        if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
                                pmap_page_cache(PHYS_TO_VM_PAGE(pa),
                                    PGF_UNCACHED);
                }
                pmap_update(pmap_kernel());
        }

        return (0);
}
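
/*
 * Illustrative allocation sequence (hypothetical driver code) for a
 * coherent descriptor ring; `sc' and `ring_size' are stand-ins:
 *
 *      bus_dma_segment_t seg;
 *      int rseg;
 *      caddr_t kva;
 *
 *      if (bus_dmamem_alloc(sc->sc_dmat, ring_size, PAGE_SIZE, 0,
 *          &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
 *              goto fail;
 *      if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, ring_size, &kva,
 *          BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0)
 *              goto fail_free;
 */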

/*
 * Common function for unmapping DMA-safe memory. May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{
        if (IS_XKPHYS((vaddr_t)kva))
                return;

        km_free(kva, round_page(size), &kv_any, &kp_none);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory. May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
        int i;

        for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
                if (off & PGOFSET)
                        panic("_dmamem_mmap: offset unaligned");
                if (segs[i].ds_addr & PGOFSET)
                        panic("_dmamem_mmap: segment unaligned");
                if (segs[i].ds_len & PGOFSET)
                        panic("_dmamem_mmap: segment size not multiple"
                            " of page size");
#endif
                if (off >= segs[i].ds_len) {
                        off -= segs[i].ds_len;
                        continue;
                }

                return ((*t->_device_to_pa)(segs[i].ds_addr) + off);
        }

        /* Page not found. */
        return (-1);
}
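
/*
 * Illustrative sketch (hypothetical driver code): a character device's
 * mmap entry point would return the physical address backing `off':
 *
 *      paddr_t
 *      foo_mmap(dev_t dev, off_t off, int prot)
 *      {
 *              struct foo_softc *sc = ...;
 *
 *              return bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs,
 *                  sc->sc_nsegs, off, prot, 0);
 *      }
 */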

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer. lastaddrp holds state
 * between invocations (for multiple-buffer loads). segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
        bus_size_t sgsize;
        bus_addr_t lastaddr, baddr, bmask;
        paddr_t curaddr;
        vaddr_t vaddr = (vaddr_t)buf;
        int seg;
        pmap_t pmap;

        if (p != NULL)
                pmap = p->p_vmspace->vm_map.pmap;
        else
                pmap = pmap_kernel();

        lastaddr = *lastaddrp;
        bmask = ~(map->_dm_boundary - 1);
        if (t->_dma_mask != 0)
                bmask &= t->_dma_mask;

        for (seg = *segp; buflen > 0; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap_extract(pmap, vaddr, &curaddr) == FALSE)
                        panic("_dmamap_load_buffer: pmap_extract(%p, %lx) failed!",
                            pmap, vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = NBPG - ((u_long)vaddr & PGOFSET);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (map->_dm_boundary > 0) {
                        baddr = ((bus_addr_t)curaddr + map->_dm_boundary) &
                            bmask;
                        if (sgsize > (baddr - (bus_addr_t)curaddr))
                                sgsize = (baddr - (bus_addr_t)curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                if (first) {
                        map->dm_segs[seg].ds_addr =
                            (*t->_pa_to_device)(curaddr);
                        map->dm_segs[seg].ds_len = sgsize;
                        map->dm_segs[seg]._ds_paddr = curaddr;
                        map->dm_segs[seg]._ds_vaddr = vaddr;
                        first = 0;
                } else {
                        if ((bus_addr_t)curaddr == lastaddr &&
                            (map->dm_segs[seg].ds_len + sgsize) <=
                             map->_dm_maxsegsz &&
                            (map->_dm_boundary == 0 ||
                             (map->dm_segs[seg].ds_addr & bmask) ==
                             ((bus_addr_t)curaddr & bmask)))
                                map->dm_segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= map->_dm_segcnt)
                                        break;
                                map->dm_segs[seg].ds_addr =
                                    (*t->_pa_to_device)(curaddr);
                                map->dm_segs[seg].ds_len = sgsize;
                                map->dm_segs[seg]._ds_paddr = curaddr;
                                map->dm_segs[seg]._ds_vaddr = vaddr;
                        }
                }

                lastaddr = (bus_addr_t)curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }
        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        if (buflen != 0)
                return (EFBIG);         /* XXX better return value here? */

        return (0);
}
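
/*
 * Worked example of the boundary clamp above (assumed values, for
 * illustration only): with _dm_boundary = 0x10000, bmask is ~0xffff.
 * For curaddr = 0x1fc00, baddr = (0x1fc00 + 0x10000) & ~0xffff =
 * 0x20000, so sgsize is clamped to 0x20000 - 0x1fc00 = 0x400 bytes and
 * the following chunk starts a new segment at the boundary.
 */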

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
        paddr_t curaddr, lastaddr;
        vm_page_t m;
        struct pglist mlist;
        int curseg, error, plaflag;

        /* Always round the size. */
        size = round_page(size);

        /*
         * Allocate pages from the VM system.
         */
        plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
        if (flags & BUS_DMA_ZERO)
                plaflag |= UVM_PLA_ZERO;
        TAILQ_INIT(&mlist);
        error = uvm_pglistalloc(size, low, high, alignment, boundary,
            &mlist, nsegs, plaflag);
        if (error)
                return (error);

        /*
         * Compute the location, size, and number of segments actually
         * returned by the VM code.
         */
        m = TAILQ_FIRST(&mlist);
        curseg = 0;
        lastaddr = segs[curseg].ds_addr =
            (*t->_pa_to_device)(VM_PAGE_TO_PHYS(m));
        segs[curseg].ds_len = PAGE_SIZE;
        m = TAILQ_NEXT(m, pageq);

        for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
                curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
                if (curaddr < low || curaddr >= high) {
                        printf("vm_page_alloc_memory returned non-sensical"
                            " address 0x%lx\n", curaddr);
                        panic("_dmamem_alloc_range");
                }
#endif
                curaddr = (*t->_pa_to_device)(curaddr);
                if (curaddr == (lastaddr + PAGE_SIZE))
                        segs[curseg].ds_len += PAGE_SIZE;
                else {
                        curseg++;
                        segs[curseg].ds_addr = curaddr;
                        segs[curseg].ds_len = PAGE_SIZE;
                }
                lastaddr = curaddr;
        }

        *rsegs = curseg + 1;

        return (0);
}
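
/*
 * Worked example of the coalescing above (assumed 4KB pages and an
 * identity _pa_to_device translation, for illustration only): pages at
 * physical addresses 0x4000, 0x5000 and 0x9000 yield two segments,
 * { ds_addr 0x4000, ds_len 0x2000 } and { ds_addr 0x9000, ds_len
 * 0x1000 }, so *rsegs is set to 2.
 */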