/* if_le_ioasic.c */
  1. /* $OpenBSD: if_le_ioasic.c,v 1.17 2014/12/22 02:28:52 tedu Exp $ */
  2. /* $NetBSD: if_le_ioasic.c,v 1.18 2001/11/13 06:26:10 lukem Exp $ */
  3. /*
  4. * Copyright (c) 1996 Carnegie-Mellon University.
  5. * All rights reserved.
  6. *
  7. * Author: Chris G. Demetriou
  8. *
  9. * Permission to use, copy, modify and distribute this software and
  10. * its documentation is hereby granted, provided that both the copyright
  11. * notice and this permission notice appear in all copies of the
  12. * software, derivative works or modified versions, and any portions
  13. * thereof, and that both notices appear in supporting documentation.
  14. *
  15. * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  16. * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
  17. * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  18. *
  19. * Carnegie Mellon requests users of this software to return to
  20. *
  21. * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  22. * School of Computer Science
  23. * Carnegie Mellon University
  24. * Pittsburgh PA 15213-3890
  25. *
  26. * any improvements or extensions that they make and grant Carnegie the
  27. * rights to redistribute these changes.
  28. */
  29. /*
  30. * LANCE on DEC IOCTL ASIC.
  31. */
  32. #include <sys/param.h>
  33. #include <sys/systm.h>
  34. #include <sys/mbuf.h>
  35. #include <sys/syslog.h>
  36. #include <sys/socket.h>
  37. #include <sys/device.h>
  38. #include <net/if.h>
  39. #include <net/if_media.h>
  40. #include <netinet/in.h>
  41. #include <netinet/if_ether.h>
  42. #include <dev/ic/lancereg.h>
  43. #include <dev/ic/lancevar.h>
  44. #include <dev/ic/am7990reg.h>
  45. #include <dev/ic/am7990var.h>
  46. #include <dev/tc/if_levar.h>
  47. #include <dev/tc/tcvar.h>
  48. #include <dev/tc/ioasicreg.h>
  49. #include <dev/tc/ioasicvar.h>
/*
 * Per-device softc for a LANCE attached via the DEC IOASIC.
 * The MI am7990 softc must come first so the MI code can cast to it.
 */
struct le_ioasic_softc {
	struct am7990_softc sc_am7990;	/* glue to MI code */
	struct lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match with le_softc of if_levar.h XXX */
	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap for the 128KB buffer */
};

int le_ioasic_match(struct device *, void *, void *);
void le_ioasic_attach(struct device *, struct device *, void *);

/* autoconf(9) attachment glue for the "le at ioasic" instance */
struct cfattach le_ioasic_ca = {
	sizeof(struct le_softc), le_ioasic_match, le_ioasic_attach
};

/* gap2/gap16 buffer access methods handed to the MI lance code */
void le_ioasic_copytobuf_gap2(struct lance_softc *, void *, int, int);
void le_ioasic_copyfrombuf_gap2(struct lance_softc *, void *, int, int);
void le_ioasic_copytobuf_gap16(struct lance_softc *, void *, int, int);
void le_ioasic_copyfrombuf_gap16(struct lance_softc *, void *, int, int);
void le_ioasic_zerobuf_gap16(struct lance_softc *, int, int);
  67. int
  68. le_ioasic_match(struct device *parent, void *match, void *aux)
  69. {
  70. struct ioasicdev_attach_args *d = aux;
  71. if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
  72. return 0;
  73. return 1;
  74. }
  75. /* IOASIC LANCE DMA needs 128KB boundary aligned 128KB chunk */
  76. #define LE_IOASIC_MEMSIZE (128*1024)
  77. #define LE_IOASIC_MEMALIGN (128*1024)
  78. void
  79. le_ioasic_attach(struct device *parent, struct device *self, void *aux)
  80. {
  81. struct le_ioasic_softc *sc = (void *)self;
  82. struct ioasicdev_attach_args *d = aux;
  83. struct lance_softc *le = &sc->sc_am7990.lsc;
  84. bus_space_tag_t ioasic_bst;
  85. bus_space_handle_t ioasic_bsh;
  86. bus_dma_tag_t dmat;
  87. bus_dma_segment_t seg;
  88. tc_addr_t tca;
  89. u_int32_t ssr;
  90. int rseg;
  91. caddr_t le_iomem;
  92. ioasic_bst = ((struct ioasic_softc *)parent)->sc_bst;
  93. ioasic_bsh = ((struct ioasic_softc *)parent)->sc_bsh;
  94. dmat = sc->sc_dmat = ((struct ioasic_softc *)parent)->sc_dmat;
  95. /*
  96. * Allocate a DMA area for the chip.
  97. */
  98. if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
  99. 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
  100. printf("can't allocate DMA area for LANCE\n");
  101. return;
  102. }
  103. if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
  104. &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
  105. printf("can't map DMA area for LANCE\n");
  106. bus_dmamem_free(dmat, &seg, rseg);
  107. return;
  108. }
  109. /*
  110. * Create and load the DMA map for the DMA area.
  111. */
  112. if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
  113. LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
  114. printf("can't create DMA map\n");
  115. goto bad;
  116. }
  117. if (bus_dmamap_load(dmat, sc->sc_dmamap,
  118. le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
  119. printf("can't load DMA map\n");
  120. goto bad;
  121. }
  122. /*
  123. * Bind 128KB buffer with IOASIC DMA.
  124. */
  125. tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr);
  126. bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
  127. ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
  128. ssr |= IOASIC_CSR_DMAEN_LANCE;
  129. bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);
  130. sc->sc_r1 = (struct lereg1 *)
  131. TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
  132. le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
  133. le->sc_copytodesc = le_ioasic_copytobuf_gap2;
  134. le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
  135. le->sc_copytobuf = le_ioasic_copytobuf_gap16;
  136. le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
  137. le->sc_zerobuf = le_ioasic_zerobuf_gap16;
  138. dec_le_common_attach(&sc->sc_am7990,
  139. (u_char *)((struct ioasic_softc *)parent)->sc_base
  140. + IOASIC_SLOT_2_START);
  141. ioasic_intr_establish(parent, d->iada_cookie, IPL_NET,
  142. am7990_intr, sc, self->dv_xname);
  143. return;
  144. bad:
  145. bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
  146. bus_dmamem_free(dmat, &seg, rseg);
  147. }
  148. /*
  149. * Special memory access functions needed by ioasic-attached LANCE
  150. * chips.
  151. */
  152. /*
  153. * gap2: two bytes of data followed by two bytes of pad.
  154. *
  155. * Buffers must be 4-byte aligned. The code doesn't worry about
  156. * doing an extra byte.
  157. */
  158. void
  159. le_ioasic_copytobuf_gap2(struct lance_softc *sc, void *fromv,
  160. int boff, int len)
  161. {
  162. volatile caddr_t buf = sc->sc_mem;
  163. caddr_t from = fromv;
  164. volatile u_int16_t *bptr;
  165. if (boff & 0x1) {
  166. /* handle unaligned first byte */
  167. bptr = ((volatile u_int16_t *)buf) + (boff - 1);
  168. *bptr = (*from++ << 8) | (*bptr & 0xff);
  169. bptr += 2;
  170. len--;
  171. } else
  172. bptr = ((volatile u_int16_t *)buf) + boff;
  173. while (len > 1) {
  174. *bptr = (from[1] << 8) | (from[0] & 0xff);
  175. bptr += 2;
  176. from += 2;
  177. len -= 2;
  178. }
  179. if (len == 1)
  180. *bptr = (u_int16_t)*from;
  181. }
  182. void
  183. le_ioasic_copyfrombuf_gap2(struct lance_softc *sc, void *tov,
  184. int boff, int len)
  185. {
  186. volatile caddr_t buf = sc->sc_mem;
  187. caddr_t to = tov;
  188. volatile u_int16_t *bptr;
  189. u_int16_t tmp;
  190. if (boff & 0x1) {
  191. /* handle unaligned first byte */
  192. bptr = ((volatile u_int16_t *)buf) + (boff - 1);
  193. *to++ = (*bptr >> 8) & 0xff;
  194. bptr += 2;
  195. len--;
  196. } else
  197. bptr = ((volatile u_int16_t *)buf) + boff;
  198. while (len > 1) {
  199. tmp = *bptr;
  200. *to++ = tmp & 0xff;
  201. *to++ = (tmp >> 8) & 0xff;
  202. bptr += 2;
  203. len -= 2;
  204. }
  205. if (len == 1)
  206. *to = *bptr & 0xff;
  207. }
  208. /*
  209. * gap16: 16 bytes of data followed by 16 bytes of pad.
  210. *
  211. * Buffers must be 32-byte aligned.
  212. */
/*
 * Copy len bytes from fromv into the gap16 packet buffer at LANCE byte
 * offset boff.  Each 32-byte chunk of the DMA area holds 16 data bytes
 * followed by 16 pad bytes, so host offsets are twice the LANCE
 * offsets, rounded to chunk boundaries.
 */
void
le_ioasic_copytobuf_gap16(struct lance_softc *sc, void *fromv,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	caddr_t bptr;

	/* Chunk base for boff: double the offset, round down to 32. */
	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
		switch ((u_long)from & (sizeof(u_int32_t) -1)) {
		case 2:
			/* Ethernet headers make this the dominant case. */
			do {
				u_int32_t *dst = (u_int32_t*)bptr;
				u_int16_t t0;
				u_int32_t t1, t2, t3, t4;

				/* read from odd-16-bit-aligned, cached src */
				t0 = *(u_int16_t*)from;
				t1 = *(u_int32_t*)(from+2);
				t2 = *(u_int32_t*)(from+6);
				t3 = *(u_int32_t*)(from+10);
				t4 = *(u_int16_t*)(from+14);

				/*
				 * DMA buffer is uncached on mips; assemble
				 * aligned 32-bit stores from the shifted
				 * halves so the uncached area only sees
				 * word-sized accesses.
				 */
				dst[0] = t0 | (t1 << 16);
				dst[1] = (t1 >> 16) | (t2 << 16);
				dst[2] = (t2 >> 16) | (t3 << 16);
				dst[3] = (t3 >> 16) | (t4 << 16);

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;
		case 0:
			/* Source fully 32-bit aligned: straight word copy. */
			do {
				u_int32_t *src = (u_int32_t*)from;
				u_int32_t *dst = (u_int32_t*)bptr;
				u_int32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;
		default:
			/* Does odd-aligned case ever happen? */
			do {
				bcopy(from, bptr, 16);
				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;
		}

	/* Tail of fewer than 16 bytes fits inside the current chunk. */
	if (len)
		bcopy(from, bptr, len);
}
/*
 * Copy len bytes out of the gap16 packet buffer at LANCE byte offset
 * boff into tov.  Mirror of le_ioasic_copytobuf_gap16: 16 data bytes
 * per 32-byte chunk, reads from the uncached DMA area are kept to
 * aligned 32-bit loads in the common cases.
 */
void
le_ioasic_copyfrombuf_gap16(struct lance_softc *sc, void *tov,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	caddr_t bptr;

	/* Chunk base for boff: double the offset, round down to 32. */
	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/* Dispose of boff. source of copy is subsequently 16-byte aligned. */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr+boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}

	/*
	 * NOTE: the unrolled loops below run while len > 16 (strictly),
	 * so a final chunk of exactly 16 bytes is left for the trailing
	 * bcopy at the bottom.
	 */
	if (len >= 16)
		switch ((u_long)to & (sizeof(u_int32_t) -1)) {
		case 2:
			/*
			 * to is aligned to an odd 16-bit boundary. Ethernet headers
			 * make this the dominant case (98% or more).
			 */
			do {
				u_int32_t *src = (u_int32_t*)bptr;
				u_int32_t t0, t1, t2, t3;

				/* read from uncached aligned DMA buf */
				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

				/* write to odd-16-bit-word aligned dst */
				*(u_int16_t *) (to+0) = (u_short) t0;
				*(u_int32_t *) (to+2) = (t0 >> 16) | (t1 << 16);
				*(u_int32_t *) (to+6) = (t1 >> 16) | (t2 << 16);
				*(u_int32_t *) (to+10) = (t2 >> 16) | (t3 << 16);
				*(u_int16_t *) (to+14) = (t3 >> 16);

				bptr += 32;
				to += 16;
				len -= 16;
			} while (len > 16);
			break;
		case 0:
			/* 32-bit aligned aligned copy. Rare. */
			do {
				u_int32_t *src = (u_int32_t*)bptr;
				u_int32_t *dst = (u_int32_t*)to;
				u_int32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;
		/* XXX Does odd-byte-aligned case ever happen? */
		default:
			do {
				bcopy(bptr, to, 16);
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;
		}

	/* Remaining 1..16 bytes lie within the current chunk. */
	if (len)
		bcopy(bptr, to, len);
}
  351. void
  352. le_ioasic_zerobuf_gap16(struct lance_softc *sc, int boff, int len)
  353. {
  354. volatile caddr_t buf = sc->sc_mem;
  355. caddr_t bptr;
  356. int xfer;
  357. bptr = buf + ((boff << 1) & ~0x1f);
  358. boff &= 0xf;
  359. xfer = min(len, 16 - boff);
  360. while (len > 0) {
  361. bzero(bptr + boff, xfer);
  362. bptr += 32;
  363. boff = 0;
  364. len -= xfer;
  365. xfer = min(len, 16);
  366. }
  367. }