dma.h
/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _BRCM_DMA_H_
#define _BRCM_DMA_H_

#include <linux/delay.h>
#include <linux/skbuff.h>
#include "types.h"		/* forward structure declarations */

/* map/unmap direction */
#define DMA_TX	1		/* TX direction for DMA */
#define DMA_RX	2		/* RX direction for DMA */

/* DMA structure:
 *  supports two DMA engines: 32-bit or 64-bit addressing
 *  the basic DMA register set is per channel (transmit or receive)
 *  a pair of channels is defined for convenience
 */

/* 32-bit addressing */

struct dma32diag {		/* diag access */
	u32 fifoaddr;		/* diag address */
	u32 fifodatalow;	/* low 32 bits of data */
	u32 fifodatahigh;	/* high 32 bits of data */
	u32 pad;		/* reserved */
};
/* 64-bit addressing */

/* dma registers per channel (xmt or rcv) */
struct dma64regs {
	u32 control;		/* enable, et al */
	u32 ptr;		/* last descriptor posted to chip */
	u32 addrlow;		/* desc ring base address low 32 bits (8K aligned) */
	u32 addrhigh;		/* desc ring base address bits 63:32 (8K aligned) */
	u32 status0;		/* current descriptor, xmt state */
	u32 status1;		/* active descriptor, xmt error */
};
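/*
 * Illustrative sketch (not part of the original header): the addrlow/addrhigh
 * comments above imply that a 64-bit descriptor ring base is split across two
 * registers. A helper along these lines could program it; the helper name and
 * the __iomem mapping are assumptions for illustration only.
 */
#if 0	/* example only, not compiled */
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

static void dma64_set_ring_base(struct dma64regs __iomem *regs, dma_addr_t pa)
{
	/* the ring base must be 8K aligned per the register comments above */
	WARN_ON(pa & 0x1fff);

	writel(lower_32_bits(pa), &regs->addrlow);	/* low 32 bits */
	writel(upper_32_bits(pa), &regs->addrhigh);	/* bits 63:32 */
}
#endif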
/* range param for dma_getnexttxp() and dma_txreclaim() */
enum txd_range {
	DMA_RANGE_ALL = 1,
	DMA_RANGE_TRANSMITTED,
	DMA_RANGE_TRANSFERED
};
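/*
 * Illustrative sketch (not part of the original header): a typical
 * tx-completion path might walk the descriptors the hardware reports as
 * transmitted and free the associated buffers. The function name and the
 * surrounding call site are assumptions for illustration only.
 */
#if 0	/* example only, not compiled */
#include <linux/skbuff.h>

static void example_tx_complete(struct dma_pub *pub)
{
	struct sk_buff *skb;

	/* reclaim every packet the chip has already transmitted */
	while ((skb = dma_getnexttxp(pub, DMA_RANGE_TRANSMITTED)) != NULL)
		dev_kfree_skb_any(skb);
}
#endif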
/*
 * Exported data structure (read-only)
 */

/* export structure */
struct dma_pub {
	uint txavail;		/* # free tx descriptors */
	uint dmactrlflags;	/* dma control flags */

	/* rx error counters */
	uint rxgiants;		/* rx giant frames */
	uint rxnobuf;		/* rx out of dma descriptors */
	/* tx error counters */
	uint txnobuf;		/* tx out of dma descriptors */
};
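/*
 * Illustrative sketch (not part of the original header): txavail lets callers
 * apply flow control before posting a frame with dma_txfast(). The helper name
 * and queueing policy are assumptions for illustration only.
 */
#if 0	/* example only, not compiled */
#include <linux/errno.h>
#include <linux/skbuff.h>

static int example_tx_enqueue(struct brcms_c_info *wlc, struct dma_pub *pub,
			      struct sk_buff *skb)
{
	/* back off if the ring has no free tx descriptors left */
	if (pub->txavail == 0)
		return -EBUSY;

	return dma_txfast(wlc, pub, skb);
}
#endif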
extern struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
				  uint txregbase, uint rxregbase,
				  uint ntxd, uint nrxd,
				  uint rxbufsize, int rxextheadroom,
				  uint nrxpost, uint rxoffset);

void dma_rxinit(struct dma_pub *pub);
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
bool dma_rxfill(struct dma_pub *pub);
bool dma_rxreset(struct dma_pub *pub);
bool dma_txreset(struct dma_pub *pub);
void dma_txinit(struct dma_pub *pub);
int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
	       struct sk_buff *p0);
void dma_txflush(struct dma_pub *pub);
int dma_txpending(struct dma_pub *pub);
void dma_kick_tx(struct dma_pub *pub);
void dma_txsuspend(struct dma_pub *pub);
bool dma_txsuspended(struct dma_pub *pub);
void dma_txresume(struct dma_pub *pub);
void dma_txreclaim(struct dma_pub *pub, enum txd_range range);
void dma_rxreclaim(struct dma_pub *pub);
void dma_detach(struct dma_pub *pub);
unsigned long dma_getvar(struct dma_pub *pub, const char *name);
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range);
void dma_counterreset(struct dma_pub *pub);

void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a);
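/*
 * Illustrative sketch (not part of the original header): a plausible receive
 * path using the prototypes above - dma_rx() drains completed frames into an
 * skb list and dma_rxfill() replenishes the ring. The function name, the
 * rx_frames list, and the frame handling are assumptions for illustration
 * only; a real driver would pass the frames up the stack rather than free
 * them.
 */
#if 0	/* example only, not compiled */
#include <linux/skbuff.h>

static void example_rx_poll(struct dma_pub *pub)
{
	struct sk_buff_head rx_frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&rx_frames);

	/* collect completed rx packets, then top the ring back up */
	dma_rx(pub, &rx_frames);
	dma_rxfill(pub);

	while ((skb = __skb_dequeue(&rx_frames)) != NULL)
		dev_kfree_skb_any(skb);
}
#endif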
/*
 * DMA bug on bcm47xx chips: the hardware may declare that a packet is ready
 * before it has written the packet length into the buffer. The workaround is
 * to hold the processor until DMA has updated the length, staying off the bus
 * so DMA can complete the update.
 */
static inline void dma_spin_for_len(uint len, struct sk_buff *head)
{
#if defined(CONFIG_BCM47XX)
	if (!len) {
		while (!(len = *(u16 *) KSEG1ADDR(head->data)))
			udelay(1);

		*(u16 *) (head->data) = cpu_to_le16((u16) len);
	}
#endif				/* defined(CONFIG_BCM47XX) */
}
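/*
 * Illustrative sketch (not part of the original header): the receive path is
 * expected to read the DMA-written length word at the head of the buffer and
 * pass it to dma_spin_for_len() before trusting it. The exact call site and
 * helper name are assumptions for illustration only.
 */
#if 0	/* example only, not compiled */
static uint example_rx_len(struct sk_buff *skb)
{
	uint len = le16_to_cpu(*(__le16 *) (skb->data));

	/* on CONFIG_BCM47XX, wait for DMA to actually write the length */
	dma_spin_for_len(len, skb);

	/* re-read: dma_spin_for_len() may have updated the buffer in place */
	return le16_to_cpu(*(__le16 *) (skb->data));
}
#endif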
#endif				/* _BRCM_DMA_H_ */