/* octeon_main.h -- host-driver support definitions for the Cavium
 * LiquidIO (Octeon) network adapter.
 */
#ifndef _OCTEON_MAIN_H_
#define _OCTEON_MAIN_H_

/* CVM_CAST64(): cast a value to long long — presumably so values that
 * may be pointer-sized can be printed with %ll format specifiers on
 * both 32-bit and 64-bit kernels (TODO confirm against call sites).
 * On 64-bit, the intermediate (long) cast truncates/sign-extends
 * through the native word size first. */
#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif

/* Name under which PCI regions are requested (see octeon_map_pci_barx). */
#define DRV_NAME "LiquidIO"
/* Bookkeeping attached to a transmitted buffer so that every resource
 * associated with it can be released when transmission completes. */
struct octnet_buf_free_info {
	/* per-interface private state this buffer belongs to */
	struct lio *lio;

	/* socket buffer to hand back to the network stack */
	struct sk_buff *skb;

	/* scatter/gather list used for this packet, if any */
	struct octnic_gather *g;

	/* NOTE(review): presumably the DMA-mapped address of the data;
	 * confirm against the code that fills this in */
	u64 dptr;

	/* soft command, when the buffer was sent as one */
	struct octeon_soft_command *sc;
};
/* TX-completion accounting helpers, implemented elsewhere in the
 * driver.  "bql" refers to Byte Queue Limits: these feed completed
 * packet/byte counts back to the stack's queue-limit logic. */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);
- static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
- {
- while (blocks) {
- cpu_to_be64s(data);
- blocks--;
- data++;
- }
- }
/* Undo octeon_map_pci_barx(): unmap the BAR and release its PCI region.
 * Each teardown step is gated independently (.done for the ioremap,
 * .start for the region), so this is safe to call for a BAR that was
 * only partially set up. */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	/* BARs live at even PCI resource indices: resource = baridx * 2 */
	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}
- static inline int octeon_map_pci_barx(struct octeon_device *oct,
- int baridx, int max_map_len)
- {
- u32 mapped_len = 0;
- if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
- dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
- baridx);
- return 1;
- }
- oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
- oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);
- mapped_len = oct->mmio[baridx].len;
- if (!mapped_len)
- return 1;
- if (max_map_len && (mapped_len > max_map_len))
- mapped_len = max_map_len;
- oct->mmio[baridx].hw_addr =
- ioremap(oct->mmio[baridx].start, mapped_len);
- oct->mmio[baridx].mapped_len = mapped_len;
- dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
- baridx, oct->mmio[baridx].start, mapped_len,
- oct->mmio[baridx].len);
- if (!oct->mmio[baridx].hw_addr) {
- dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
- baridx);
- return 1;
- }
- oct->mmio[baridx].done = 1;
- return 0;
- }
- static inline void *
- cnnic_numa_alloc_aligned_dma(u32 size,
- u32 *alloc_size,
- size_t *orig_ptr,
- int numa_node)
- {
- int retries = 0;
- void *ptr = NULL;
- #define OCTEON_MAX_ALLOC_RETRIES 1
- do {
- struct page *page = NULL;
- page = alloc_pages_node(numa_node,
- GFP_KERNEL,
- get_order(size));
- if (!page)
- page = alloc_pages(GFP_KERNEL,
- get_order(size));
- ptr = (void *)page_address(page);
- if ((unsigned long)ptr & 0x07) {
- __free_pages(page, get_order(size));
- ptr = NULL;
-
- if (!retries)
- size += 7;
- }
- retries++;
- } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);
- *alloc_size = size;
- *orig_ptr = (unsigned long)ptr;
- if ((unsigned long)ptr & 0x07)
- ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
- return ptr;
- }
/* Free memory obtained from cnnic_numa_alloc_aligned_dma().  Pass the
 * 'orig_ptr' (unaligned base) value, not the aligned pointer.  The
 * pci_dev, ptr and dma_addr arguments are unused by this
 * implementation and kept only for the call-site signature. */
#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
	free_pages(orig_ptr, get_order(size))
/* Sleep on a wait queue until *condition becomes non-zero or a signal
 * is delivered.
 *
 * @wait_queue: queue the waker signals after setting *condition
 * @condition:  completion flag, read with READ_ONCE() since it is
 *              written by another context
 *
 * Return: 0 once the condition is true, -EINTR if a signal arrived
 * first.
 */
static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
	int errno = 0;
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	while (!(READ_ONCE(*condition))) {
		/* Mark ourselves sleeping BEFORE checking for signals /
		 * calling schedule(), so a wake-up racing with this
		 * window is not lost. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			errno = -EINTR;
			goto out;
		}
		schedule();
	}
out:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
	return errno;
}
/* Sleep on a wait queue until the atomic counter *pcond becomes
 * non-zero.
 *
 * @waitq: queue the waker signals after incrementing *pcond
 * @pcond: atomic completion flag/counter
 *
 * NOTE(review): unlike sleep_cond(), a pending signal silently ends the
 * wait with no indication to the caller — callers presumably re-check
 * the condition themselves; confirm at the call sites.
 */
static inline void
sleep_atomic_cond(wait_queue_head_t *waitq, atomic_t *pcond)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(waitq, &we);
	while (!atomic_read(pcond)) {
		/* sleeping state must be set before the signal check so
		 * a concurrent wake-up is not lost */
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			goto out;
		schedule();
	}
out:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(waitq, &we);
}
- static inline void
- sleep_timeout_cond(wait_queue_head_t *wait_queue,
- int *condition,
- int timeout)
- {
- wait_queue_t we;
- init_waitqueue_entry(&we, current);
- add_wait_queue(wait_queue, &we);
- set_current_state(TASK_INTERRUPTIBLE);
- if (!(*condition))
- schedule_timeout(timeout);
- set_current_state(TASK_RUNNING);
- remove_wait_queue(wait_queue, &we);
- }
/* Round 'val' up to the next multiple of 4 / 8 / 16 / 128.
 *
 * The masks are written as ~0x3, ~0x7, ... rather than 32-bit literals
 * (0xfffffffc, ...) so they sign-extend to the operand's width: with a
 * 64-bit operand the old literal masks would have cleared the upper 32
 * bits.  For 32-bit-or-narrower operands the result is identical. */
#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & ~0x3)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & ~0x7)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & ~0xf)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & ~0x7f)
#endif
- #endif