- // SPDX-License-Identifier: GPL-2.0
- /*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
- #include <linux/platform_device.h>
- #include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/netdevice.h>
- #include <linux/etherdevice.h>
- #include <linux/phy.h>
- #include <linux/slab.h>
- #include <linux/interrupt.h>
- #include <linux/of_net.h>
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <net/dst.h>
- #include <asm/octeon/octeon.h>
- #include "ethernet-defines.h"
- #include "octeon-ethernet.h"
- #include "ethernet-mem.h"
- #include "ethernet-rx.h"
- #include "ethernet-tx.h"
- #include "ethernet-mdio.h"
- #include "ethernet-util.h"
- #include <asm/octeon/cvmx-pip.h>
- #include <asm/octeon/cvmx-pko.h>
- #include <asm/octeon/cvmx-fau.h>
- #include <asm/octeon/cvmx-ipd.h>
- #include <asm/octeon/cvmx-helper.h>
- #include <asm/octeon/cvmx-asxx-defs.h>
- #include <asm/octeon/cvmx-gmxx-defs.h>
- #include <asm/octeon/cvmx-smix-defs.h>
- #define OCTEON_MAX_MTU 65392
- static int num_packet_buffers = 1024;
- module_param(num_packet_buffers, int, 0444);
- MODULE_PARM_DESC(num_packet_buffers, "\n"
- "\tNumber of packet buffers to allocate and store in the\n"
- "\tFPA. By default, 1024 packet buffers are used.\n");
- static int pow_receive_group = 15;
- module_param(pow_receive_group, int, 0444);
- MODULE_PARM_DESC(pow_receive_group, "\n"
- "\tPOW group to receive packets from. All ethernet hardware\n"
- "\twill be configured to send incoming packets to this POW\n"
- "\tgroup. Also any other software can submit packets to this\n"
- "\tgroup for the kernel to process.");
- static int receive_group_order;
- module_param(receive_group_order, int, 0444);
- MODULE_PARM_DESC(receive_group_order, "\n"
- "\tOrder (0..4) of receive groups to use. The ethernet hardware\n"
- "\twill be configured to spread incoming packets across multiple POW\n"
- "\tgroups, allocated starting from 0. The pow_receive_group\n"
- "\tparameter is ignored when multiple groups are in use.\n"
- "\tBy default, a single group is used.\n");
- int pow_send_group = -1;
- module_param(pow_send_group, int, 0644);
- MODULE_PARM_DESC(pow_send_group, "\n"
- "\tPOW group to send packets to other software on. This\n"
- "\tcontrols the creation of the virtual device pow0.\n"
- "\talways_use_pow also depends on this value.");
- int always_use_pow;
- module_param(always_use_pow, int, 0444);
- MODULE_PARM_DESC(always_use_pow, "\n"
- "\tWhen set, always send to the pow group. This will cause\n"
- "\tpackets sent to real ethernet devices to be sent to the\n"
- "\tPOW group instead of the hardware. Unless some other\n"
- "\tapplication changes the config, packets will still be\n"
- "\treceived from the low level hardware. Use this option\n"
- "\tto allow a CVMX app to intercept all packets from the\n"
- "\tlinux kernel. You must specify pow_send_group along with\n"
- "\tthis option.");
- char pow_send_list[128] = "";
- module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
- MODULE_PARM_DESC(pow_send_list, "\n"
- "\tComma separated list of ethernet devices that should use the\n"
- "\tPOW for transmit instead of the actual ethernet hardware. This\n"
- "\tis a per port version of always_use_pow. always_use_pow takes\n"
- "\tprecedence over this list. For example, setting this to\n"
- "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
- "\tusing the pow_send_group.");
- int rx_napi_weight = 32;
- module_param(rx_napi_weight, int, 0444);
- MODULE_PARM_DESC(rx_napi_weight, "The NAPI weight parameter.");
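- /*
- * Illustrative usage (an added note, not part of the original source):
- * assuming the driver is built as the octeon-ethernet module, the
- * parameters above can be set at load time, for example:
- *
- *   modprobe octeon-ethernet num_packet_buffers=2048 \
- *            pow_send_group=30 pow_send_list="eth2,spi3"
- */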
- /* Mask indicating which receive groups are in use. */
- int pow_receive_groups;
- /*
- * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
- *
- * Set to one right before cvm_oct_poll_queue is destroyed.
- */
- atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
- /*
- * Array of every ethernet device owned by this driver indexed by
- * the ipd input port number.
- */
- struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
- u64 cvm_oct_tx_poll_interval;
- static void cvm_oct_rx_refill_worker(struct work_struct *work);
- static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
- static void cvm_oct_rx_refill_worker(struct work_struct *work)
- {
- /*
- * FPA 0 may have been drained, try to refill it if we need
- * more than num_packet_buffers / 2, otherwise normal receive
- * processing will refill it. If it were drained, no packets
- * could be received so cvm_oct_napi_poll would never be
- * invoked to do the refill.
- */
- cvm_oct_rx_refill_pool(num_packet_buffers / 2);
- if (!atomic_read(&cvm_oct_poll_queue_stopping))
- schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
- }
- static void cvm_oct_periodic_worker(struct work_struct *work)
- {
- struct octeon_ethernet *priv = container_of(work,
- struct octeon_ethernet,
- port_periodic_work.work);
- if (priv->poll)
- priv->poll(cvm_oct_device[priv->port]);
- cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
- cvm_oct_device[priv->port]);
- if (!atomic_read(&cvm_oct_poll_queue_stopping))
- schedule_delayed_work(&priv->port_periodic_work, HZ);
- }
- static void cvm_oct_configure_common_hw(void)
- {
- /* Setup the FPA */
- cvmx_fpa_enable();
- cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
- num_packet_buffers);
- cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
- num_packet_buffers);
- if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
- cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
- CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);
- #ifdef __LITTLE_ENDIAN
- {
- union cvmx_ipd_ctl_status ipd_ctl_status;
- ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
- ipd_ctl_status.s.pkt_lend = 1;
- ipd_ctl_status.s.wqe_lend = 1;
- cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
- }
- #endif
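- /*
- * Worked example (added note): with the default num_packet_buffers of
- * 1024, the RED thresholds passed below come out to 1024 / 4 = 256 and
- * 1024 / 8 = 128 buffers. The exact pass/drop semantics are those of
- * cvmx_helper_setup_red() in the Octeon SDK.
- */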
- cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
- }
- /**
- * cvm_oct_free_work - Free a work queue entry
- *
- * @work_queue_entry: Work queue entry to free
- *
- * Returns Zero on success, Negative on failure.
- */
- int cvm_oct_free_work(void *work_queue_entry)
- {
- cvmx_wqe_t *work = work_queue_entry;
- int segments = work->word2.s.bufs;
- union cvmx_buf_ptr segment_ptr = work->packet_ptr;
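- /*
- * Added note: the hardware stores a pointer to the next buffer in the
- * 8 bytes immediately preceding each segment's data, which is why the
- * walk below reads the next pointer from segment_ptr.s.addr - 8.
- */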
- while (segments--) {
- union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
- cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
- if (unlikely(!segment_ptr.s.i))
- cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
- segment_ptr.s.pool,
- CVMX_FPA_PACKET_POOL_SIZE / 128);
- segment_ptr = next_ptr;
- }
- cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
- return 0;
- }
- EXPORT_SYMBOL(cvm_oct_free_work);
- /**
- * cvm_oct_common_get_stats - get the low level ethernet statistics
- * @dev: Device to get the statistics from
- *
- * Returns Pointer to the statistics
- */
- static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
- {
- cvmx_pip_port_status_t rx_status;
- cvmx_pko_port_status_t tx_status;
- struct octeon_ethernet *priv = netdev_priv(dev);
- if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
- if (octeon_is_simulation()) {
- /* The simulator doesn't support statistics */
- memset(&rx_status, 0, sizeof(rx_status));
- memset(&tx_status, 0, sizeof(tx_status));
- } else {
- cvmx_pip_get_port_status(priv->port, 1, &rx_status);
- cvmx_pko_get_port_status(priv->port, 1, &tx_status);
- }
- dev->stats.rx_packets += rx_status.inb_packets;
- dev->stats.tx_packets += tx_status.packets;
- dev->stats.rx_bytes += rx_status.inb_octets;
- dev->stats.tx_bytes += tx_status.octets;
- dev->stats.multicast += rx_status.multicast_packets;
- dev->stats.rx_crc_errors += rx_status.inb_errors;
- dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
- dev->stats.rx_dropped += rx_status.dropped_packets;
- }
- return &dev->stats;
- }
- /**
- * cvm_oct_common_change_mtu - change the link MTU
- * @dev: Device to change
- * @new_mtu: The new MTU
- *
- * Returns Zero on success
- */
- static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
- {
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
- #if IS_ENABLED(CONFIG_VLAN_8021Q)
- int vlan_bytes = VLAN_HLEN;
- #else
- int vlan_bytes = 0;
- #endif
- int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;
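- /*
- * Worked example (added note): for new_mtu = 1500, mtu_overhead is
- * 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN, if enabled), so
- * max_packet below becomes 1522 (1518 without VLAN) and the jabber
- * limit rounds up to the next multiple of 8, i.e. 1528 (1520).
- */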
- dev->mtu = new_mtu;
- if ((interface < 2) &&
- (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- int index = INDEX(priv->port);
- /* Add ethernet header and FCS, and VLAN if configured. */
- int max_packet = new_mtu + mtu_overhead;
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
- OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /* Signal errors on packets larger than the MTU */
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
- max_packet);
- } else {
- /*
- * Set the hardware to truncate packets larger
- * than the MTU and smaller than 64 bytes.
- */
- union cvmx_pip_frm_len_chkx frm_len_chk;
- frm_len_chk.u64 = 0;
- frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
- frm_len_chk.s.maxlen = max_packet;
- cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
- frm_len_chk.u64);
- }
- /*
- * Set the hardware to truncate packets larger than
- * the MTU. The jabber register must be set to a
- * multiple of 8 bytes, so round up.
- */
- cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
- (max_packet + 7) & ~7u);
- }
- return 0;
- }
- /**
- * cvm_oct_common_set_multicast_list - set the multicast list
- * @dev: Device to work on
- */
- static void cvm_oct_common_set_multicast_list(struct net_device *dev)
- {
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
- if ((interface < 2) &&
- (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- union cvmx_gmxx_rxx_adr_ctl control;
- int index = INDEX(priv->port);
- control.u64 = 0;
- control.s.bcst = 1; /* Allow broadcast MAC addresses */
- if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
- (dev->flags & IFF_PROMISC))
- /* Force accept multicast packets */
- control.s.mcst = 2;
- else
- /* Force reject multicast packets */
- control.s.mcst = 1;
- if (dev->flags & IFF_PROMISC)
- /*
- * Reject CAM matches in promiscuous mode. Since the
- * CAM is shut off, everything is accepted anyway.
- */
- control.s.cam_mode = 0;
- else
- /* Filter packets based on the CAM */
- control.s.cam_mode = 1;
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64 & ~1ull);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
- control.u64);
- if (dev->flags & IFF_PROMISC)
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
- (index, interface), 0);
- else
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
- (index, interface), 1);
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64);
- }
- }
- static int cvm_oct_set_mac_filter(struct net_device *dev)
- {
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- int interface = INTERFACE(priv->port);
- if ((interface < 2) &&
- (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- int i;
- u8 *ptr = dev->dev_addr;
- u64 mac = 0;
- int index = INDEX(priv->port);
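- /*
- * Added note: the loop below packs the six MAC bytes big-endian into
- * a u64, e.g. 00:01:02:03:04:05 becomes 0x000102030405, which is the
- * value written to GMX_SMAC further down.
- */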
- for (i = 0; i < 6; i++)
- mac = (mac << 8) | (u64)ptr[i];
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64 & ~1ull);
- cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
- ptr[0]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
- ptr[1]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
- ptr[2]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
- ptr[3]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
- ptr[4]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
- ptr[5]);
- cvm_oct_common_set_multicast_list(dev);
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64);
- }
- return 0;
- }
- /**
- * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
- * @dev: The device in question.
- * @addr: Socket address.
- *
- * Returns Zero on success
- */
- static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
- {
- int r = eth_mac_addr(dev, addr);
- if (r)
- return r;
- return cvm_oct_set_mac_filter(dev);
- }
- /**
- * cvm_oct_common_init - per network device initialization
- * @dev: Device to initialize
- *
- * Returns Zero on success
- */
- int cvm_oct_common_init(struct net_device *dev)
- {
- struct octeon_ethernet *priv = netdev_priv(dev);
- const u8 *mac = NULL;
- if (priv->of_node)
- mac = of_get_mac_address(priv->of_node);
- if (mac)
- ether_addr_copy(dev->dev_addr, mac);
- else
- eth_hw_addr_random(dev);
- /*
- * Force the interface to use the POW send if always_use_pow
- * was specified or it is in the pow send list.
- */
- if ((pow_send_group != -1) &&
- (always_use_pow || strstr(pow_send_list, dev->name)))
- priv->queue = -1;
- if (priv->queue != -1)
- dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- /* We do our own locking, Linux doesn't need to */
- dev->features |= NETIF_F_LLTX;
- dev->ethtool_ops = &cvm_oct_ethtool_ops;
- cvm_oct_set_mac_filter(dev);
- dev_set_mtu(dev, dev->mtu);
- /*
- * Zero out stats for port so we won't mistakenly show
- * counters from the bootloader.
- */
- memset(dev->netdev_ops->ndo_get_stats(dev), 0,
- sizeof(struct net_device_stats));
- if (dev->netdev_ops->ndo_stop)
- dev->netdev_ops->ndo_stop(dev);
- return 0;
- }
- void cvm_oct_common_uninit(struct net_device *dev)
- {
- if (dev->phydev)
- phy_disconnect(dev->phydev);
- }
- int cvm_oct_common_open(struct net_device *dev,
- void (*link_poll)(struct net_device *))
- {
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
- cvmx_helper_link_info_t link_info;
- int rv;
- rv = cvm_oct_phy_setup_device(dev);
- if (rv)
- return rv;
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- gmx_cfg.s.en = 1;
- if (octeon_has_feature(OCTEON_FEATURE_PKND))
- gmx_cfg.s.pknd = priv->port;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
- if (octeon_is_simulation())
- return 0;
- if (dev->phydev) {
- int r = phy_read_status(dev->phydev);
- if (r == 0 && dev->phydev->link == 0)
- netif_carrier_off(dev);
- cvm_oct_adjust_link(dev);
- } else {
- link_info = cvmx_helper_link_get(priv->port);
- if (!link_info.s.link_up)
- netif_carrier_off(dev);
- priv->poll = link_poll;
- link_poll(dev);
- }
- return 0;
- }
- void cvm_oct_link_poll(struct net_device *dev)
- {
- struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
- link_info = cvmx_helper_link_get(priv->port);
- if (link_info.u64 == priv->link_info)
- return;
- if (cvmx_helper_link_set(priv->port, link_info))
- link_info.u64 = priv->link_info;
- else
- priv->link_info = link_info.u64;
- if (link_info.s.link_up) {
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
- } else if (netif_carrier_ok(dev)) {
- netif_carrier_off(dev);
- }
- cvm_oct_note_carrier(priv, link_info);
- }
- static int cvm_oct_xaui_open(struct net_device *dev)
- {
- return cvm_oct_common_open(dev, cvm_oct_link_poll);
- }
- static const struct net_device_ops cvm_oct_npi_netdev_ops = {
- .ndo_init = cvm_oct_common_init,
- .ndo_uninit = cvm_oct_common_uninit,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
- #ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
- #endif
- };
- static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
- .ndo_init = cvm_oct_common_init,
- .ndo_uninit = cvm_oct_common_uninit,
- .ndo_open = cvm_oct_xaui_open,
- .ndo_stop = cvm_oct_common_stop,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
- #ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
- #endif
- };
- static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
- .ndo_init = cvm_oct_sgmii_init,
- .ndo_uninit = cvm_oct_common_uninit,
- .ndo_open = cvm_oct_sgmii_open,
- .ndo_stop = cvm_oct_common_stop,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
- #ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
- #endif
- };
- static const struct net_device_ops cvm_oct_spi_netdev_ops = {
- .ndo_init = cvm_oct_spi_init,
- .ndo_uninit = cvm_oct_spi_uninit,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
- #ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
- #endif
- };
- static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
- .ndo_init = cvm_oct_common_init,
- .ndo_uninit = cvm_oct_common_uninit,
- .ndo_open = cvm_oct_rgmii_open,
- .ndo_stop = cvm_oct_common_stop,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
- #ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
- #endif
- };
- static const struct net_device_ops cvm_oct_pow_netdev_ops = {
- .ndo_init = cvm_oct_common_init,
- .ndo_start_xmit = cvm_oct_xmit_pow,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
- #ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
- #endif
- };
- static struct device_node *cvm_oct_of_get_child(
- const struct device_node *parent, int reg_val)
- {
- struct device_node *node = NULL;
- int size;
- const __be32 *addr;
- for (;;) {
- node = of_get_next_child(parent, node);
- if (!node)
- break;
- addr = of_get_property(node, "reg", &size);
- if (addr && (be32_to_cpu(*addr) == reg_val))
- break;
- }
- return node;
- }
- static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
- int interface, int port)
- {
- struct device_node *ni, *np;
- ni = cvm_oct_of_get_child(pip, interface);
- if (!ni)
- return NULL;
- np = cvm_oct_of_get_child(ni, port);
- of_node_put(ni);
- return np;
- }
- static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
- {
- u32 delay_value;
- if (!of_property_read_u32(np, "rx-delay", &delay_value))
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
- if (!of_property_read_u32(np, "tx-delay", &delay_value))
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
- }
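- /*
- * Illustrative device tree fragment (an assumption, not taken from a
- * real board file): the "reg" values are what cvm_oct_of_get_child()
- * matches on, and rx-delay/tx-delay are the optional properties read
- * by cvm_set_rgmii_delay() above.
- *
- *   pip {
- *           compatible = "cavium,octeon-3860-pip";
- *           interface@0 {
- *                   reg = <0>;
- *                   ethernet@0 {
- *                           reg = <0>;
- *                           rx-delay = <0>;
- *                           tx-delay = <0>;
- *                   };
- *           };
- *   };
- */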
- static int cvm_oct_probe(struct platform_device *pdev)
- {
- int num_interfaces;
- int interface;
- int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
- int qos;
- struct device_node *pip;
- int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;
- #if IS_ENABLED(CONFIG_VLAN_8021Q)
- mtu_overhead += VLAN_HLEN;
- #endif
- octeon_mdiobus_force_mod_depencency();
- pip = pdev->dev.of_node;
- if (!pip) {
- pr_err("Error: No 'pip' in /aliases\n");
- return -EINVAL;
- }
- cvm_oct_configure_common_hw();
- cvmx_helper_initialize_packet_io_global();
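- /*
- * Worked example (added note): receive_group_order selects how many
- * POW groups are used. For receive_group_order = 2 the mask computed
- * below is (1 << (1 << 2)) - 1 = 0xf, i.e. groups 0-3; otherwise only
- * the single pow_receive_group bit is set.
- */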
- if (receive_group_order) {
- if (receive_group_order > 4)
- receive_group_order = 4;
- pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
- } else {
- pow_receive_groups = BIT(pow_receive_group);
- }
- /* Change the input group for all ports before input is enabled */
- num_interfaces = cvmx_helper_get_number_of_interfaces();
- for (interface = 0; interface < num_interfaces; interface++) {
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int port;
- for (port = cvmx_helper_get_ipd_port(interface, 0);
- port < cvmx_helper_get_ipd_port(interface, num_ports);
- port++) {
- union cvmx_pip_prt_tagx pip_prt_tagx;
- pip_prt_tagx.u64 =
- cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
- if (receive_group_order) {
- int tag_mask;
- /* We support only 16 groups at the moment, so
- * always disable the two additional "hidden"
- * tag_mask bits on CN68XX.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX))
- pip_prt_tagx.u64 |= 0x3ull << 44;
- tag_mask = ~((1 << receive_group_order) - 1);
- pip_prt_tagx.s.grptagbase = 0;
- pip_prt_tagx.s.grptagmask = tag_mask;
- pip_prt_tagx.s.grptag = 1;
- pip_prt_tagx.s.tag_mode = 0;
- pip_prt_tagx.s.inc_prt_flag = 1;
- pip_prt_tagx.s.ip6_dprt_flag = 1;
- pip_prt_tagx.s.ip4_dprt_flag = 1;
- pip_prt_tagx.s.ip6_sprt_flag = 1;
- pip_prt_tagx.s.ip4_sprt_flag = 1;
- pip_prt_tagx.s.ip6_dst_flag = 1;
- pip_prt_tagx.s.ip4_dst_flag = 1;
- pip_prt_tagx.s.ip6_src_flag = 1;
- pip_prt_tagx.s.ip4_src_flag = 1;
- pip_prt_tagx.s.grp = 0;
- } else {
- pip_prt_tagx.s.grptag = 0;
- pip_prt_tagx.s.grp = pow_receive_group;
- }
- cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
- pip_prt_tagx.u64);
- }
- }
- cvmx_helper_ipd_and_packet_input_enable();
- memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
- /*
- * Initialize the FAU used for counting packet buffers that
- * need to be freed.
- */
- cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
- /* Initialize the FAU used for counting tx SKBs that need to be freed */
- cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
- if (pow_send_group != -1) {
- struct net_device *dev;
- dev = alloc_etherdev(sizeof(struct octeon_ethernet));
- if (dev) {
- /* Initialize the device private structure. */
- struct octeon_ethernet *priv = netdev_priv(dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
- dev->netdev_ops = &cvm_oct_pow_netdev_ops;
- priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
- priv->port = CVMX_PIP_NUM_INPUT_PORTS;
- priv->queue = -1;
- strcpy(dev->name, "pow%d");
- for (qos = 0; qos < 16; qos++)
- skb_queue_head_init(&priv->tx_free_list[qos]);
- dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
- dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
- if (register_netdev(dev) < 0) {
- pr_err("Failed to register ethernet device for POW\n");
- free_netdev(dev);
- } else {
- cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
- pr_info("%s: POW send group %d, receive group %d\n",
- dev->name, pow_send_group,
- pow_receive_group);
- }
- } else {
- pr_err("Failed to allocate ethernet device for POW\n");
- }
- }
- num_interfaces = cvmx_helper_get_number_of_interfaces();
- for (interface = 0; interface < num_interfaces; interface++) {
- cvmx_helper_interface_mode_t imode =
- cvmx_helper_interface_get_mode(interface);
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int port;
- int port_index;
- for (port_index = 0,
- port = cvmx_helper_get_ipd_port(interface, 0);
- port < cvmx_helper_get_ipd_port(interface, num_ports);
- port_index++, port++) {
- struct octeon_ethernet *priv;
- struct net_device *dev =
- alloc_etherdev(sizeof(struct octeon_ethernet));
- if (!dev) {
- pr_err("Failed to allocate ethernet device for port %d\n",
- port);
- continue;
- }
- /* Initialize the device private structure. */
- SET_NETDEV_DEV(dev, &pdev->dev);
- priv = netdev_priv(dev);
- priv->netdev = dev;
- priv->of_node = cvm_oct_node_for_port(pip, interface,
- port_index);
- INIT_DELAYED_WORK(&priv->port_periodic_work,
- cvm_oct_periodic_worker);
- priv->imode = imode;
- priv->port = port;
- priv->queue = cvmx_pko_get_base_queue(priv->port);
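- /*
- * Added note: each PKO queue gets its own 32-bit FAU counter. The
- * counters are carved downward from FAU_NUM_PACKET_BUFFERS_TO_FREE,
- * so priv->fau points at this port's first counter; fau itself is
- * only decremented once the device registers successfully.
- */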
- priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
- for (qos = 0; qos < 16; qos++)
- skb_queue_head_init(&priv->tx_free_list[qos]);
- for (qos = 0; qos < cvmx_pko_get_num_queues(port);
- qos++)
- cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
- dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
- dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
- switch (priv->imode) {
- /* These types don't support ports to IPD/PKO */
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- break;
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- dev->netdev_ops = &cvm_oct_npi_netdev_ops;
- strcpy(dev->name, "npi%d");
- break;
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
- strcpy(dev->name, "xaui%d");
- break;
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- dev->netdev_ops = &cvm_oct_npi_netdev_ops;
- strcpy(dev->name, "loop%d");
- break;
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
- strcpy(dev->name, "eth%d");
- break;
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- dev->netdev_ops = &cvm_oct_spi_netdev_ops;
- strcpy(dev->name, "spi%d");
- break;
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
- strcpy(dev->name, "eth%d");
- cvm_set_rgmii_delay(priv->of_node, interface,
- port_index);
- break;
- }
- if (!dev->netdev_ops) {
- free_netdev(dev);
- } else if (register_netdev(dev) < 0) {
- pr_err("Failed to register ethernet device for interface %d, port %d\n",
- interface, priv->port);
- free_netdev(dev);
- } else {
- cvm_oct_device[priv->port] = dev;
- fau -=
- cvmx_pko_get_num_queues(priv->port) *
- sizeof(u32);
- schedule_delayed_work(&priv->port_periodic_work,
- HZ);
- }
- }
- }
- cvm_oct_tx_initialize();
- cvm_oct_rx_initialize();
- /*
- * 150 us: about 10 1500-byte packets at 1 GbE.
- */
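- /*
- * Worked example (added note): the line below converts 150 us into
- * core-clock cycles; at an assumed 800 MHz clock this is
- * 150 * (800000000 / 1000000) = 120000 cycles.
- */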
- cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
- schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
- return 0;
- }
- static int cvm_oct_remove(struct platform_device *pdev)
- {
- int port;
- cvmx_ipd_disable();
- atomic_inc_return(&cvm_oct_poll_queue_stopping);
- cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
- cvm_oct_rx_shutdown();
- cvm_oct_tx_shutdown();
- cvmx_pko_disable();
- /* Free the ethernet devices */
- for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
- if (cvm_oct_device[port]) {
- struct net_device *dev = cvm_oct_device[port];
- struct octeon_ethernet *priv = netdev_priv(dev);
- cancel_delayed_work_sync(&priv->port_periodic_work);
- cvm_oct_tx_shutdown_dev(dev);
- unregister_netdev(dev);
- free_netdev(dev);
- cvm_oct_device[port] = NULL;
- }
- }
- cvmx_pko_shutdown();
- cvmx_ipd_free_ptr();
- /* Free the HW pools */
- cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
- num_packet_buffers);
- cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
- num_packet_buffers);
- if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
- cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
- CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
- return 0;
- }
- static const struct of_device_id cvm_oct_match[] = {
- {
- .compatible = "cavium,octeon-3860-pip",
- },
- {},
- };
- MODULE_DEVICE_TABLE(of, cvm_oct_match);
- static struct platform_driver cvm_oct_driver = {
- .probe = cvm_oct_probe,
- .remove = cvm_oct_remove,
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = cvm_oct_match,
- },
- };
- module_platform_driver(cvm_oct_driver);
- MODULE_LICENSE("GPL");
- MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
- MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");