if_cgem: Add support for ZynqMP CGEM

This is a port of the latest FreeBSD patch that adds support for the
64-bit CGEMs used in the ZynqMP. It does not include the work necessary
to support RISC-V systems.
Author: Kinsey Moore, 2021-03-08 10:09:48 -06:00 (committed by Joel Sherrill)
parent 0c60fe524e
commit b7a12cc8f9
2 changed files with 284 additions and 2 deletions
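The descriptor layout change at the heart of the patch: 64-bit capable GEMs
carry DMA buffer addresses split across two 32-bit descriptor words. A minimal
sketch of that split, assuming a hypothetical 16-byte descriptor (the names
below are illustrative, not the driver's):

#include <stdint.h>

/* Hypothetical 64-bit GEM descriptor: low address word and control
 * word, followed by the high address word and a pad word so that
 * consecutive descriptors sit 16 bytes apart. */
struct gem64_desc {
	uint32_t addr;    /* bits 31:0 of the buffer bus address */
	uint32_t ctl;     /* control/status flags */
	uint32_t addrhi;  /* bits 63:32, used when 64-bit DMA is enabled */
	uint32_t unused;  /* padding */
};

static void
gem64_set_buf(struct gem64_desc *d, uint64_t busaddr)
{
	d->addrhi = (uint32_t)(busaddr >> 32); /* high word first */
	d->addr = (uint32_t)busaddr; /* low word last: RX ownership bit lives here */
}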

sys/dev/cadence/if_cgem.c

@@ -81,6 +81,10 @@ __FBSDID("$FreeBSD$");
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#if defined(__LP64__) || defined(__ILP32__)
#define CGEM64
#endif
#include <dev/cadence/if_cgem_hw.h>
#include <rtems/bsd/local/miibus_if.h>
@@ -127,8 +131,14 @@ struct cgem_softc {
void *intrhand;
struct callout tick_ch;
uint32_t net_ctl_shadow;
#ifdef __rtems__
uint32_t net_cfg_shadow;
int neednullqs;
#endif /* __rtems__ */
int ref_clk_num;
#ifndef __rtems__
u_char eaddr[6];
#endif /* __rtems__ */
bus_dma_tag_t desc_dma_tag;
bus_dma_tag_t mbuf_dma_tag;
@@ -161,12 +171,18 @@ struct cgem_softc {
int txring_hd_ptr; /* where to put next xmits */
int txring_tl_ptr; /* next xmit mbuf to free */
int txring_queued; /* num xmits segs queued */
#ifndef __rtems__
bus_dmamap_t txring_dma_map;
#endif /* __rtems__ */
u_int txfull; /* tx ring full events */
u_int txdefrags; /* tx calls to m_defrag() */
u_int txdefragfails; /* tx m_defrag() failures */
u_int txdmamapfails; /* tx dmamap failures */
/* null descriptor rings */
void *null_qs;
bus_addr_t null_qs_physaddr;
/* hardware provided statistics */
struct cgem_hw_stats {
uint64_t tx_bytes;
@@ -337,7 +353,11 @@ cgem_rx_filter(struct cgem_softc *sc)
hash_hi = 0;
hash_lo = 0;
#ifdef __rtems__
net_cfg = sc->net_cfg_shadow;
#else
net_cfg = RD4(sc, CGEM_NET_CFG);
#endif
net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
CGEM_NET_CFG_NO_BCAST |
@@ -379,6 +399,9 @@ cgem_rx_filter(struct cgem_softc *sc)
WR4(sc, CGEM_HASH_TOP, hash_hi);
WR4(sc, CGEM_HASH_BOT, hash_lo);
#ifdef __rtems__
sc->net_cfg_shadow = net_cfg;
#endif /* __rtems__ */
WR4(sc, CGEM_NET_CFG, net_cfg);
}
@@ -392,24 +415,87 @@ cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
*(bus_addr_t *)arg = segs[0].ds_addr;
}
#ifdef __rtems__
/* Set up null queues for priority queues we actually can't disable. */
static void
cgem_null_qs(struct cgem_softc *sc)
{
struct cgem_rx_desc *rx_desc;
struct cgem_tx_desc *tx_desc;
uint32_t queue_mask;
int n;
/* Read design config register 6 to determine number of queues. */
queue_mask = (RD4(sc, CGEM_DESIGN_CFG6) &
CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK) >> 1;
if (queue_mask == 0)
return;
/* Create empty RX queue and empty TX buf queues. */
memset(sc->null_qs, 0, sizeof(struct cgem_rx_desc) +
sizeof(struct cgem_tx_desc));
rx_desc = sc->null_qs;
rx_desc->addr = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP;
tx_desc = (struct cgem_tx_desc *)(rx_desc + 1);
tx_desc->ctl = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP;
/* Point all valid ring base pointers to the null queues. */
for (n = 1; (queue_mask & 1) != 0; n++, queue_mask >>= 1) {
WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr);
WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr +
sizeof(struct cgem_rx_desc));
}
}
#endif /* __rtems__ */
/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
int i, err;
#ifdef __rtems__
int desc_rings_size = CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc) +
CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);
if (sc->neednullqs)
desc_rings_size += sizeof(struct cgem_rx_desc) +
sizeof(struct cgem_tx_desc);
#endif /* __rtems__ */
sc->txring = NULL;
sc->rxring = NULL;
/* Allocate non-cached DMA space for RX and TX descriptors.
*/
#ifndef __rtems__
err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT,
#else /* __rtems__ */
err = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
#if defined(__LP64__)
8,
1ULL << 32, /* Do not cross a 4G boundary. */
BUS_SPACE_MAXADDR,
#elif defined(__ILP32__)
8,
0,
BUS_SPACE_MAXADDR,
#else /* ARMv7 */
1,
0,
BUS_SPACE_MAXADDR_32BIT,
#endif /* ARMv7 */
#endif /* __rtems__ */
BUS_SPACE_MAXADDR,
NULL, NULL,
#ifndef __rtems__
MAX_DESC_RING_SIZE,
1,
MAX_DESC_RING_SIZE,
#else /* __rtems__ */
desc_rings_size,
1,
desc_rings_size,
#endif /* __rtems__ */
0,
busdma_lock_mutex,
&sc->sc_mtx,
@@ -418,8 +504,19 @@ cgem_setup_descs(struct cgem_softc *sc)
return (err);
/* Set up a bus_dma_tag for mbufs. */
#ifndef __rtems__
err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT,
#else /* __rtems__ */
err = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
#ifdef CGEM64
8,
#else
1,
#endif
0,
BUS_SPACE_MAXADDR,
#endif /* __rtems__ */
BUS_SPACE_MAXADDR,
NULL, NULL,
MCLBYTES,
@@ -435,7 +532,11 @@ cgem_setup_descs(struct cgem_softc *sc)
/* Allocate DMA memory in non-cacheable space. */
err = bus_dmamem_alloc(sc->desc_dma_tag,
(void **)&sc->rxring,
#ifndef __rtems__
BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
#else /* __rtems__ */
BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO,
#endif /* __rtems__ */
&sc->rxring_dma_map);
if (err)
return (err);
@@ -443,7 +544,11 @@ cgem_setup_descs(struct cgem_softc *sc)
/* Load descriptor DMA memory. */
err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
(void *)sc->rxring,
#ifndef __rtems__
CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
#else /* __rtems__ */
desc_rings_size,
#endif /* __rtems__ */
cgem_getaddr, &sc->rxring_physaddr,
BUS_DMA_NOWAIT);
if (err)
@@ -464,6 +569,7 @@ cgem_setup_descs(struct cgem_softc *sc)
sc->rxring_tl_ptr = 0;
sc->rxring_queued = 0;
#ifndef __rtems__
/* Allocate DMA memory for TX descriptors in non-cacheable space. */
err = bus_dmamem_alloc(sc->desc_dma_tag,
(void **)&sc->txring,
@@ -480,6 +586,11 @@ cgem_setup_descs(struct cgem_softc *sc)
BUS_DMA_NOWAIT);
if (err)
return (err);
#else /* __rtems__ */
sc->txring = (struct cgem_tx_desc *)(sc->rxring + CGEM_NUM_RX_DESCS);
sc->txring_physaddr = sc->rxring_physaddr + CGEM_NUM_RX_DESCS *
sizeof(struct cgem_rx_desc);
#endif /* __rtems__ */
/* Initialize TX descriptor ring. */
for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
@@ -496,6 +607,16 @@ cgem_setup_descs(struct cgem_softc *sc)
sc->txring_tl_ptr = 0;
sc->txring_queued = 0;
#ifdef __rtems__
if (sc->neednullqs) {
sc->null_qs = (void *)(sc->txring + CGEM_NUM_TX_DESCS);
sc->null_qs_physaddr = sc->txring_physaddr +
CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);
cgem_null_qs(sc);
}
#endif /* __rtems__ */
return (0);
}
@@ -556,6 +677,13 @@ cgem_fill_rqueue(struct cgem_softc *sc)
/* Write rx descriptor and increment head pointer. */
sc->rxring[sc->rxring_hd_ptr].ctl = 0;
#ifdef __rtems__
#if defined(__LP64__)
sc->rxring[sc->rxring_hd_ptr].addrhi = segs[0].ds_addr >> 32;
#elif defined(__ILP32__)
sc->rxring[sc->rxring_hd_ptr].addrhi = 0;
#endif
#endif /* __rtems__ */
if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
CGEM_RXDESC_WRAP;
@@ -709,9 +837,16 @@ cgem_clean_tx(struct cgem_softc *sc)
/* Check the status. */
if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
/* Serious bus error. log to console. */
#if defined(__LP64__) && defined(__rtems__)
device_printf(sc->dev,
"cgem_clean_tx: AHB error, addr=0x%x%08x\n",
sc->txring[sc->txring_tl_ptr].addrhi,
sc->txring[sc->txring_tl_ptr].addr);
#else
device_printf(sc->dev, "cgem_clean_tx: Whoa! "
"AHB error, addr=0x%x\n",
sc->txring[sc->txring_tl_ptr].addr);
#endif
} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
CGEM_TXDESC_LATE_COLL)) != 0) {
if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
@@ -879,6 +1014,14 @@ cgem_start_locked(if_t ifp)
/* Descriptor address. */
sc->txring[sc->txring_hd_ptr + i].addr =
segs[i].ds_addr;
#ifdef __rtems__
#if defined(__LP64__)
sc->txring[sc->txring_hd_ptr + i].addrhi =
segs[i].ds_addr >> 32;
#elif defined(__ILP32__)
sc->txring[sc->txring_hd_ptr + i].addrhi = 0;
#endif
#endif /* __rtems__ */
/* Descriptor control word. */
ctl = segs[i].ds_len;
@@ -1081,7 +1224,11 @@ cgem_reset(struct cgem_softc *sc)
CGEM_ASSERT_LOCKED(sc);
WR4(sc, CGEM_NET_CTRL, 0);
#ifndef __rtems__
WR4(sc, CGEM_NET_CFG, 0);
#else
WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
#endif
WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
@@ -1092,9 +1239,31 @@ cgem_reset(struct cgem_softc *sc)
WR4(sc, CGEM_RX_QBAR, 0);
/* Get management port running even if interface is down. */
#ifndef __rtems__
WR4(sc, CGEM_NET_CFG,
CGEM_NET_CFG_DBUS_WIDTH_32 |
CGEM_NET_CFG_MDC_CLK_DIV_64);
#else
/* Determine data bus width from design configuration register. */
switch (RD4(sc, CGEM_DESIGN_CFG1) &
CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK) {
case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64:
sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_64;
break;
case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128:
sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_128;
break;
default:
sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_32;
}
#ifdef CGEM64
sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_48;
#else
sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_64;
#endif
WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
#endif /* __rtems__ */
sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
@@ -1112,8 +1281,14 @@ cgem_config(struct cgem_softc *sc)
CGEM_ASSERT_LOCKED(sc);
/* Program Net Config Register. */
#ifndef __rtems__
net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
CGEM_NET_CFG_MDC_CLK_DIV_64 |
#else /* __rtems__ */
sc->net_cfg_shadow &= (CGEM_NET_CFG_MDC_CLK_DIV_MASK |
CGEM_NET_CFG_DBUS_WIDTH_MASK);
net_cfg = sc->net_cfg_shadow |
#endif /* __rtems__ */
CGEM_NET_CFG_FCS_REMOVE |
CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
CGEM_NET_CFG_GIGE_EN |
@@ -1125,6 +1300,9 @@ cgem_config(struct cgem_softc *sc)
if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
#ifdef __rtems__
sc->net_cfg_shadow = net_cfg;
#endif /* __rtems__ */
WR4(sc, CGEM_NET_CFG, net_cfg);
/* Program DMA Config Register. */
@@ -1132,6 +1310,9 @@ cgem_config(struct cgem_softc *sc)
CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
#if defined(CGEM64) && defined(__rtems__)
CGEM_DMA_CFG_ADDR_BUS_64 |
#endif
CGEM_DMA_CFG_DISC_WHEN_NO_AHB;
/* Enable transmit checksum offloading? */
@@ -1143,6 +1324,13 @@ cgem_config(struct cgem_softc *sc)
/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);
#if defined(__LP64__) && defined(__rtems__)
WR4(sc, CGEM_RX_QBAR_HI, (uint32_t)(sc->rxring_physaddr >> 32));
WR4(sc, CGEM_TX_QBAR_HI, (uint32_t)(sc->txring_physaddr >> 32));
#elif defined(__ILP32__) && defined(__rtems__)
WR4(sc, CGEM_RX_QBAR_HI, 0);
WR4(sc, CGEM_TX_QBAR_HI, 0);
#endif
/* Enable rx and tx. */
sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
@@ -1176,8 +1364,14 @@ cgem_init_locked(struct cgem_softc *sc)
if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
#ifdef __rtems__
if (sc->miibus != NULL) {
#endif /* __rtems__ */
mii = device_get_softc(sc->miibus);
mii_mediachg(mii);
#ifdef __rtems__
}
#endif /* __rtems__ */
callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}
@@ -1297,6 +1491,10 @@ cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
#ifdef __rtems__
if (sc->miibus == NULL)
return (ENXIO);
#endif /* __rtems__ */
mii = device_get_softc(sc->miibus);
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
break;
@@ -1331,16 +1529,28 @@ cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
/* Turn on RX checksumming. */
if_setcapenablebit(ifp, IFCAP_RXCSUM |
IFCAP_RXCSUM_IPV6, 0);
#ifndef __rtems__
WR4(sc, CGEM_NET_CFG,
RD4(sc, CGEM_NET_CFG) |
CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
#else /* __rtems__ */
sc->net_cfg_shadow |=
CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
#endif /* __rtems__ */
} else {
/* Turn off RX checksumming. */
if_setcapenablebit(ifp, 0, IFCAP_RXCSUM |
IFCAP_RXCSUM_IPV6);
#ifndef __rtems__
WR4(sc, CGEM_NET_CFG,
RD4(sc, CGEM_NET_CFG) &
~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
#else /* __rtems__ */
sc->net_cfg_shadow &=
~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
#endif /* __rtems__ */
}
}
if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
@@ -1361,6 +1571,7 @@ cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
/* MII bus support routines.
*/
#ifndef __rtems__
static void
cgem_child_detached(device_t dev, device_t child)
{
@@ -1369,6 +1580,7 @@ cgem_child_detached(device_t dev, device_t child)
if (child == sc->miibus)
sc->miibus = NULL;
}
#endif /* __rtems__ */
static int
cgem_ifmedia_upd(if_t ifp)
@@ -1514,7 +1726,11 @@ cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
CGEM_ASSERT_LOCKED(sc);
/* Update hardware to reflect media. */
#ifndef __rtems__
net_cfg = RD4(sc, CGEM_NET_CFG);
#else /* __rtems__ */
net_cfg = sc->net_cfg_shadow;
#endif /* __rtems__ */
net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
CGEM_NET_CFG_FULL_DUPLEX);
@@ -1535,6 +1751,9 @@ cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
if ((mii->mii_media_active & IFM_FDX) != 0)
net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;
#ifdef __rtems__
sc->net_cfg_shadow = net_cfg;
#endif /* __rtems__ */
WR4(sc, CGEM_NET_CFG, net_cfg);
/* Set the reference clock if necessary. */
@@ -1809,7 +2028,17 @@ cgem_attach(device_t dev)
sc->if_old_flags = if_getflags(ifp);
sc->rxbufs = DEFAULT_NUM_RX_BUFS;
#if defined(CGEM64) && defined(__rtems__)
uint32_t design_cfg6 = RD4(sc, CGEM_DESIGN_CFG6);
/*
* QEMU does not have PBUF_CUTTHRU defined and is broken when trying
* to use nullqs.
*/
if ((design_cfg6 & CGEM_DESIGN_CFG6_PBUF_CUTTHRU) != 0)
sc->neednullqs = 1;
#else
sc->rxhangwar = 1;
#endif
/* Reset hardware. */
CGEM_LOCK(sc);
@@ -1901,10 +2130,18 @@ cgem_detach(device_t dev)
bus_dmamap_unload(sc->desc_dma_tag,
sc->rxring_dma_map);
sc->rxring_physaddr = 0;
#ifdef __rtems__
sc->txring_physaddr = 0;
sc->null_qs_physaddr = 0;
#endif /* __rtems__ */
}
bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
sc->rxring_dma_map);
sc->rxring = NULL;
#ifdef __rtems__
sc->txring = NULL;
sc->null_qs = NULL;
#endif /* __rtems__ */
#ifndef __rtems__
for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
if (sc->rxring_m_dmamap[i] != NULL) {
@@ -1912,7 +2149,6 @@ cgem_detach(device_t dev)
sc->rxring_m_dmamap[i]);
sc->rxring_m_dmamap[i] = NULL;
}
#endif /* __rtems__ */
}
if (sc->txring != NULL) {
if (sc->txring_physaddr != 0) {
@@ -1923,7 +2159,6 @@ cgem_detach(device_t dev)
bus_dmamem_free(sc->desc_dma_tag, sc->txring,
sc->txring_dma_map);
sc->txring = NULL;
#ifndef __rtems__
for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
if (sc->txring_m_dmamap[i] != NULL) {
bus_dmamap_destroy(sc->mbuf_dma_tag,
@@ -1954,8 +2189,10 @@ static device_method_t cgem_methods[] = {
DEVMETHOD(device_attach, cgem_attach),
DEVMETHOD(device_detach, cgem_detach),
#ifndef __rtems__
/* Bus interface */
DEVMETHOD(bus_child_detached, cgem_child_detached),
#endif /* __rtems__ */
/* MII interface */
DEVMETHOD(miibus_readreg, cgem_miibus_readreg),

sys/dev/cadence/if_cgem_hw.h

@@ -77,6 +77,7 @@
#define CGEM_NET_CFG_MDC_CLK_DIV_16 (1<<18)
#define CGEM_NET_CFG_MDC_CLK_DIV_32 (2<<18)
#define CGEM_NET_CFG_MDC_CLK_DIV_48 (3<<18)
/* Higher MDC clock divs are reserved values on 64-bit hardware */
#define CGEM_NET_CFG_MDC_CLK_DIV_64 (4<<18)
#define CGEM_NET_CFG_MDC_CLK_DIV_96 (5<<18)
#define CGEM_NET_CFG_MDC_CLK_DIV_128 (6<<18)
@@ -113,6 +114,7 @@
#define CGEM_USER_IO 0x00C /* User I/O */
#define CGEM_DMA_CFG 0x010 /* DMA Config */
#define CGEM_DMA_CFG_ADDR_BUS_64 (1 << 30)
#define CGEM_DMA_CFG_DISC_WHEN_NO_AHB (1<<24)
#define CGEM_DMA_CFG_RX_BUF_SIZE_SHIFT 16
#define CGEM_DMA_CFG_RX_BUF_SIZE_MASK (0xff<<16)
@@ -290,6 +292,29 @@
#define CGEM_PTP_PEER_RX_S 0x1F8 /* PTP Peer Event rcv'd s */
#define CGEM_PTP_PEER_RX_NS 0x1FC /* PTP Peer Event rcv'd ns */
#define CGEM_DESIGN_CFG1 0x280 /* Design Configuration 1 */
#define CGEM_DESIGN_CFG1_AXI_CACHE_WIDTH_MASK (0xfU << 28)
#define CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK (7 << 25)
#define CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_32 (1 << 25)
#define CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64 (2 << 25)
#define CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128 (4 << 25)
#define CGEM_DESIGN_CFG1_IRQ_READ_CLR (1 << 23)
#define CGEM_DESIGN_CFG1_NO_SNAPSHOT (1 << 22)
#define CGEM_DESIGN_CFG1_NO_STATS (1 << 21)
#define CGEM_DESIGN_CFG1_NO_SCAN_PINS (1 << 20)
#define CGEM_DESIGN_CFG1_USER_IN_WIDTH_MASK (0x1f << 15)
#define CGEM_DESIGN_CFG1_USER_OUT_WIDTH_MASK (0x1f << 10)
#define CGEM_DESIGN_CFG1_USER_IO (1 << 9)
#define CGEM_DESIGN_CFG1_APB_REV2 (1 << 8)
#define CGEM_DESIGN_CFG1_APB_REV1 (1 << 7)
#define CGEM_DESIGN_CFG1_EXT_FIFO_INTERFACE (1 << 6)
#define CGEM_DESIGN_CFG1_NO_INT_LOOPBACK (1 << 5)
#define CGEM_DESIGN_CFG1_INT_LOOPBACK (1 << 4)
#define CGEM_DESIGN_CFG1_TDC_50 (1 << 3)
#define CGEM_DESIGN_CFG1_RDC_50 (1 << 2)
#define CGEM_DESIGN_CFG1_SERDES (1 << 1)
#define CGEM_DESIGN_CFG1_NO_PCS (1 << 0)
#define CGEM_DESIGN_CFG2 0x284 /* Design Configuration 2 */
#define CGEM_DESIGN_CFG2_TX_PBUF_ADDR_SHIFT 26
#define CGEM_DESIGN_CFG2_TX_PBUF_ADDR_MASK (0xf<<26)
@@ -330,6 +355,18 @@
#define CGEM_DESIGN_CFG5_TX_FIFO_CNT_WIDTH_MASK (0xf<<4)
#define CGEM_DESIGN_CFG5_RX_FIFO_CNT_WIDTH_MASK 0xf
#define CGEM_DESIGN_CFG6 0x294 /* Design Configuration 6 */
#define CGEM_DESIGN_CFG6_PBUF_CUTTHRU (1 << 25) /* pbuf cut-through cap */
#define CGEM_DESIGN_CFG6_ADDR_64B (1 << 23) /* 64-bit addr cap */
#define CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK 0xfffe
#define CGEM_DESIGN_CFG6_DMA_PRIO_Q(n) (1 << (n))
#define CGEM_TX_QN_BAR(n) (0x440 + ((n) - 1) * 4)
#define CGEM_RX_QN_BAR(n) (0x480 + ((n) - 1) * 4)
#define CGEM_TX_QBAR_HI 0x4C8
#define CGEM_RX_QBAR_HI 0x4D4
/* Transmit Descriptors */
struct cgem_tx_desc {
uint32_t addr;
@@ -350,6 +387,10 @@ struct cgem_tx_desc {
#define CGEM_TXDESC_NO_CRC_APPENDED (1<<16)
#define CGEM_TXDESC_LAST_BUF (1<<15) /* last buf in frame */
#define CGEM_TXDESC_LENGTH_MASK 0x3fff
#if defined(CGEM64) && defined(__rtems__)
uint32_t addrhi;
uint32_t unused;
#endif
};
struct cgem_rx_desc {
@@ -379,6 +420,10 @@ struct cgem_rx_desc {
#define CGEM_RXDESC_SOF (1<<14) /* start of frame */
#define CGEM_RXDESC_BAD_FCS (1<<13)
#define CGEM_RXDESC_LENGTH_MASK 0x1fff
#if defined(CGEM64) && defined(__rtems__)
uint32_t addrhi;
uint32_t unused;
#endif
};
#endif /* _IF_CGEM_HW_H_ */
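As a usage note on the new header macros: CGEM_TX_QN_BAR(n) and
CGEM_RX_QN_BAR(n) place the base-address registers for priority queue n
(n >= 1) at consecutive 4-byte offsets, while queue 0 keeps the legacy
CGEM_TX_QBAR/CGEM_RX_QBAR registers. A small self-contained check of that
arithmetic:

#include <stdio.h>

#define CGEM_TX_QN_BAR(n) (0x440 + ((n) - 1) * 4) /* TX queue n base addr */
#define CGEM_RX_QN_BAR(n) (0x480 + ((n) - 1) * 4) /* RX queue n base addr */

int
main(void)
{
	/* Queues 1..3: TX BARs 0x440/0x444/0x448, RX BARs 0x480/0x484/0x488. */
	for (int n = 1; n <= 3; n++)
		printf("queue %d: TX_QN_BAR=0x%03x RX_QN_BAR=0x%03x\n",
		    n, CGEM_TX_QN_BAR(n), CGEM_RX_QN_BAR(n));
	return (0);
}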