if_dwc: Use explicit cache operations

Author: Sebastian Huber  2017-01-10 09:16:17 +01:00
parent 0436d64a2a
commit 3ef41be02f
2 changed files with 80 additions and 0 deletions
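
The RTEMS build of this driver does not go through bus_dma(9) map load/sync operations, so the diff below replaces them with explicit calls into the RTEMS cache manager. A minimal sketch of the resulting pattern, assuming only the RTEMS cache-manager API (the helper names are illustrative, not part of the driver):

#include <stddef.h>
#include <rtems/rtems/cache.h>

/* Hand a CPU-written buffer to the DMA engine (TX direction):
 * write dirty cache lines back to memory so the device reads
 * current data.  Stands in for BUS_DMASYNC_PREWRITE. */
static void
dma_tx_prepare(const void *buf, size_t len)
{
	rtems_cache_flush_multiple_data_lines(buf, len);
}

/* Hand a buffer to the DMA engine for receive (RX direction):
 * discard stale cache lines so later CPU reads fetch what the
 * device wrote.  Stands in for BUS_DMASYNC_PREREAD. */
static void
dma_rx_prepare(const void *buf, size_t len)
{
	rtems_cache_invalidate_multiple_data_lines(buf, len);
}

Flush before the device reads, invalidate before the CPU reads what the device wrote: this is the same contract bus_dmamap_sync() expresses with the PREWRITE/PREREAD flags.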

if_dwc.c

@@ -181,6 +181,7 @@ next_txidx(struct dwc_softc *sc, uint32_t curidx, int inc)
 	return ((curidx + (uint32_t)inc) % TX_DESC_COUNT);
 }
 
+#ifndef __rtems__
 static void
 dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 {
@@ -189,6 +190,7 @@ dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 		return;
 	*(bus_addr_t *)arg = segs[0].ds_addr;
 }
+#endif /* __rtems__ */
 
 static void
 dwc_setup_txdesc(struct dwc_softc *sc, int csum_flags, int idx,
@@ -255,6 +257,33 @@ dwc_setup_txdesc(struct dwc_softc *sc, int csum_flags, int idx,
 	}
 }
 
+#ifdef __rtems__
+static int
+dwc_get_segs_for_tx(struct mbuf *m, bus_dma_segment_t segs[TX_MAX_DMA_SEGS],
+    int *nsegs)
+{
+	int i = 0;
+
+	do {
+		if (m->m_len > 0) {
+			segs[i].ds_addr = mtod(m, bus_addr_t);
+			segs[i].ds_len = m->m_len;
+			rtems_cache_flush_multiple_data_lines(m->m_data, m->m_len);
+			++i;
+		}
+
+		m = m->m_next;
+		if (m == NULL) {
+			*nsegs = i;
+
+			return (0);
+		}
+	} while (i < TX_MAX_DMA_SEGS);
+
+	return (EFBIG);
+}
+#endif /* __rtems__ */
+
 static void
 dwc_setup_txbuf(struct dwc_softc *sc, struct mbuf *m, int *start_tx)
 {
@@ -262,9 +291,13 @@ dwc_setup_txbuf(struct dwc_softc *sc, struct mbuf *m, int *start_tx)
 	int error, nsegs, idx;
 
 	idx = sc->tx_idx_head;
+#ifndef __rtems__
 	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
 	    m, &seg, &nsegs, BUS_DMA_NOWAIT);
+#else /* __rtems__ */
+	error = dwc_get_segs_for_tx(m, segs, &nsegs);
+#endif /* __rtems__ */
 	if (error == EFBIG) {
 		/* Too many segments! Defrag and try again. */
 		struct mbuf *m2 = m_defrag(m, M_NOWAIT);
@@ -274,8 +307,12 @@ dwc_setup_txbuf(struct dwc_softc *sc, struct mbuf *m, int *start_tx)
 			return;
 		}
 		m = m2;
+#ifndef __rtems__
 		error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag,
 		    sc->txbuf_map[idx].map, m, &seg, &nsegs, BUS_DMA_NOWAIT);
+#else /* __rtems__ */
+		error = dwc_get_segs_for_tx(m, segs, &nsegs);
+#endif /* __rtems__ */
 	}
 	if (error != 0) {
 		/* Give up. */
@@ -285,8 +322,10 @@ dwc_setup_txbuf(struct dwc_softc *sc, struct mbuf *m, int *start_tx)
 
 	sc->txbuf_map[idx].mbuf = m;
 
+#ifndef __rtems__
 	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
 	    BUS_DMASYNC_PREWRITE);
+#endif /* __rtems__ */
 
 	dwc_setup_txdesc(sc, m->m_pkthdr.csum_flags, idx, segs, nsegs);
@@ -518,8 +557,12 @@ dwc_setup_rxdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr)
 	sc->rxdesc_ring[idx].addr = (uint32_t)paddr;
 	nidx = next_rxidx(sc, idx);
+#ifndef __rtems__
 	sc->rxdesc_ring[idx].addr_next = sc->rxdesc_ring_paddr +
 	    (nidx * sizeof(struct dwc_hwdesc));
+#else /* __rtems__ */
+	sc->rxdesc_ring[idx].addr_next = (uint32_t)&sc->rxdesc_ring[nidx];
+#endif /* __rtems__ */
 	if (sc->mactype == DWC_GMAC_ALT_DESC)
 		sc->rxdesc_ring[idx].tdes1 = DDESC_CNTL_CHAINED | RX_MAX_PACKET;
 	else
@@ -536,10 +579,13 @@ static int
 dwc_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
 {
 	bus_dma_segment_t seg;
+#ifndef __rtems__
 	int error, nsegs;
+#endif /* __rtems__ */
 
 	m_adj(m, ETHER_ALIGN);
+#ifndef __rtems__
 	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
 	    m, &seg, &nsegs, 0);
 	if (error != 0) {
@@ -550,6 +596,10 @@ dwc_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
 	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
 	    BUS_DMASYNC_PREREAD);
+#else /* __rtems__ */
+	rtems_cache_invalidate_multiple_data_lines(m->m_data, m->m_len);
+	seg.ds_addr = mtod(m, bus_addr_t);
+#endif /* __rtems__ */
 	sc->rxbuf_map[idx].mbuf = m;
 	dwc_setup_rxdesc(sc, idx, seg.ds_addr);
@@ -771,9 +821,11 @@ dwc_txfinish_locked(struct dwc_softc *sc)
 		if ((desc->tdes0 & DDESC_TDES0_OWN) != 0)
 			break;
 		bmap = &sc->txbuf_map[sc->tx_idx_tail];
+#ifndef __rtems__
 		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
 		    BUS_DMASYNC_POSTWRITE);
 		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
+#endif /* __rtems__ */
 		m_freem(bmap->mbuf);
 		bmap->mbuf = NULL;
 		--sc->txcount;
@@ -833,9 +885,11 @@ dwc_rxfinish_locked(struct dwc_softc *sc)
 			continue;
 		}
 
+#ifndef __rtems__
 		bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
 		    BUS_DMASYNC_POSTREAD);
 		bus_dmamap_unload(sc->rxbuf_tag, sc->rxbuf_map[idx].map);
+#endif /* __rtems__ */
 
 		len = (rdes0 >> DDESC_RDES0_FL_SHIFT) & DDESC_RDES0_FL_MASK;
 		if (len != 0) {
@@ -951,6 +1005,7 @@ setup_dma(struct dwc_softc *sc)
 		goto out;
 	}
 
+#ifndef __rtems__
 	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
 	    sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
 	    &sc->txdesc_ring_paddr, 0);
@@ -959,6 +1014,7 @@ setup_dma(struct dwc_softc *sc)
 		    "could not load TX descriptor ring map.\n");
 		goto out;
 	}
+#endif /* __rtems__ */
 
 	for (idx = 0; idx < TX_DESC_COUNT; idx++) {
 		sc->txdesc_ring[idx].addr = 0;
@@ -970,10 +1026,16 @@ setup_dma(struct dwc_softc *sc)
 			sc->txdesc_ring[idx].tdes1 = 0;
 		}
 		nidx = next_txidx(sc, idx, 1);
+#ifndef __rtems__
 		sc->txdesc_ring[idx].addr_next = sc->txdesc_ring_paddr +
 		    (nidx * sizeof(struct dwc_hwdesc));
+#else /* __rtems__ */
+		sc->txdesc_ring[idx].addr_next =
+		    (uint32_t)&sc->txdesc_ring[nidx];
+#endif /* __rtems__ */
 	}
 
+#ifndef __rtems__
 	error = bus_dma_tag_create(
 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
 	    1, 0,			/* alignment, boundary */
@@ -990,8 +1052,10 @@ setup_dma(struct dwc_softc *sc)
 		    "could not create TX ring DMA tag.\n");
 		goto out;
 	}
+#endif /* __rtems__ */
 
 	for (idx = 0; idx < TX_DESC_COUNT; idx++) {
+#ifndef __rtems__
 		error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
 		    &sc->txbuf_map[idx].map);
 		if (error != 0) {
@@ -999,6 +1063,7 @@ setup_dma(struct dwc_softc *sc)
 			    "could not create TX buffer DMA map.\n");
 			goto out;
 		}
+#endif /* __rtems__ */
 	}
 
 	/*
@@ -1030,6 +1095,7 @@ setup_dma(struct dwc_softc *sc)
 		goto out;
 	}
 
+#ifndef __rtems__
 	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
 	    sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
 	    &sc->rxdesc_ring_paddr, 0);
@@ -1055,8 +1121,10 @@ setup_dma(struct dwc_softc *sc)
 		    "could not create RX buf DMA tag.\n");
 		goto out;
 	}
+#endif /* __rtems__ */
 
 	for (idx = 0; idx < RX_DESC_COUNT; idx++) {
+#ifndef __rtems__
 		error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
 		    &sc->rxbuf_map[idx].map);
 		if (error != 0) {
@@ -1064,6 +1132,7 @@ setup_dma(struct dwc_softc *sc)
 			    "could not create RX buffer DMA map.\n");
 			goto out;
 		}
+#endif /* __rtems__ */
 		if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
 			device_printf(sc->dev, "Could not alloc mbuf\n");
 			error = ENOMEM;
@@ -1313,8 +1382,13 @@ dwc_attach(device_t dev)
 		return (ENXIO);
 
 	/* Setup addresses */
+#ifndef __rtems__
 	WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr);
 	WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr);
+#else /* __rtems__ */
+	WRITE4(sc, RX_DESCR_LIST_ADDR, (uint32_t)&sc->rxdesc_ring[0]);
+	WRITE4(sc, TX_DESCR_LIST_ADDR, (uint32_t)&sc->txdesc_ring[0]);
+#endif /* __rtems__ */
 
 	mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
 	    MTX_NETWORK_LOCK, MTX_DEF);
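
Because nothing loads the descriptor rings through bus_dma(9) on RTEMS, there is no bus_dma-provided physical address; the RTEMS branches above chain descriptors and program RX_DESCR_LIST_ADDR/TX_DESCR_LIST_ADDR with the CPU addresses of the descriptors themselves. This assumes a flat, identity-mapped (virtual == physical) 32-bit address space, as on typical RTEMS BSPs. A standalone sketch of that chaining, with struct hwdesc and DESC_COUNT as illustrative stand-ins for struct dwc_hwdesc and TX_DESC_COUNT/RX_DESC_COUNT:

#include <stdint.h>

struct hwdesc {
	uint32_t addr;		/* DMA buffer address */
	uint32_t addr_next;	/* address of the next descriptor */
};

#define DESC_COUNT 8

static struct hwdesc ring[DESC_COUNT];

/* Link each descriptor to the next in a circular chain using its
 * CPU address; correct only when virtual == physical on a 32-bit
 * target. */
static void
chain_descriptors(void)
{
	int idx;

	for (idx = 0; idx < DESC_COUNT; ++idx)
		ring[idx].addr_next = (uint32_t)&ring[(idx + 1) % DESC_COUNT];
}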

if_dwc.h

@@ -60,7 +60,9 @@
 #define	TX_MAX_DMA_SEGS	8	/* maximum segs in a tx mbuf dma */
 
 struct dwc_bufmap {
+#ifndef __rtems__
 	bus_dmamap_t		map;
+#endif /* __rtems__ */
 	struct mbuf		*mbuf;
 };
 
@@ -88,8 +90,10 @@ struct dwc_softc {
 	bus_dma_tag_t		rxdesc_tag;
 	bus_dmamap_t		rxdesc_map;
 	struct dwc_hwdesc	*rxdesc_ring;
+#ifndef __rtems__
 	bus_addr_t		rxdesc_ring_paddr;
 	bus_dma_tag_t		rxbuf_tag;
+#endif /* __rtems__ */
 	struct dwc_bufmap	rxbuf_map[RX_DESC_COUNT];
 	uint32_t		rx_idx;
@@ -97,8 +101,10 @@ struct dwc_softc {
 	bus_dma_tag_t		txdesc_tag;
 	bus_dmamap_t		txdesc_map;
 	struct dwc_hwdesc	*txdesc_ring;
+#ifndef __rtems__
 	bus_addr_t		txdesc_ring_paddr;
 	bus_dma_tag_t		txbuf_tag;
+#endif /* __rtems__ */
 	struct dwc_bufmap	txbuf_map[TX_DESC_COUNT];
 	uint32_t		tx_idx_head;
 	uint32_t		tx_idx_tail;
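
For reference, with __rtems__ defined the conditionals above reduce the per-buffer bookkeeping to just the mbuf pointer; a sketch of the effective layout:

/* Effective RTEMS-side layout of the buffer map entry: there is no
 * bus_dmamap_t, because no per-buffer DMA map is ever created,
 * synced, or unloaded on RTEMS. */
struct dwc_bufmap {
	struct mbuf	*mbuf;
};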