Mirror of https://git.rtems.org/rtems-libbsd/
busdma: Option to round to cache lines on sync

Some targets support only flushing or invalidating complete cache lines. In such cases, misaligned buffers can lead to unexpected results. This patch adds a flag that lets a driver signal to the bus DMA driver that it is OK to round a buffer up to the next full cache line. That is necessary, for example, if a driver wants to send out 14 bytes via USB DMA: only the driver knows whether these 14 bytes are located in an otherwise unused, cache-line-aligned buffer.
parent 7e5d93bb6b
commit 4820ccecc9
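As context for the 14-byte example in the commit message, here is a minimal sketch of how a driver could use the new per-frame hint. This is a hypothetical driver fragment, not part of this commit; the 64-byte alignment stands in for the target's real cache-line size:

	/* Buffer sits alone in cache-line aligned storage; 14 bytes used. */
	static uint8_t tx_buf[64] __aligned(64);

	static void
	example_setup_frame(struct usb_xfer *xfer)
	{
		usbd_xfer_set_frame_data(xfer, 0, tx_buf, 14);
		/*
		 * tx_buf owns its cache lines entirely, so rounding the
		 * sync/flush range to full lines cannot clobber neighbors.
		 */
		usbd_xfer_frame_allow_cache_line_blow_up(xfer, 0);
	}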
@@ -67,6 +67,9 @@
 #include <dev/usb/usb_controller.h>
 #include <dev/usb/usb_bus.h>
 #endif /* USB_GLOBAL_INCLUDE_FILE */
+#ifdef __rtems__
+#include <machine/rtems-bsd-cache.h>
+#endif /* __rtems__ */
 
 #if USB_HAVE_BUSDMA
 static void	usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
@@ -543,6 +546,15 @@ usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
 
 	uptag = pc->tag_parent;
 
+#if defined(__rtems__) && defined(CPU_DATA_CACHE_ALIGNMENT)
+	while (align % CPU_DATA_CACHE_ALIGNMENT != 0) {
+		align *= 2;
+	}
+	if (size % CPU_DATA_CACHE_ALIGNMENT != 0) {
+		size = (size + (CPU_DATA_CACHE_ALIGNMENT - 1)) &
+		    ~(CPU_DATA_CACHE_ALIGNMENT - 1);
+	}
+#endif /* __rtems__ */
 	if (align != 1) {
 		/*
 		 * The alignment must be greater or equal to the
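The rounding in the hunk above is the usual power-of-two round-up idiom. A standalone check of the arithmetic, assuming a 32-byte cache line (the patch takes the real value from CPU_DATA_CACHE_ALIGNMENT):

	#include <assert.h>
	#include <stddef.h>

	#define CACHE_LINE 32u	/* assumed here; target-specific in the patch */

	static size_t
	round_to_cache_line(size_t size)
	{
		return (size + (CACHE_LINE - 1)) & ~(size_t)(CACHE_LINE - 1);
	}

	int
	main(void)
	{
		assert(round_to_cache_line(14) == 32);	/* 14-byte buffer -> one line */
		assert(round_to_cache_line(32) == 32);	/* exact multiple: unchanged */
		assert(round_to_cache_line(33) == 64);	/* spills into a second line */
		return 0;
	}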
@@ -605,7 +617,12 @@ usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
 	/* load memory into DMA */
 	err = bus_dmamap_load(
 	    utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
+#if defined(__rtems__) && CPU_DATA_CACHE_ALIGNMENT
+	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT |
+	    BUS_DMA_DO_CACHE_LINE_BLOW_UP));
+#else /* __rtems__ */
 	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
+#endif /* __rtems__ */
 
 	if (err == EINPROGRESS) {
 		cv_wait(uptag->cv, uptag->mtx);
@@ -662,6 +679,12 @@ usb_pc_free_mem(struct usb_page_cache *pc)
 uint8_t
 usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
 {
+#ifdef __rtems__
+	int flags;
+
+	flags = pc->dma_do_cache_line_blow_up ?
+	    BUS_DMA_DO_CACHE_LINE_BLOW_UP : 0;
+#endif /* __rtems__ */
 	/* setup page cache */
 	pc->page_offset_buf = 0;
 	pc->page_offset_end = size;
@@ -687,7 +710,11 @@ usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
 		 */
 		err = bus_dmamap_load(
 		    pc->tag, pc->map, pc->buffer, size,
+#ifndef __rtems__
 		    &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
+#else /* __rtems__ */
+		    &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK | flags);
+#endif /* __rtems__ */
 		if (err == EINPROGRESS) {
 			cv_wait(uptag->cv, uptag->mtx);
 			err = 0;
@@ -709,7 +736,11 @@ usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
 		 */
 		if (bus_dmamap_load(
 		    pc->tag, pc->map, pc->buffer, size,
+#ifndef __rtems__
 		    &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
+#else /* __rtems__ */
+		    &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK | flags)) {
+#endif /* __rtems__ */
 		}
 	}
 } else {
@@ -102,6 +102,11 @@ struct usb_page_cache {
 					 * from the memory. Else write. */
 	uint8_t	ismultiseg:1;		/* set if we can have multiple
 					 * segments */
+#ifdef __rtems__
+	uint8_t	dma_do_cache_line_blow_up:1;
+					/* set if it is OK to align the buffer
+					 * start and end to next cache line */
+#endif /* __rtems__ */
 #endif
 };
 
@@ -2195,6 +2195,9 @@ usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
 	/* set virtual address to load and length */
 	xfer->frbuffers[frindex].buffer = ptr;
 	usbd_xfer_set_frame_len(xfer, frindex, len);
+#ifdef __rtems__
+	xfer->frbuffers[frindex].dma_do_cache_line_blow_up = 0;
+#endif /* __rtems__ */
 }
 
 void
@@ -2209,6 +2212,23 @@ usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
 	*len = xfer->frlengths[frindex];
 }
 
+#ifdef __rtems__
+/*------------------------------------------------------------------------*
+ *	usbd_xfer_frame_allow_cache_line_blow_up
+ *
+ * Set a flag that the buffer start and end belonging to this frame can be
+ * aligned to the next cache line on sync and flush.
+ *------------------------------------------------------------------------*/
+void
+usbd_xfer_frame_allow_cache_line_blow_up(struct usb_xfer *xfer,
+    usb_frcount_t frindex)
+{
+	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
+
+	xfer->frbuffers[frindex].dma_do_cache_line_blow_up = 1;
+}
+
+#endif /* __rtems__ */
 /*------------------------------------------------------------------------*
  *	usbd_xfer_old_frame_length
 *
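Note the interaction with the usbd_xfer_set_frame_data() hunk above: setting frame data resets dma_do_cache_line_blow_up to zero, so a driver has to opt in again after each time it (re)assigns frame data. A short fragment:

	usbd_xfer_set_frame_data(xfer, 0, buf, 14);		/* clears the bit */
	usbd_xfer_frame_allow_cache_line_blow_up(xfer, 0);	/* sets it again */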
@@ -640,6 +640,10 @@ void	usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
 	    void *ptr, usb_frlength_t len);
 void	usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
 	    void **ptr, int *len);
+#ifdef __rtems__
+void	usbd_xfer_frame_allow_cache_line_blow_up(struct usb_xfer *xfer,
+	    usb_frcount_t frindex);
+#endif /* __rtems__ */
 void	usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
 	    usb_frcount_t frindex);
 usb_frlength_t usbd_xfer_max_len(struct usb_xfer *xfer);
@@ -107,6 +107,12 @@
 #define	BUS_DMA_KEEP_PG_OFFSET	0x400
 
 #define	BUS_DMA_LOAD_MBUF	0x800
+#ifdef __rtems__
+/*
+ * Hint that the start address and size can be aligned to the next cache line.
+ */
+#define	BUS_DMA_DO_CACHE_LINE_BLOW_UP	0x80000000
+#endif /* __rtems__ */
 
 /* Forwards needed by prototypes below. */
 union ccb;
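The new flag takes the topmost bit, keeping it clear of the FreeBSD BUS_DMA_* values above it. A driver that manages its own map can OR the hint into any bus_dmamap_load() flags word; a hypothetical fragment (buffer_owns_whole_cache_lines is driver-specific knowledge, not an API, and tag/map/buf/len/callback/arg come from the surrounding driver):

	int flags = BUS_DMA_WAITOK | BUS_DMA_COHERENT;

	if (buffer_owns_whole_cache_lines)
		flags |= BUS_DMA_DO_CACHE_LINE_BLOW_UP;

	err = bus_dmamap_load(tag, map, buf, len, callback, arg, flags);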
@@ -75,6 +75,9 @@ struct bus_dma_tag {
 struct bus_dmamap {
 	void		*buffer_begin;
 	bus_size_t	buffer_size;
+	int		flags;
+/* OK to flush / invalidate the complete cache line */
+#define	DMAMAP_CACHE_ALIGNED	(1 << 0)
 };
 
 int
@@ -69,6 +69,9 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
 	int first = 1;
 	bus_addr_t lastaddr = 0;
 	struct mbuf *m;
+	if ((flags & BUS_DMA_LOAD_MBUF) != 0) {
+		map->flags |= DMAMAP_CACHE_ALIGNED;
+	}
 
 	for (m = m0; m != NULL && error == 0; m = m->m_next) {
 		if (m->m_len > 0) {
@@ -365,9 +365,13 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 
 	map->buffer_begin = buf;
 	map->buffer_size = buflen;
+	if ((flags & BUS_DMA_DO_CACHE_LINE_BLOW_UP) != 0) {
+		map->flags |= DMAMAP_CACHE_ALIGNED;
+	}
+
 	lastaddr = (vm_offset_t)0;
 	nsegs = 0;
 
 	error = bus_dmamap_load_buffer(dmat, dm_segments, buf, buflen,
 	    NULL, flags, &lastaddr, &nsegs, 1);
 
@@ -397,6 +401,11 @@ bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 	uintptr_t begin = (uintptr_t) map->buffer_begin;
 	uintptr_t end = begin + size;
 
+	if ((map->flags & DMAMAP_CACHE_ALIGNED) != 0) {
+		begin &= ~CLMASK;
+		end = (end + CLMASK) & ~CLMASK;
+		size = end - begin;
+	}
 	if ((op & BUS_DMASYNC_PREWRITE) != 0 && (op & BUS_DMASYNC_PREREAD) == 0) {
 		rtems_cache_flush_multiple_data_lines((void *) begin, size);
 	}
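To make the rounding in bus_dmamap_sync() concrete: with a 32-byte line (so CLMASK == 31) and a 14-byte buffer starting at 0x1000, a standalone sketch of the computation:

	#include <assert.h>
	#include <stdint.h>

	#define CLMASK ((uintptr_t)31)	/* assumed 32-byte cache line */

	int
	main(void)
	{
		uintptr_t begin = 0x1000;	/* cache-line aligned start */
		uintptr_t end = begin + 14;	/* 14-byte payload */
		uintptr_t size;

		begin &= ~CLMASK;			/* 0x1000, unchanged */
		end = (end + CLMASK) & ~CLMASK;		/* 0x100e -> 0x1020 */
		size = end - begin;

		assert(begin == 0x1000 && size == 32);	/* one full line synced */
		return 0;
	}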