mirror of https://git.rtems.org/rtems-libbsd/ (synced 2025-06-28 02:20:12 +08:00)
parent 0653b1db32
commit 53145c7232
@@ -52,6 +52,7 @@ net80211 = off
 netinet = on
 netinet6 = on
 netipsec = off
+nvme = on
 opencrypto = on
 pci = on
 pf = on
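With nvme = on in the buildset, the FreeBSD NVMe driver is compiled into libbsd and registers itself through the SYSINIT machinery when the BSD environment is brought up. A minimal application sketch, assuming the usual rtems_bsd_initialize() entry point from <rtems/bsd/bsd.h>; the example_ names are hypothetical:

#include <assert.h>
#include <rtems.h>
#include <rtems/bsd/bsd.h>

static void
example_start_libbsd(void)
{
        rtems_status_code sc;

        /* Runs all registered SYSINITs, including the driver's
         * SYSINIT(nvme_register, ...) shown in the next hunk. */
        sc = rtems_bsd_initialize();
        assert(sc == RTEMS_SUCCESSFUL);
}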
@@ -73,6 +73,7 @@ nvme_init(void)
 
 SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);
 
+#ifndef __rtems__
 static void
 nvme_uninit(void)
 {
@@ -80,6 +81,7 @@ nvme_uninit(void)
 }
 
 SYSUNINIT(nvme_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_uninit, NULL);
+#endif /* __rtems__ */
 
 int
 nvme_shutdown(device_t dev)
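The two hunks above compile out nvme_uninit()/SYSUNINIT on RTEMS: libbsd is initialized once at boot and never unloaded, so module unregistration is dead code, while the SYSINIT registration path is kept. For reference, a driver-registration sketch in the same pattern (example_ names are hypothetical):

#include <sys/param.h>
#include <sys/kernel.h>

static void
example_driver_init(void *arg)
{
        /* register the driver with its parent subsystem here */
}
SYSINIT(example_driver_register, SI_SUB_DRIVERS, SI_ORDER_ANY,
    example_driver_init, NULL);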
@@ -61,8 +61,10 @@ nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
 
        qpair = &ctrlr->adminq;
        qpair->id = 0;
+#ifndef __rtems__
        qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
        qpair->domain = ctrlr->domain;
+#endif /* __rtems__ */
 
        num_entries = NVME_ADMIN_ENTRIES;
        TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
@@ -146,6 +148,7 @@ nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
                 * hence the 'i+1' here.
                 */
                qpair->id = i + 1;
+#ifndef __rtems__
                if (ctrlr->num_io_queues > 1) {
                        /* Find number of CPUs served by this queue. */
                        for (n = 1; QP(ctrlr, c + n) == i; n++)
@@ -157,6 +160,7 @@ nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
                        qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
                        qpair->domain = ctrlr->domain;
                }
+#endif /* __rtems__ */
 
                /*
                 * For I/O queues, use the controller-wide max_xfer_size
@@ -172,7 +176,11 @@ nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
                 * interrupt thread for this controller.
                 */
                if (ctrlr->num_io_queues > 1)
+#ifndef __rtems__
                        bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu);
+#else /* __rtems__ */
+                       bus_bind_intr(ctrlr->dev, qpair->res, QP(ctrlr, i));
+#endif /* __rtems__ */
        }
 
        return (0);
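On RTEMS there is no qpair->cpu field (see the struct hunks further down, apparently from nvme_private.h), so the interrupt is bound using the queue index through QP(). In the FreeBSD driver of this vintage, QP() is roughly the following round-robin map from a CPU index to the I/O queue pair serving it; treat this as an assumption and check nvme_private.h for the authoritative definition:

#define QP(ctrlr, c)    ((c) % (ctrlr)->num_io_queues)

Feeding a queue index i through it, as the RTEMS branch does, yields a valid CPU number whenever num_io_queues does not exceed the processor count.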
@@ -1010,7 +1018,9 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
 {
        struct nvme_request *req;
        struct mtx *mtx;
+#ifndef __rtems__
        struct buf *buf = NULL;
+#endif /* __rtems__ */
        int ret = 0;
        vm_offset_t addr, end;
 
@@ -1032,6 +1042,7 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
                    ctrlr->max_xfer_size);
                return EIO;
        }
+#ifndef __rtems__
        if (is_user_buffer) {
                /*
                 * Ensure the user buffer is wired for the duration of
@@ -1049,6 +1060,7 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
                        req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
                            nvme_pt_done, pt);
                } else
+#endif /* __rtems__ */
                        req = nvme_allocate_request_vaddr(pt->buf, pt->len,
                            nvme_pt_done, pt);
        } else
@@ -1081,11 +1093,13 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
                mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
        mtx_unlock(mtx);
 
+#ifndef __rtems__
 err:
        if (buf != NULL) {
                uma_zfree(pbuf_zone, buf);
                PRELE(curproc);
        }
+#endif /* __rtems__ */
 
        return (ret);
 }
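Because RTEMS runs in a single address space, the is_user_buffer path (which wires the caller's pages and bounces them through a pbuf) is compiled out; pt->buf is always directly addressable. A passthrough ioctl sketch, assuming the FreeBSD nvme_pt_command layout and a hypothetical /dev/nvme0 node; field spellings can differ slightly between FreeBSD versions, and the example_ names are invented:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <dev/nvme/nvme.h>

static int
example_identify_controller(void *payload /* 4096 bytes */)
{
        struct nvme_pt_command pt;
        int fd, rv;

        fd = open("/dev/nvme0", O_RDWR);
        if (fd < 0)
                return (-1);
        memset(&pt, 0, sizeof(pt));
        pt.cmd.opc = NVME_OPC_IDENTIFY;
        pt.cmd.cdw10 = 1;       /* CNS 1: identify controller */
        pt.buf = payload;       /* plain buffer; no wiring needed on RTEMS */
        pt.len = 4096;
        pt.is_read = 1;         /* data flows controller -> host */
        rv = ioctl(fd, NVME_PASSTHROUGH_CMD, &pt);
        (void)close(fd);
        return (rv);
}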
@@ -1141,8 +1155,10 @@ nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
        ctrlr->dev = dev;
 
        mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
+#ifndef __rtems__
        if (bus_get_domain(dev, &ctrlr->domain) != 0)
                ctrlr->domain = 0;
+#endif /* __rtems__ */
 
        cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
        ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2;
@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
 
 #include "nvme_private.h"
 
+#ifndef __rtems__
 static void nvme_bio_child_inbed(struct bio *parent, int bio_error);
 static void nvme_bio_child_done(void *arg,
     const struct nvme_completion *cpl);
@@ -63,6 +64,7 @@ static struct bio ** nvme_construct_child_bios(struct bio *bp,
 static int nvme_ns_split_bio(struct nvme_namespace *ns,
     struct bio *bp,
     uint32_t alignment);
+#endif /* __rtems__ */
 
 static int
 nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
@@ -76,10 +78,12 @@ nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
        ctrlr = ns->ctrlr;
 
        switch (cmd) {
+#ifndef __rtems__
        case NVME_IO_TEST:
        case NVME_BIO_TEST:
                nvme_ns_test(ns, cmd, arg);
                break;
+#endif /* __rtems__ */
        case NVME_PASSTHROUGH_CMD:
                pt = (struct nvme_pt_command *)arg;
                return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, ns->id,
@@ -125,6 +129,7 @@ nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
        return (0);
 }
 
+#ifndef __rtems__
 static void
 nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl)
 {
@@ -161,15 +166,20 @@ nvme_ns_strategy(struct bio *bp)
        }
 
 }
+#endif /* __rtems__ */
 
 static struct cdevsw nvme_ns_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_DISK,
+#ifndef __rtems__
        .d_read =       physread,
        .d_write =      physwrite,
+#endif /* __rtems__ */
        .d_open =       nvme_ns_open,
        .d_close =      nvme_ns_close,
+#ifndef __rtems__
        .d_strategy =   nvme_ns_strategy,
+#endif /* __rtems__ */
        .d_ioctl =      nvme_ns_ioctl
 };
 
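The cdevsw loses its buffer-cache entry points on RTEMS: physread/physwrite and the strategy routine belong to the host kernel's bio path, so the namespace device reduces to open/close/ioctl. A minimal cdevsw in that reduced shape (a sketch only; example_ names are hypothetical):

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/conf.h>

static int
example_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
        return (ENOTTY);        /* no commands supported in this sketch */
}

static struct cdevsw example_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_DISK,
        .d_ioctl =      example_ioctl,
        .d_name =       "example",
};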
@@ -240,6 +250,7 @@ nvme_ns_get_stripesize(struct nvme_namespace *ns)
        return (ns->boundary);
 }
 
+#ifndef __rtems__
 static void
 nvme_ns_bio_done(void *arg, const struct nvme_completion *status)
 {
@@ -496,6 +507,7 @@ nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
 
        return (err);
 }
+#endif /* __rtems__ */
 
 int
 nvme_ns_ioctl_process(struct nvme_namespace *ns, u_long cmd, caddr_t arg,
@@ -52,6 +52,7 @@ nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
        return (0);
 }
 
+#ifndef __rtems__
 int
 nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
     nvme_cb_fn_t cb_fn, void *cb_arg)
@@ -73,6 +74,7 @@ nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
 
        return (0);
 }
+#endif /* __rtems__ */
 
 int
 nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
@@ -93,6 +95,7 @@ nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
        return (0);
 }
 
+#ifndef __rtems__
 int
 nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
     nvme_cb_fn_t cb_fn, void *cb_arg)
@@ -113,6 +116,7 @@ nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
 
        return (0);
 }
+#endif /* __rtems__ */
 
 int
 nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
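The bio-based wrappers are host-only; RTEMS callers stay on the plain buffer entry points, which are asynchronous and completion-callback driven. A single-LBA read might look like this sketch; the nvme_ns_cmd_read() signature is taken from the surrounding driver, and the example_ names are hypothetical:

#include <sys/param.h>
#include <sys/systm.h>
#include <dev/nvme/nvme.h>

static void
example_read_done(void *arg, const struct nvme_completion *cpl)
{
        if (nvme_completion_is_error(cpl))
                printf("nvme read failed\n");
}

static int
example_read_lba0(struct nvme_namespace *ns, void *buf)
{
        /* payload, starting LBA, LBA count, callback, callback argument */
        return (nvme_ns_cmd_read(ns, buf, 0, 1, example_read_done, NULL));
}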
@@ -262,7 +262,9 @@ nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
 {
        device_t dev;
        int force_intx, num_io_queues, per_cpu_io_queues;
+#ifndef __rtems__
        int min_cpus_per_ioq;
+#endif /* __rtems__ */
        int num_vectors_requested, num_vectors_allocated;
 
        dev = ctrlr->dev;
@@ -284,12 +286,14 @@ nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
        if (per_cpu_io_queues == 0)
                num_io_queues = 1;
 
+#ifndef __rtems__
        min_cpus_per_ioq = smp_threads_per_core;
        TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);
        if (min_cpus_per_ioq > 1) {
                num_io_queues = min(num_io_queues,
                    max(1, mp_ncpus / min_cpus_per_ioq));
        }
+#endif /* __rtems__ */
 
        num_io_queues = min(num_io_queues, pci_msix_count(dev) - 1);
 
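The min_cpus_per_ioq clamp is meaningful only with per-CPU interrupt affinity, so it is host-only; on RTEMS the requested queue count falls straight through to the MSI-X limit, where one vector stays reserved for the admin queue. A worked example of the arithmetic under assumed inputs of 8 CPUs, SMT-2 cores, and 9 MSI-X vectors:

#include <stdio.h>

#define MIN(a, b)       ((a) < (b) ? (a) : (b))
#define MAX(a, b)       ((a) > (b) ? (a) : (b))

int
main(void)
{
        int mp_ncpus = 8;               /* assumed CPU count */
        int min_cpus_per_ioq = 2;       /* smp_threads_per_core */
        int msix_count = 9;             /* pci_msix_count(dev) */
        int num_io_queues = mp_ncpus;   /* per-CPU queues requested */

        if (min_cpus_per_ioq > 1)
                num_io_queues = MIN(num_io_queues,
                    MAX(1, mp_ncpus / min_cpus_per_ioq));       /* -> 4 */
        num_io_queues = MIN(num_io_queues, msix_count - 1);     /* -> 4 */
        printf("num_io_queues = %d\n", num_io_queues);
        return (0);
}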
@@ -168,15 +168,21 @@ struct nvme_tracker {
        uint16_t cid;
 
        uint64_t *prp;
+#ifndef __rtems__
        bus_addr_t prp_bus_addr;
+#else /* __rtems__ */
+       uint64_t prp_bus_addr;
+#endif /* __rtems__ */
 };
 
 struct nvme_qpair {
 
        struct nvme_controller *ctrlr;
        uint32_t id;
+#ifndef __rtems__
        int domain;
        int cpu;
+#endif /* __rtems__ */
 
        uint16_t vector;
        int rid;
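Two things change shape on RTEMS: the tracker stores its PRP bus address as an explicit uint64_t, since NVMe PRP entries in the submission queue are always 64-bit while bus_addr_t can be narrower on some RTEMS targets, and the qpair drops the NUMA bookkeeping fields. The effective RTEMS-side view, with unrelated fields omitted (a sketch only; the example_ type names are invented):

#include <stdint.h>

struct nvme_controller;

struct example_tracker_rtems_view {
        uint16_t cid;
        uint64_t *prp;
        uint64_t prp_bus_addr;  /* bus_addr_t on FreeBSD hosts */
};

struct example_qpair_rtems_view {
        struct nvme_controller *ctrlr;
        uint32_t id;            /* no domain/cpu fields on RTEMS */
        uint16_t vector;
        int rid;
};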
@@ -240,7 +246,9 @@ struct nvme_controller {
        device_t dev;
 
        struct mtx lock;
+#ifndef __rtems__
        int domain;
+#endif /* __rtems__ */
        uint32_t ready_timeout_in_ms;
        uint32_t quirks;
 #define QUIRK_DELAY_B4_CHK_RDY 1        /* Can't touch MMIO on disable */
@@ -709,7 +709,9 @@ nvme_qpair_construct(struct nvme_qpair *qpair,
                nvme_printf(ctrlr, "tag create failed %d\n", err);
                goto out;
        }
+#ifndef __rtems__
        bus_dma_tag_set_domain(qpair->dma_tag, qpair->domain);
+#endif /* __rtems__ */
 
        if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
            BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
@@ -943,8 +945,13 @@ nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
        ctrlr = qpair->ctrlr;
 
        if (req->timeout)
+#ifndef __rtems__
                callout_reset_on(&tr->timer, ctrlr->timeout_period * hz,
                    nvme_timeout, tr, qpair->cpu);
+#else /* __rtems__ */
+               callout_reset_on(&tr->timer, ctrlr->timeout_period * hz,
+                   nvme_timeout, tr, -1);
+#endif /* __rtems__ */
 
        /* Copy the command from the tracker to the submission queue. */
        memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));
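The last argument of callout_reset_on() names the CPU whose timer wheel should run the handler. In FreeBSD's <sys/callout.h>, plain callout_reset() expands to callout_reset_on() with cpu == -1, meaning "keep the current binding", so the RTEMS branch is simply an unbound timeout. A standalone sketch of that neutral form (example_ names are hypothetical):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>

static struct callout example_timer;

static void
example_timeout(void *arg)
{
        printf("example timeout fired\n");
}

static void
example_arm_timeout(void)
{
        callout_init(&example_timer, 1 /* MPSAFE */);
        /* cpu == -1: same effect as callout_reset(&example_timer, hz, ...) */
        callout_reset_on(&example_timer, hz, example_timeout, NULL, -1);
}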
@@ -1073,6 +1080,7 @@ _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
        case NVME_REQUEST_NULL:
                nvme_qpair_submit_tracker(tr->qpair, tr);
                break;
+#ifndef __rtems__
        case NVME_REQUEST_BIO:
                KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size,
                    ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n",
@@ -1092,6 +1100,7 @@ _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
                        nvme_printf(qpair->ctrlr,
                            "bus_dmamap_load_ccb returned 0x%x!\n", err);
                break;
+#endif /* __rtems__ */
        default:
                panic("unknown nvme request type 0x%x\n", req->type);
                break;
@@ -185,7 +185,11 @@ void *contigmalloc_domainset(unsigned long size, struct malloc_type *type,
            unsigned long alignment, vm_paddr_t boundary)
            __malloc_like __result_use_check __alloc_size(1) __alloc_align(7);
 void free(void *addr, struct malloc_type *type);
+#ifndef __rtems__
 void free_domain(void *addr, struct malloc_type *type);
+#else /* __rtems__ */
+#define free_domain(addr, type) free(addr, type)
+#endif /* __rtems__ */
 #ifndef __rtems__
 void *malloc(size_t size, struct malloc_type *type, int flags) __malloc_like
            __result_use_check __alloc_size(1);
@@ -250,9 +254,13 @@ void *_bsd_malloc(size_t size, struct malloc_type *type, int flags)
                _malloc_item;                                           \
        })
 
+#ifndef __rtems__
 void *malloc_domainset(size_t size, struct malloc_type *type,
            struct domainset *ds, int flags) __malloc_like __result_use_check
            __alloc_size(1);
+#else /* __rtems__ */
+#define malloc_domainset(size, type, ds, flags) malloc(size, type, flags)
+#endif /* __rtems__ */
 void *mallocarray(size_t nmemb, size_t size, struct malloc_type *type,
            int flags) __malloc_like __result_use_check
            __alloc_size2(1, 2);
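Both malloc.h hunks apply the same single-domain shim: RTEMS has one memory domain, so the domainset-aware allocator variants (free_domain(), malloc_domainset()) can alias their plain counterparts without losing anything. The same pattern applied to a hypothetical domain-aware API (example_ names are invented; only the shim shape matters):

#include <stddef.h>

struct domainset;

void *example_alloc(size_t size, int flags);

#ifndef __rtems__
void *example_alloc_domainset(size_t size, struct domainset *ds, int flags);
#else /* __rtems__ */
#define example_alloc_domainset(size, ds, flags) example_alloc(size, flags)
#endif /* __rtems__ */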
rtemsbsd/include/rtems/bsd/local/opt_nvme.h (new, empty file)