Linux update to 4.11-rc5
Linux baseline a71c9a1c779f2499fb2afc0553e543f18aff6edf (4.11-rc5).
parent 28ee86a9b0
commit cd089b9e05

libbsd.py | 32
@@ -4177,11 +4177,32 @@ def mghttpd(mm):

def dpaa(mm):
    mod = builder.Module('dpaa')
    mod.addLinuxHeaderFiles(
        [
            'include/soc/fsl/bman.h',
            'include/soc/fsl/qman.h',
            'drivers/net/ethernet/freescale/fman/mac.h',
            'drivers/net/ethernet/freescale/fman/fman.h',
            'drivers/net/ethernet/freescale/fman/fman_tgec.h',
            'drivers/net/ethernet/freescale/fman/fman_mac.h',
            'drivers/net/ethernet/freescale/fman/fman_muram.h',
            'drivers/net/ethernet/freescale/fman/fman_port.h',
            'drivers/net/ethernet/freescale/fman/fman_dtsec.h',
            'drivers/net/ethernet/freescale/fman/fman_memac.h',
            'drivers/net/ethernet/freescale/fman/fman_sp.h',
            'drivers/net/ethernet/freescale/dpaa/dpaa_eth.h',
            'drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h',
            'drivers/soc/fsl/qbman/qman_priv.h',
            'drivers/soc/fsl/qbman/bman_test.h',
            'drivers/soc/fsl/qbman/dpaa_sys.h',
            'drivers/soc/fsl/qbman/bman_priv.h',
            'drivers/soc/fsl/qbman/qman_test.h',
        ]
    )
    mod.addCPUDependentLinuxSourceFiles(
        [ 'powerpc' ],
        [
            'drivers/net/ethernet/freescale/dpaa/dpaa_eth.c',
            'drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c',
            'drivers/net/ethernet/freescale/fman/fman.c',
            'drivers/net/ethernet/freescale/fman/fman_dtsec.c',
            'drivers/net/ethernet/freescale/fman/fman_memac.c',
@@ -4189,19 +4210,14 @@ def dpaa(mm):
            'drivers/net/ethernet/freescale/fman/fman_sp.c',
            'drivers/net/ethernet/freescale/fman/fman_tgec.c',
            'drivers/net/ethernet/freescale/fman/mac.c',
            'drivers/soc/fsl/qbman/bman_api.c',
            'drivers/soc/fsl/qbman/bman.c',
            'drivers/soc/fsl/qbman/bman_ccsr.c',
            'drivers/soc/fsl/qbman/bman_test_api.c',
            'drivers/soc/fsl/qbman/bman_test.c',
            'drivers/soc/fsl/qbman/bman_test_thresh.c',
            'drivers/soc/fsl/qbman/bman_utils.c',
            'drivers/soc/fsl/qbman/dpaa_resource.c',
            'drivers/soc/fsl/qbman/qman_api.c',
            'drivers/soc/fsl/qbman/qman.c',
            'drivers/soc/fsl/qbman/qman_ccsr.c',
            'drivers/soc/fsl/qbman/qman_portal.c',
            'drivers/soc/fsl/qbman/qman_test_api.c',
            'drivers/soc/fsl/qbman/qman_test_stash.c',
            'drivers/soc/fsl/qbman/qman_utils.c',
        ],
        mm.generator['source']()
    )

@@ -2304,7 +2304,6 @@ def build(bld):
    if bld.get_env()["RTEMS_ARCH"] == "powerpc":
        source += ['freebsd/sys/powerpc/powerpc/in_cksum.c',
                   'linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c',
                   'linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c',
                   'linux/drivers/net/ethernet/freescale/fman/fman.c',
                   'linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c',
                   'linux/drivers/net/ethernet/freescale/fman/fman_memac.c',
@@ -2313,18 +2312,13 @@ def build(bld):
                   'linux/drivers/net/ethernet/freescale/fman/fman_tgec.c',
                   'linux/drivers/net/ethernet/freescale/fman/mac.c',
                   'linux/drivers/soc/fsl/qbman/bman.c',
                   'linux/drivers/soc/fsl/qbman/bman_api.c',
                   'linux/drivers/soc/fsl/qbman/bman_test.c',
                   'linux/drivers/soc/fsl/qbman/bman_ccsr.c',
                   'linux/drivers/soc/fsl/qbman/bman_test_api.c',
                   'linux/drivers/soc/fsl/qbman/bman_test_thresh.c',
                   'linux/drivers/soc/fsl/qbman/bman_utils.c',
                   'linux/drivers/soc/fsl/qbman/dpaa_resource.c',
                   'linux/drivers/soc/fsl/qbman/qman.c',
                   'linux/drivers/soc/fsl/qbman/qman_api.c',
                   'linux/drivers/soc/fsl/qbman/qman_ccsr.c',
                   'linux/drivers/soc/fsl/qbman/qman_portal.c',
                   'linux/drivers/soc/fsl/qbman/qman_test_api.c',
                   'linux/drivers/soc/fsl/qbman/qman_test_stash.c',
                   'linux/drivers/soc/fsl/qbman/qman_utils.c',
                   'rtemsbsd/sys/powerpc/compat.c',
                   'rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.c',
                   'rtemsbsd/sys/powerpc/fdt_phy.c',

File diff suppressed because it is too large
@@ -1,4 +1,4 @@
/* Copyright 2008 - 2015 Freescale Semiconductor Inc.
/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -28,131 +28,36 @@
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __DPA_H
#define __DPA_H
#ifndef __DPAA_H
#define __DPAA_H

#include <linux/netdevice.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>

#include "fman.h"
#include "mac.h"
#include "dpaa_eth_trace.h"

#define DPAA_ETH_TXQ_NUM	NR_CPUS

#ifndef __rtems__
extern int dpa_rx_extra_headroom;
extern int dpa_max_frm;

#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
#define dpa_get_max_frm() dpa_max_frm
#define DPAA_BPS_NUM 3 /* number of bpools per interface */
#else /* __rtems__ */
#define dpa_get_rx_extra_headroom fman_get_rx_extra_headroom
#define dpa_get_max_frm fman_get_max_frm
#define DPAA_BPS_NUM 1
#endif /* __rtems__ */

#define dpa_get_max_mtu()	\
	(dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
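The macro above simply subtracts the Ethernet framing overhead from the configured maximum frame length. A minimal standalone sketch of that arithmetic (the 1522-byte frame length is an assumed example value, not something fixed by this header; VLAN_ETH_HLEN and ETH_FCS_LEN carry their usual Linux values):

	#include <stdio.h>

	#define VLAN_ETH_HLEN 18	/* 14-byte Ethernet header + 4-byte VLAN tag */
	#define ETH_FCS_LEN 4		/* frame check sequence */

	int main(void)
	{
		int max_frm = 1522;	/* hypothetical fman_get_max_frm() result */
		int max_mtu = max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN);

		printf("max MTU = %d\n", max_mtu);	/* prints 1500 */
		return 0;
	}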

/* Simple enum of FQ types - used for array indexing */
enum port_type {RX, TX};

struct dpa_buffer_layout_s {
	u16 priv_data_size;
	bool parse_results;
	bool time_stamp;
	bool hash_results;
	u16 data_align;
};

#define DPA_ERR_ON(cond)

#define DPA_TX_PRIV_DATA_SIZE 16
#define DPA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPA_TIME_STAMP_SIZE 8
#define DPA_HASH_RESULTS_SIZE 8
#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
			       dpa_get_rx_extra_headroom())

#define FM_FD_STAT_RX_ERRORS \
	(FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
	 FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
	 FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
	 FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
	 FM_FD_ERR_PRS_HDR_ERR)

#define FM_FD_STAT_TX_ERRORS \
	(FM_FD_ERR_UNSUPPORTED_FORMAT | \
	 FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)

/* The raw buffer size must be cacheline aligned.
 * Normally we use 2K buffers.
 */
#define DPA_BP_RAW_SIZE 2048

/* This is what FMan is ever allowed to use.
 * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
 * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
 * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
 * half-page-aligned buffers (can we?), so we reserve some more space
 * for start-of-buffer alignment.
 */
#ifndef __rtems__
#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
				    SMP_CACHE_BYTES)
#else /* __rtems__ */
/*
 * FIXME: 4 bytes would be enough for the mbuf pointer. However, jumbo receive
 * frames overwrite this area if < 64 bytes.
 */
#define DPA_OUT_OF_BAND_SIZE 64
#define DPA_MBUF_POINTER_OFFSET (DPA_BP_RAW_SIZE - DPA_OUT_OF_BAND_SIZE)
#define dpa_bp_size(buffer_layout) DPA_MBUF_POINTER_OFFSET
#endif /* __rtems__ */
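On the RTEMS side, the 2 KiB raw buffer is split into a usable area and a 64-byte out-of-band region that holds the mbuf back-pointer. A small self-contained sketch of that split, using only the constants defined above:

	#include <assert.h>

	#define DPA_BP_RAW_SIZE 2048
	#define DPA_OUT_OF_BAND_SIZE 64
	#define DPA_MBUF_POINTER_OFFSET (DPA_BP_RAW_SIZE - DPA_OUT_OF_BAND_SIZE)

	int main(void)
	{
		/* Usable buffer size equals the mbuf pointer offset: 1984 bytes. */
		assert(DPA_MBUF_POINTER_OFFSET == 1984);
		/* Everything past that offset is out-of-band storage for the mbuf
		 * back-pointer; it is kept >= 64 bytes because jumbo receive
		 * frames can overwrite a smaller tail area (see FIXME above).
		 */
		return 0;
	}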
/* We must ensure that skb_shinfo is always cacheline-aligned. */
#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))

/* Largest value that the FQD's OAL field can hold.
 * This is DPAA-1.x specific.
 */
#define FSL_QMAN_MAX_OAL 127

/* Default alignment for start of data in an Rx FD */
#define DPA_FD_DATA_ALIGNMENT 16

/* Values for the L3R field of the FM Parse Results
 */
/* L3 Type field: First IP Present IPv4 */
#define FM_L3_PARSE_RESULT_IPV4 0x8000
/* L3 Type field: First IP Present IPv6 */
#define FM_L3_PARSE_RESULT_IPV6 0x4000

/* Values for the L4R field of the FM Parse Results
 * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
 */
/* L4 Type field: UDP */
#define FM_L4_PARSE_RESULT_UDP 0x40
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP 0x20

/* number of Tx queues to FMan */
#define DPAA_ETH_TX_QUEUES NR_CPUS

#define DPAA_ETH_RX_QUEUES 128

#define FSL_DPAA_ETH_MAX_BUF_COUNT 128
#define FSL_DPAA_ETH_REFILL_THRESHOLD 80

/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpa_fq_type {
enum dpaa_fq_type {
	FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
	FQ_TYPE_RX_ERROR,	/* Rx Error FQs */
	FQ_TYPE_RX_PCD,		/* User-defined PCDs */
	FQ_TYPE_TX,		/* "Real" Tx FQs */
	FQ_TYPE_TX_CONFIRM,	/* Tx default Conf FQ (actually an Rx FQ) */
	FQ_TYPE_TX_CONF_MQ,	/* Tx conf FQs (one for each Tx FQ) */
	FQ_TYPE_TX_ERROR,	/* Tx Error FQs (these are actually Rx FQs) */
};

struct dpa_fq {
struct dpaa_fq {
	struct qman_fq fq_base;
	struct list_head list;
	struct net_device *net_dev;
@@ -161,10 +66,10 @@ struct dpa_fq {
	u32 flags;
	u16 channel;
	u8 wq;
	enum dpa_fq_type fq_type;
	enum dpaa_fq_type fq_type;
};

struct dpa_fq_cbs_t {
struct dpaa_fq_cbs {
	struct qman_fq rx_defq;
	struct qman_fq tx_defq;
	struct qman_fq rx_errq;
@@ -172,45 +77,30 @@ struct dpa_fq_cbs_t {
	struct qman_fq egress_ern;
};

struct fqid_cell {
	u32 start;
	u32 count;
};

struct dpa_bp {
	struct bman_pool *pool;
	u8 bpid;
#ifndef __rtems__
struct dpaa_bp {
	/* device used in the DMA mapping operations */
	struct device *dev;
#endif /* __rtems__ */
	/* the buffer pools used for the private ports are initialized
	 * with config_count buffers for each CPU; at runtime the
	 * number of buffers per CPU is constantly brought back to this
	 * level
	 */
	int config_count;
	size_t size;
	bool seed_pool;
	/* physical address of the contiguous memory used by the pool to store
	 * the buffers
	 */
	dma_addr_t paddr;
	/* virtual address of the contiguous memory used by the pool to store
	 * the buffers
	 */
	void __iomem *vaddr;
	/* current number of buffers in the bpool alloted to this CPU */
	/* current number of buffers in the buffer pool alloted to each CPU */
	int __percpu *percpu_count;
	atomic_t refs;
	/* some bpools need to be seeded before use by this cb */
	int (*seed_cb)(struct dpa_bp *);
	/* some bpools need to be emptied before freeing; this cb is used
	 * for freeing of individual buffers taken from the pool
	/* all buffers allocated for this pool have this raw size */
	size_t raw_size;
	/* all buffers in this pool have this same usable size */
	size_t size;
	/* the buffer pools are initialized with config_count buffers for each
	 * CPU; at runtime the number of buffers per CPU is constantly brought
	 * back to this level
	 */
	void (*free_buf_cb)(void *addr);
	u16 config_count;
	u8 bpid;
	struct bman_pool *pool;
	/* bpool can be seeded before use by this cb */
	int (*seed_cb)(struct dpaa_bp *);
	/* bpool can be emptied before freeing by this cb */
	void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
	atomic_t refs;
};

struct dpa_rx_errors {
struct dpaa_rx_errors {
	u64 dme;		/* DMA Error */
	u64 fpe;		/* Frame Physical Error */
	u64 fse;		/* Frame Size Error */
@@ -218,7 +108,7 @@ struct dpa_rx_errors {
};

/* Counters for QMan ERN frames - one counter per rejection code */
struct dpa_ern_cnt {
struct dpaa_ern_cnt {
	u64 cg_tdrop;		/* Congestion group taildrop */
	u64 wred;		/* WRED congestion */
	u64 err_cond;		/* Error condition */
@@ -229,16 +119,17 @@ struct dpa_ern_cnt {
	u64 orp_zero;		/* ORP disabled */
};

struct dpa_napi_portal {
struct dpaa_napi_portal {
#ifndef __rtems__
	struct napi_struct napi;
#endif /* __rtems__ */
	struct qman_portal *p;
	bool down;
};

struct dpa_percpu_priv_s {
struct dpaa_percpu_priv {
	struct net_device *net_dev;
	struct dpa_napi_portal *np;
	struct dpaa_napi_portal np;
	u64 in_interrupt;
	u64 tx_confirm;
	/* fragmented (non-linear) skbuffs received from the stack */
@@ -246,26 +137,28 @@ struct dpa_percpu_priv_s {
#ifndef __rtems__
	struct rtnl_link_stats64 stats;
#endif /* __rtems__ */
	struct dpa_rx_errors rx_errors;
	struct dpa_ern_cnt ern_cnt;
	struct dpaa_rx_errors rx_errors;
	struct dpaa_ern_cnt ern_cnt;
};

struct dpa_priv_s {
	struct dpa_percpu_priv_s __percpu *percpu_priv;
	struct dpa_bp *dpa_bp;
struct dpaa_buffer_layout {
	u16 priv_data_size;
};

struct dpaa_priv {
	struct dpaa_percpu_priv __percpu *percpu_priv;
	struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM];
	/* Store here the needed Tx headroom for convenience and speed
	 * (even though it can be computed based on the fields of buf_layout)
	 */
	u16 tx_headroom;
	struct net_device *net_dev;
	struct mac_device *mac_dev;
	struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
	struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
	struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
	struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];

	size_t bp_count;

	u16 channel;	/* "fsl,qman-channel-id" */
	struct list_head dpa_fq_list;
	u16 channel;
	struct list_head dpaa_fq_list;

#ifndef __rtems__
	u32 msg_enable;	/* net_device message level */
@@ -289,231 +182,28 @@ struct dpa_priv_s {
	bool use_ingress_cgr;
	struct qman_cgr ingress_cgr;

	struct dpa_buffer_layout_s *buf_layout;
	struct dpaa_buffer_layout buf_layout[2];
	u16 rx_headroom;
};

struct fm_port_fqs {
	struct dpa_fq *tx_defq;
	struct dpa_fq *tx_errq;
	struct dpa_fq *rx_defq;
	struct dpa_fq *rx_errq;
};
/* from dpaa_ethtool.c */
extern const struct ethtool_ops dpaa_ethtool_ops;

int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
void _dpa_rx(struct net_device *net_dev,
	     struct qman_portal *portal,
	     const struct dpa_priv_s *priv,
	     struct dpa_percpu_priv_s *percpu_priv,
	     const struct qm_fd *fd,
	     u32 fqid,
	     int *count_ptr);
#ifndef __rtems__
int dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
				   const struct qm_fd *fd);

/* Turn on HW checksum computation for this outgoing frame.
 * If the current protocol is not something we support in this regard
 * (or if the stack has already computed the SW checksum), we do nothing.
 *
 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
 * otherwise.
 *
 * Note that this function may modify the fd->cmd field and the skb data buffer
 * (the Parse Results area).
 */
int dpa_enable_tx_csum(struct dpa_priv_s *priv, struct sk_buff *skb,
		       struct qm_fd *fd, char *parse_results);
#else /* __rtems__ */
void _dpa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd);
#endif /* __rtems__ */

static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
					 struct qman_portal *portal)
{
#ifndef __rtems__
	/* In case of threaded ISR for RT enable kernel,
	 * in_irq() does not return appropriate value, so use
	 * in_serving_softirq to distinguish softirq or irq context.
	 */
	if (unlikely(in_irq() || !in_serving_softirq())) {
		/* Disable QMan IRQ and invoke NAPI */
		int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);

		if (likely(!ret)) {
			const struct qman_portal_config *pc =
				qman_p_get_portal_config(portal);
			struct dpa_napi_portal *np =
				&percpu_priv->np[pc->channel];

			np->p = portal;
			napi_schedule(&np->napi);
			percpu_priv->in_interrupt++;
			return 1;
		}
	}
#else /* __rtems__ */
	/* FIXME */
#endif /* __rtems__ */
	return 0;
}

static inline ssize_t __const dpa_fd_length(const struct qm_fd *fd)
{
	return fd->length20;
}

static inline ssize_t __const dpa_fd_offset(const struct qm_fd *fd)
{
	return fd->offset;
}

#ifndef __rtems__
/* Verifies if the skb length is below the interface MTU */
static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)
{
	if (unlikely(skb->len > mtu))
		if ((skb->protocol != htons(ETH_P_8021Q)) ||
		    (skb->len > mtu + 4))
			return -1;

	return 0;
}
#endif /* __rtems__ */

static inline u16 dpa_get_headroom(struct dpa_buffer_layout_s *bl)
{
	u16 headroom;
	/* The frame headroom must accommodate:
	 * - the driver private data area
	 * - parse results, hash results, timestamp if selected
	 * If either hash results or time stamp are selected, both will
	 * be copied to/from the frame headroom, as TS is located between PR and
	 * HR in the IC and IC copy size has a granularity of 16bytes
	 * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
	 *
	 * Also make sure the headroom is a multiple of data_align bytes
	 */
	headroom = (u16)(bl->priv_data_size +
			 (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
			 (bl->hash_results || bl->time_stamp ?
			  DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0));

	return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
}
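To make the headroom arithmetic concrete, here is a self-contained rework of the same computation with example sizes plugged in. The 16/8/8 values mirror the DPA_* constants above; the 32-byte parse-results size is an assumption standing in for sizeof(struct fman_prs_result):

	#include <stdio.h>

	/* Simplified kernel-style ALIGN(): round x up to a multiple of a. */
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned int priv_data_size = 16;	/* DPA_TX_PRIV_DATA_SIZE */
		unsigned int parse_results = 32;	/* assumed sizeof(struct fman_prs_result) */
		unsigned int ts_plus_hash = 8 + 8;	/* DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE */
		unsigned int data_align = 16;		/* DPA_FD_DATA_ALIGNMENT */

		unsigned int headroom = priv_data_size + parse_results + ts_plus_hash;

		/* 16 + 32 + 16 = 64, already a multiple of 16, so ALIGN is a no-op. */
		printf("headroom = %u\n", ALIGN(headroom, data_align));
		return 0;
	}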

#ifndef __rtems__
/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);
#ifdef __rtems__
#include <sys/mbuf.h>

void dpa_private_napi_del(struct net_device *net_dev);
#define DPAA_ENQUEUE_RETRIES 100000
#define DPAA_SGT_MAX_ENTRIES 16
#define DPAA_TX_PRIV_DATA_SIZE 16
#define FM_L3_PARSE_RESULT_IPV4 0x8000
#define FM_L3_PARSE_RESULT_IPV6 0x4000
#define FM_L4_PARSE_RESULT_UDP 0x40
#define FM_L4_PARSE_RESULT_TCP 0x20
#define FSL_DPAA_BPID_INV 0xff

void dpaa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd);
#endif /* __rtems__ */

static inline void clear_fd(struct qm_fd *fd)
{
	fd->opaque_addr = 0;
	fd->opaque = 0;
	fd->cmd = 0;
}

static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
				   struct qman_fq *tx_fq)
{
	int i;

	for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
		if (priv->egress_fqs[i] == tx_fq)
			return i;

	return -EINVAL;
}

#ifndef __rtems__
static inline int dpa_xmit(struct dpa_priv_s *priv,
			   struct rtnl_link_stats64 *percpu_stats,
			   int queue,
			   struct qm_fd *fd)
{
	int err, i;
	struct qman_fq *egress_fq;

	egress_fq = priv->egress_fqs[queue];
	if (fd->bpid == 0xff)
		fd->cmd |= qman_fq_fqid(priv->conf_fqs[queue]);

	/* Trace this Tx fd */
	trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);

	for (i = 0; i < 100000; i++) {
		err = qman_enqueue(egress_fq, fd, 0);
		if (err != -EBUSY)
			break;
	}

	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		percpu_stats->tx_fifo_errors++;
		return err;
	}

	percpu_stats->tx_packets++;
	percpu_stats->tx_bytes += dpa_fd_length(fd);

	return 0;
}
#endif /* __rtems__ */

/* Use multiple WQs for FQ assignment:
 * - Tx Confirmation queues go to WQ1.
 * - Rx Default and Tx queues go to WQ3 (no differentiation between
 *   Rx and Tx traffic).
 * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
 *   to be scheduled, in case there are many more FQs in WQ3).
 * This ensures that Tx-confirmed buffers are timely released. In particular,
 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
 * are greatly outnumbered by other FQs in the system, while
 * dequeue scheduling is round-robin.
 */
static inline void _dpa_assign_wq(struct dpa_fq *fq)
{
	switch (fq->fq_type) {
	case FQ_TYPE_TX_CONFIRM:
	case FQ_TYPE_TX_CONF_MQ:
		fq->wq = 1;
		break;
	case FQ_TYPE_RX_DEFAULT:
	case FQ_TYPE_TX:
		fq->wq = 3;
		break;
	case FQ_TYPE_RX_ERROR:
	case FQ_TYPE_TX_ERROR:
		fq->wq = 2;
		break;
	default:
		WARN(1, "Invalid FQ type %d for FQID %d!\n",
		     fq->fq_type, fq->fqid);
	}
}

#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
/* Use in lieu of skb_get_queue_mapping() */
#define dpa_get_queue_mapping(skb) \
	raw_smp_processor_id()
#else
/* Use the queue selected by XPS */
#define dpa_get_queue_mapping(skb) \
	skb_get_queue_mapping(skb)
#endif

static inline void _dpa_bp_free_pf(void *addr)
{
#ifndef __rtems__
	put_page(virt_to_head_page(addr));
#else /* __rtems__ */
	BSD_ASSERT(0);
#endif /* __rtems__ */
}

#endif /* __DPA_H */
#endif /* __DPAA_H */

File diff suppressed because it is too large
@@ -1,113 +0,0 @@
/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __DPAA_ETH_COMMON_H
#define __DPAA_ETH_COMMON_H

#include <linux/etherdevice.h>
#include <soc/fsl/bman.h>
#include <linux/of_platform.h>

#include "dpaa_eth.h"

#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */

/* used in napi related functions */
extern u16 qman_portal_max;

/* from dpa_ethtool.c */
extern const struct ethtool_ops dpa_ethtool_ops;

int dpa_netdev_init(struct net_device *net_dev,
		    const u8 *mac_addr,
		    u16 tx_timeout);
int dpa_start(struct net_device *net_dev);
int dpa_stop(struct net_device *net_dev);
void dpa_timeout(struct net_device *net_dev);
struct rtnl_link_stats64 *dpa_get_stats64(struct net_device *net_dev,
					  struct rtnl_link_stats64 *stats);
int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
int dpa_ndo_init(struct net_device *net_dev);
#ifndef __rtems__
int dpa_set_features(struct net_device *dev, netdev_features_t features);
netdev_features_t dpa_fix_features(struct net_device *dev,
				   netdev_features_t features);
#endif /* __rtems__ */
int dpa_remove(struct platform_device *pdev);
struct mac_device *dpa_mac_dev_get(struct platform_device *pdev);
int dpa_mac_hw_index_get(struct platform_device *pdev);
int dpa_mac_fman_index_get(struct platform_device *pdev);
int dpa_set_mac_address(struct net_device *net_dev, void *addr);
void dpa_set_rx_mode(struct net_device *net_dev);
void dpa_set_buffers_layout(struct mac_device *mac_dev,
			    struct dpa_buffer_layout_s *layout);
int dpa_bp_alloc(struct dpa_bp *dpa_bp);
void dpa_bp_free(struct dpa_priv_s *priv);
struct dpa_bp *dpa_bpid2pool(int bpid);
void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
bool dpa_bpid2pool_use(int bpid);
void dpa_bp_drain(struct dpa_bp *bp);
#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
		     void *accel_priv, select_queue_fallback_t fallback);
#endif
struct dpa_fq *dpa_fq_alloc(struct device *dev,
			    const struct fqid_cell *fqids,
			    struct list_head *list,
			    enum dpa_fq_type fq_type);
int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
		     struct fm_port_fqs *port_fqs,
		     bool tx_conf_fqs_per_core,
		     enum port_type ptype);
int dpa_get_channel(void);
void dpa_release_channel(void);
int dpaa_eth_add_channel(void *__arg);
int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
		  struct fman_port *tx_port);
int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
int dpa_fq_free(struct device *dev, struct list_head *list);
void dpaa_eth_init_ports(struct mac_device *mac_dev,
			 struct dpa_bp *bp, size_t count,
			 struct fm_port_fqs *port_fqs,
			 struct dpa_buffer_layout_s *buf_layout,
			 struct device *dev);
void dpa_release_sgt(struct qm_sg_entry *sgt);
void dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
void count_ern(struct dpa_percpu_priv_s *percpu_priv,
	       const struct qm_mr_entry *msg);
#ifndef __rtems__
int dpa_enable_tx_csum(struct dpa_priv_s *priv,
		       struct sk_buff *skb,
		       struct qm_fd *fd,
		       char *parse_results);
#endif /* __rtems__ */
#endif /* __DPAA_ETH_COMMON_H */
@@ -1,710 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2012 - 2015 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/highmem.h>
#include <soc/fsl/bman.h>

#include "dpaa_eth.h"
#include "dpaa_eth_common.h"

/* Convenience macros for storing/retrieving the skb back-pointers.
 *
 * NB: @off is an offset from a (struct sk_buff **) pointer!
 */
#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
	{ \
		skbh = (struct sk_buff **)addr; \
		*(skbh + (off)) = skb; \
	}
#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
	{ \
		skbh = (struct sk_buff **)addr; \
		skb = *(skbh + (off)); \
	}
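These two macros simply treat the given address as an array of sk_buff pointers and index into it: offset 0 stores the back-pointer at the buffer start (Tx case), offset -1 one slot before it (Rx case, where a cacheline is reserved ahead of the data for exactly this purpose). A minimal round-trip sketch of the offset-0 case, with an opaque type standing in for struct sk_buff:

	#include <assert.h>
	#include <stdlib.h>

	struct sk_buff;	/* opaque here; only pointer values are stored */

	#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
		{ \
			skbh = (struct sk_buff **)addr; \
			*(skbh + (off)) = skb; \
		}
	#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
		{ \
			skbh = (struct sk_buff **)addr; \
			skb = *(skbh + (off)); \
		}

	int main(void)
	{
		void *buf = malloc(64);
		struct sk_buff *in = (struct sk_buff *)0x1234, *out;
		struct sk_buff **skbh;

		/* Store the back-pointer at the buffer start, then read it back.
		 * (Offset -1 would require the slot before buf to be valid memory,
		 * as the Rx allocation path arranges.)
		 */
		DPA_WRITE_SKB_PTR(in, skbh, buf, 0);
		DPA_READ_SKB_PTR(out, skbh, buf, 0);
		assert(out == in);

		free(buf);
		return 0;
	}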

/* DMA map and add a page frag back into the bpool.
 * @vaddr fragment must have been allocated with netdev_alloc_frag(),
 * specifically for fitting into @dpa_bp.
 */
static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
				int *count_ptr)
{
	struct bm_buffer bmb;
	dma_addr_t addr;

	addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
		dev_err(dpa_bp->dev, "DMA mapping failed");
		return;
	}

	bm_buffer_set64(&bmb, addr);

	while (bman_release(dpa_bp->pool, &bmb, 1, 0))
		cpu_relax();

	(*count_ptr)++;
}

static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
{
	struct bm_buffer bmb[8];
	void *new_buf;
	dma_addr_t addr;
	u8 i;
	struct device *dev = dpa_bp->dev;
	struct sk_buff *skb, **skbh;

	memset(bmb, 0, sizeof(bmb));

	for (i = 0; i < 8; i++) {
		/* We'll prepend the skb back-pointer; can't use the DPA
		 * priv space, because FMan will overwrite it (from offset 0)
		 * if it ends up being the second, third, etc. fragment
		 * in a S/G frame.
		 *
		 * We only need enough space to store a pointer, but allocate
		 * an entire cacheline for performance reasons.
		 */
		new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
		if (unlikely(!new_buf))
			goto netdev_alloc_failed;
		new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);

		skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
		if (unlikely(!skb)) {
			put_page(virt_to_head_page(new_buf));
			goto build_skb_failed;
		}
		DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);

		addr = dma_map_single(dev, new_buf,
				      dpa_bp->size, DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto dma_map_failed;

		bm_buffer_set64(&bmb[i], addr);
	}

release_bufs:
	/* Release the buffers. In case bman is busy, keep trying
	 * until successful. bman_release() is guaranteed to succeed
	 * in a reasonable amount of time
	 */
	while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
		cpu_relax();
	return i;

dma_map_failed:
	kfree_skb(skb);

build_skb_failed:
netdev_alloc_failed:
	net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
	WARN_ONCE(1, "Memory allocation failure on Rx\n");

	bm_buffer_set64(&bmb[i], 0);
	/* Avoid releasing a completely null buffer; bman_release() requires
	 * at least one buffer.
	 */
	if (likely(i))
		goto release_bufs;

	return 0;
}

/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
{
	int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
	*count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
}

int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
{
	int i;

	/* Give each CPU an allotment of "config_count" buffers */
	for_each_possible_cpu(i) {
		int j;

		/* Although we access another CPU's counters here
		 * we do it at boot time so it is safe
		 */
		for (j = 0; j < dpa_bp->config_count; j += 8)
			dpa_bp_add_8_bufs(dpa_bp, i);
	}
	return 0;
}

/* Add buffers/(pages) for Rx processing whenever bpool count falls below
 * REFILL_THRESHOLD.
 */
int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
{
	int count = *countptr;
	int new_bufs;

	if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
		do {
			new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
			if (unlikely(!new_bufs)) {
				/* Avoid looping forever if we've temporarily
				 * run out of memory. We'll try again at the
				 * next NAPI cycle.
				 */
				break;
			}
			count += new_bufs;
		} while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);

		*countptr = count;
		if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
			return -ENOMEM;
	}

	return 0;
}
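The refill logic above is a simple hysteresis: nothing happens until the per-CPU count drops below FSL_DPAA_ETH_REFILL_THRESHOLD (80), after which buffers are added in batches of 8 until FSL_DPAA_ETH_MAX_BUF_COUNT (128) is reached again. A standalone sketch of that control flow, with the batch allocator stubbed to always succeed:

	#include <stdio.h>

	#define MAX_BUF_COUNT 128	/* FSL_DPAA_ETH_MAX_BUF_COUNT */
	#define REFILL_THRESHOLD 80	/* FSL_DPAA_ETH_REFILL_THRESHOLD */

	/* Stub standing in for _dpa_bp_add_8_bufs(); a real allocator could
	 * return 0 on memory pressure, which breaks the loop below.
	 */
	static int add_8_bufs(void) { return 8; }

	static int refill(int *count)
	{
		if (*count < REFILL_THRESHOLD) {
			do {
				int added = add_8_bufs();
				if (!added)
					break;	/* retry at the next NAPI cycle */
				*count += added;
			} while (*count < MAX_BUF_COUNT);
		}
		return (*count < MAX_BUF_COUNT) ? -1 : 0;
	}

	int main(void)
	{
		int count = 40;	/* below threshold, so refill kicks in */

		refill(&count);
		printf("count after refill = %d\n", count);	/* 128 */
		return 0;
	}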

/* Cleanup function for outgoing frame descriptors that were built on Tx path,
 * either contiguous frames or scatter/gather ones.
 * Skb freeing is not handled here.
 *
 * This function may be called on error paths in the Tx function, so guard
 * against cases when not all fd relevant fields were filled in.
 *
 * Return the skb backpointer, since for S/G frames the buffer containing it
 * gets freed here.
 */
struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
				   const struct qm_fd *fd)
{
	const struct qm_sg_entry *sgt;
	int i;
	struct dpa_bp *dpa_bp = priv->dpa_bp;
	dma_addr_t addr = qm_fd_addr(fd);
	struct sk_buff **skbh;
	struct sk_buff *skb = NULL;
	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
	int nr_frags;

	/* retrieve skb back pointer */
	DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);

	if (unlikely(fd->format == qm_fd_sg)) {
		nr_frags = skb_shinfo(skb)->nr_frags;
		dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
				 sizeof(struct qm_sg_entry) * (1 + nr_frags),
				 dma_dir);

		/* The sgt buffer has been allocated with netdev_alloc_frag(),
		 * it's from lowmem.
		 */
		sgt = phys_to_virt(addr + dpa_fd_offset(fd));

		/* sgt[0] is from lowmem, was dma_map_single()-ed */
		dma_unmap_single(dpa_bp->dev, (dma_addr_t)sgt[0].addr,
				 sgt[0].length, dma_dir);

		/* remaining pages were mapped with dma_map_page() */
		for (i = 1; i < nr_frags; i++) {
			DPA_ERR_ON(sgt[i].extension);

			dma_unmap_page(dpa_bp->dev, (dma_addr_t)sgt[i].addr,
				       sgt[i].length, dma_dir);
		}

		/* Free the page frag that we allocated on Tx */
		put_page(virt_to_head_page(sgt));
	} else {
		dma_unmap_single(dpa_bp->dev, addr,
				 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
	}

	return skb;
}

/* Build a linear skb around the received buffer.
 * We are guaranteed there is enough room at the end of the data buffer to
 * accommodate the shared info area of the skb.
 */
static struct sk_buff *contig_fd_to_skb(const struct dpa_priv_s *priv,
					const struct qm_fd *fd)
{
	struct sk_buff *skb = NULL, **skbh;
	ssize_t fd_off = dpa_fd_offset(fd);
	dma_addr_t addr = qm_fd_addr(fd);
	void *vaddr;

	vaddr = phys_to_virt(addr);
	DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

	/* Retrieve the skb and adjust data and tail pointers, to make sure
	 * forwarded skbs will have enough space on Tx if extra headers
	 * are added.
	 */
	DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);

	DPA_ERR_ON(fd_off != priv->rx_headroom);
	skb_reserve(skb, fd_off);
	skb_put(skb, dpa_fd_length(fd));

	skb->ip_summed = CHECKSUM_NONE;

	return skb;
}

/* Build an skb with the data of the first S/G entry in the linear portion and
 * the rest of the frame as skb fragments.
 *
 * The page fragment holding the S/G Table is recycled here.
 */
static struct sk_buff *sg_fd_to_skb(const struct dpa_priv_s *priv,
				    const struct qm_fd *fd,
				    int *count_ptr)
{
	const struct qm_sg_entry *sgt;
	dma_addr_t addr = qm_fd_addr(fd);
	ssize_t fd_off = dpa_fd_offset(fd);
	dma_addr_t sg_addr;
	void *vaddr, *sg_vaddr;
	struct dpa_bp *dpa_bp;
	struct page *page, *head_page;
	int frag_offset, frag_len;
	int page_offset;
	int i;
	struct sk_buff *skb = NULL, *skb_tmp, **skbh;

	vaddr = phys_to_virt(addr);
	DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

	dpa_bp = priv->dpa_bp;
	/* Iterate through the SGT entries and add data buffers to the skb */
	sgt = vaddr + fd_off;
	for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
		/* Extension bit is not supported */
		DPA_ERR_ON(sgt[i].extension);

		/* We use a single global Rx pool */
		DPA_ERR_ON(dpa_bp != dpa_bpid2pool(sgt[i].bpid));

		sg_addr = qm_sg_addr(&sgt[i]);
		sg_vaddr = phys_to_virt(sg_addr);
		DPA_ERR_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
				       SMP_CACHE_BYTES));

		dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
				 DMA_BIDIRECTIONAL);
		if (i == 0) {
			DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
			DPA_ERR_ON(skb->head != sg_vaddr);

			skb->ip_summed = CHECKSUM_NONE;

			/* Make sure forwarded skbs will have enough space
			 * on Tx, if extra headers are added.
			 */
			DPA_ERR_ON(fd_off != priv->rx_headroom);
			skb_reserve(skb, fd_off);
			skb_put(skb, sgt[i].length);
		} else {
			/* Not the first S/G entry; all data from buffer will
			 * be added in an skb fragment; fragment index is offset
			 * by one since first S/G entry was incorporated in the
			 * linear part of the skb.
			 *
			 * Caution: 'page' may be a tail page.
			 */
			DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Free (only) the skbuff shell because its data buffer
			 * is already a frag in the main skb.
			 */
			get_page(head_page);
			dev_kfree_skb(skb_tmp);

			/* Compute offset in (possibly tail) page */
			page_offset = ((unsigned long)sg_vaddr &
				       (PAGE_SIZE - 1)) +
				(page_address(page) - page_address(head_page));
			/* page_offset only refers to the beginning of sgt[i];
			 * but the buffer itself may have an internal offset.
			 */
			frag_offset = sgt[i].offset + page_offset;
			frag_len = sgt[i].length;
			/* skb_add_rx_frag() does no checking on the page; if
			 * we pass it a tail page, we'll end up with
			 * bad page accounting and eventually with segafults.
			 */
			skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
					frag_len, dpa_bp->size);
		}
		/* Update the pool count for the current {cpu x bpool} */
		(*count_ptr)--;

		if (sgt[i].final)
			break;
	}
	WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");

	/* recycle the SGT fragment */
	DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
	dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
	return skb;
}

void _dpa_rx(struct net_device *net_dev,
	     struct qman_portal *portal,
	     const struct dpa_priv_s *priv,
	     struct dpa_percpu_priv_s *percpu_priv,
	     const struct qm_fd *fd,
	     u32 fqid,
	     int *count_ptr)
{
	struct dpa_bp *dpa_bp;
	struct sk_buff *skb;
	dma_addr_t addr = qm_fd_addr(fd);
	u32 fd_status = fd->status;
	unsigned int skb_len;
	struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;

	if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
		if (net_ratelimit())
			netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
				   fd_status & FM_FD_STAT_RX_ERRORS);

		percpu_stats->rx_errors++;
		goto _release_frame;
	}

	dpa_bp = priv->dpa_bp;
	DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid));

	/* prefetch the first 64 bytes of the frame or the SGT start */
	dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
	prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));

	/* The only FD types that we may receive are contig and S/G */
	DPA_ERR_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));

	if (likely(fd->format == qm_fd_contig))
		skb = contig_fd_to_skb(priv, fd);
	else
		skb = sg_fd_to_skb(priv, fd, count_ptr);

	/* Account for either the contig buffer or the SGT buffer (depending on
	 * which case we were in) having been removed from the pool.
	 */
	(*count_ptr)--;
	skb->protocol = eth_type_trans(skb, net_dev);

	/* IP Reassembled frames are allowed to be larger than MTU */
	if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
		     !(fd_status & FM_FD_IPR))) {
		percpu_stats->rx_dropped++;
		goto drop_bad_frame;
	}

	skb_len = skb->len;

	if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
		goto packet_dropped;

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += skb_len;

packet_dropped:
	return;

drop_bad_frame:
	dev_kfree_skb(skb);
	return;

_release_frame:
	dpa_fd_release(net_dev, fd);
}

static int skb_to_contig_fd(struct dpa_priv_s *priv,
			    struct sk_buff *skb, struct qm_fd *fd,
			    int *count_ptr, int *offset)
{
	struct sk_buff **skbh;
	dma_addr_t addr;
	struct dpa_bp *dpa_bp = priv->dpa_bp;
	struct net_device *net_dev = priv->net_dev;
	int err;
	enum dma_data_direction dma_dir;
	unsigned char *buffer_start;

	{
		/* We are guaranteed to have at least tx_headroom bytes
		 * available, so just use that for offset.
		 */
		fd->bpid = 0xff;
		buffer_start = skb->data - priv->tx_headroom;
		fd->offset = priv->tx_headroom;
		dma_dir = DMA_TO_DEVICE;

		DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
	}

	/* Enable L3/L4 hardware checksum computation.
	 *
	 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
	 * need to write into the skb.
	 */
	err = dpa_enable_tx_csum(priv, skb, fd,
				 ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
	if (unlikely(err < 0)) {
		if (net_ratelimit())
			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
				  err);
		return err;
	}

	/* Fill in the rest of the FD fields */
	fd->format = qm_fd_contig;
	fd->length20 = skb->len;
	fd->cmd |= FM_FD_CMD_FCO;

	/* Map the entire buffer size that may be seen by FMan, but no more */
	addr = dma_map_single(dpa_bp->dev, skbh,
			      skb_tail_pointer(skb) - buffer_start, dma_dir);
	if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
		if (net_ratelimit())
			netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
		return -EINVAL;
	}
	fd->addr_hi = (u8)upper_32_bits(addr);
	fd->addr_lo = lower_32_bits(addr);

	return 0;
}

static int skb_to_sg_fd(struct dpa_priv_s *priv,
			struct sk_buff *skb, struct qm_fd *fd)
{
	struct dpa_bp *dpa_bp = priv->dpa_bp;
	dma_addr_t addr;
	struct sk_buff **skbh;
	struct net_device *net_dev = priv->net_dev;
	int err;

	struct qm_sg_entry *sgt;
	void *sgt_buf;
	void *buffer_start;
	skb_frag_t *frag;
	int i, j;
	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
	const int nr_frags = skb_shinfo(skb)->nr_frags;

	fd->format = qm_fd_sg;

	/* get a page frag to store the SGTable */
	sgt_buf = netdev_alloc_frag(priv->tx_headroom +
				    sizeof(struct qm_sg_entry) * (1 + nr_frags));
	if (unlikely(!sgt_buf)) {
		netdev_err(net_dev, "netdev_alloc_frag() failed\n");
		return -ENOMEM;
	}

	/* Enable L3/L4 hardware checksum computation.
	 *
	 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
	 * need to write into the skb.
	 */
	err = dpa_enable_tx_csum(priv, skb, fd,
				 sgt_buf + DPA_TX_PRIV_DATA_SIZE);
	if (unlikely(err < 0)) {
		if (net_ratelimit())
			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
				  err);
		goto csum_failed;
	}

	sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
	sgt[0].bpid = 0xff;
	sgt[0].offset = 0;
	sgt[0].length = cpu_to_be32(skb_headlen(skb));
	sgt[0].extension = 0;
	sgt[0].final = 0;
	addr = dma_map_single(dpa_bp->dev, skb->data, sgt[0].length, dma_dir);
	if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
		dev_err(dpa_bp->dev, "DMA mapping failed");
		err = -EINVAL;
		goto sg0_map_failed;
	}
	sgt[0].addr_hi = (u8)upper_32_bits(addr);
	sgt[0].addr_lo = cpu_to_be32(lower_32_bits(addr));

	/* populate the rest of SGT entries */
	for (i = 1; i <= nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		sgt[i].bpid = 0xff;
		sgt[i].offset = 0;
		sgt[i].length = cpu_to_be32(frag->size);
		sgt[i].extension = 0;
		sgt[i].final = 0;

		DPA_ERR_ON(!skb_frag_page(frag));
		addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, sgt[i].length,
					dma_dir);
		if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
			dev_err(dpa_bp->dev, "DMA mapping failed");
			err = -EINVAL;
			goto sg_map_failed;
		}

		/* keep the offset in the address */
		sgt[i].addr_hi = (u8)upper_32_bits(addr);
		sgt[i].addr_lo = cpu_to_be32(lower_32_bits(addr));
	}
	sgt[i - 1].final = 1;

	fd->length20 = skb->len;
	fd->offset = priv->tx_headroom;

	/* DMA map the SGT page */
	buffer_start = (void *)sgt - priv->tx_headroom;
	DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);

	addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
			      sizeof(struct qm_sg_entry) * (1 + nr_frags),
			      dma_dir);
	if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
		dev_err(dpa_bp->dev, "DMA mapping failed");
		err = -EINVAL;
		goto sgt_map_failed;
	}

	fd->bpid = 0xff;
	fd->cmd |= FM_FD_CMD_FCO;
	fd->addr_hi = (u8)upper_32_bits(addr);
	fd->addr_lo = lower_32_bits(addr);

	return 0;

sgt_map_failed:
sg_map_failed:
	for (j = 0; j < i; j++)
		dma_unmap_page(dpa_bp->dev, qm_sg_addr(&sgt[j]),
			       cpu_to_be32(sgt[j].length), dma_dir);
sg0_map_failed:
csum_failed:
	put_page(virt_to_head_page(sgt_buf));

	return err;
}

int dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpa_priv_s *priv;
	struct qm_fd fd;
	struct dpa_percpu_priv_s *percpu_priv;
	struct rtnl_link_stats64 *percpu_stats;
	int err = 0;
	const int queue_mapping = dpa_get_queue_mapping(skb);
	bool nonlinear = skb_is_nonlinear(skb);
	int *countptr, offset = 0;

	priv = netdev_priv(net_dev);
	/* Non-migratable context, safe to use raw_cpu_ptr */
	percpu_priv = raw_cpu_ptr(priv->percpu_priv);
	percpu_stats = &percpu_priv->stats;
	countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);

	clear_fd(&fd);

	if (!nonlinear) {
		/* We're going to store the skb backpointer at the beginning
		 * of the data buffer, so we need a privately owned skb
		 *
		 * We've made sure skb is not shared in dev->priv_flags,
		 * we need to verify the skb head is not cloned
		 */
		if (skb_cow_head(skb, priv->tx_headroom))
			goto enomem;

		BUG_ON(skb_is_nonlinear(skb));
	}

	/* MAX_SKB_FRAGS is equal or larger than our DPA_SGT_MAX_ENTRIES;
	 * make sure we don't feed FMan with more fragments than it supports.
	 * Btw, we're using the first sgt entry to store the linear part of
	 * the skb, so we're one extra frag short.
	 */
	if (nonlinear &&
	    likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
		/* Just create a S/G fd based on the skb */
		err = skb_to_sg_fd(priv, skb, &fd);
		percpu_priv->tx_frag_skbuffs++;
	} else {
		/* If the egress skb contains more fragments than we support
		 * we have no choice but to linearize it ourselves.
		 */
		if (unlikely(nonlinear) && __skb_linearize(skb))
			goto enomem;

		/* Finally, create a contig FD from this skb */
		err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
	}
	if (unlikely(err < 0))
		goto skb_to_fd_failed;

	if (likely(dpa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
		return NETDEV_TX_OK;

	/* dpa_xmit failed */
	if (fd.bpid != 0xff) {
		(*countptr)--;
		dpa_fd_release(net_dev, &fd);
		percpu_stats->tx_errors++;
		return NETDEV_TX_OK;
	}
	_dpa_cleanup_tx_fd(priv, &fd);
skb_to_fd_failed:
enomem:
	percpu_stats->tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

@@ -80,9 +80,9 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd,
	TP_fast_assign(
		__entry->fqid = fq->fqid;
		__entry->fd_addr = qm_fd_addr_get64(fd);
		__entry->fd_format = fd->format;
		__entry->fd_offset = dpa_fd_offset(fd);
		__entry->fd_length = dpa_fd_length(fd);
		__entry->fd_format = qm_fd_get_format(fd);
		__entry->fd_offset = qm_fd_get_offset(fd);
		__entry->fd_length = qm_fd_get_length(fd);
		__entry->fd_status = fd->status;
		__assign_str(name, netdev->name);
	),
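The trace hunk above reflects the same API move as the rest of this update: instead of reading qm_fd bit-fields directly, the 4.11-era <soc/fsl/qman.h> provides accessor helpers. A hedged before/after sketch (return types assumed from the upstream header of this era):

	#include <soc/fsl/qman.h>

	static void fd_fields(const struct qm_fd *fd)
	{
		enum qm_fd_format fmt = qm_fd_get_format(fd);	/* was: fd->format */
		int off = qm_fd_get_offset(fd);			/* was: fd->offset */
		int len = qm_fd_get_length(fd);			/* was: fd->length20 */

		(void)fmt; (void)off; (void)len;
	}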
@ -99,7 +99,7 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd,
|
||||
*/
|
||||
|
||||
/* Tx (egress) fd */
|
||||
DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
|
||||
DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_fd,
|
||||
|
||||
TP_PROTO(struct net_device *netdev,
|
||||
struct qman_fq *fq,
|
||||
@ -109,7 +109,7 @@ DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
|
||||
);
|
||||
|
||||
/* Rx fd */
|
||||
DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
|
||||
DEFINE_EVENT(dpaa_eth_fd, dpaa_rx_fd,
|
||||
|
||||
TP_PROTO(struct net_device *netdev,
|
||||
struct qman_fq *fq,
|
||||
@ -119,7 +119,7 @@ DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
|
||||
);
|
||||
|
||||
/* Tx confirmation fd */
|
||||
DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
|
||||
DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_conf_fd,
|
||||
|
||||
TP_PROTO(struct net_device *netdev,
|
||||
struct qman_fq *fq,
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -58,7 +58,6 @@

/* TX Port: Length Error */
#define FM_FD_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR)


/* Rx FIFO overflow, FCS error, code error, running disparity error
 * (SGMII and TBI modes), FIFO parity error. PHY Sequence error,
 * PHY error control character detected.
@ -167,8 +166,8 @@ struct fman_prs_result {
    u8 ip_off[2]; /* IP offset */
    u8 gre_off; /* GRE offset */
    u8 l4_off; /* Layer 4 offset */
    u8 nxthdr_off; /** Parser end point */
} __attribute__((__packed__));
    u8 nxthdr_off; /* Parser end point */
};

/* A structure for defining buffer prefix area content. */
struct fman_buffer_prefix_content {
@ -237,29 +236,6 @@ struct fman_buf_pool_depletion {
    bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS];
};

/** fman_exceptions_cb
 * fman - Pointer to FMan
 * exception - The exception.
 *
 * Exceptions user callback routine, will be called upon an exception
 * passing the exception identification.
 */
typedef void (fman_exceptions_cb)(struct fman *fman,
                                  enum fman_exceptions exception);

/** fman_bus_error_cb
 * fman - Pointer to FMan
 * port_id - Port id
 * addr - Address that caused the error
 * tnum - Owner of error
 * liodn - Logical IO device number
 *
 * Bus error user callback routine, will be called upon bus error,
 * passing parameters describing the errors and the owner.
 */
typedef void (fman_bus_error_cb)(struct fman *fman, u8 port_id, u64 addr,
                                 u8 tnum, u16 liodn);

/* Enum for inter-module interrupts registration */
enum fman_event_modules {
    FMAN_MOD_MAC = 0, /* MAC event */
@ -325,176 +301,37 @@ struct fman_port_init_params {
    /* LIODN base for this port, to be used together with LIODN offset. */
};

struct fman;

/**
 * fman_get_revision
 * @fman - Pointer to the FMan module
 * @rev_info - A structure of revision information parameters.
 *
 * Returns the FM revision
 *
 * Allowed only following fman_init().
 *
 * Return: 0 on success; Error code otherwise.
 */
void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info);

/**
 * fman_register_intr
 * @fman: A Pointer to FMan device
 * @mod: Calling module
 * @mod_id: Module id (if more than 1 exists, '0' if not)
 * @intr_type: Interrupt type (error/normal) selection.
 * @f_isr: The interrupt service routine.
 * @h_src_arg: Argument to be passed to f_isr.
 *
 * Used to register an event handler to be processed by FMan
 *
 * Return: 0 on success; Error code otherwise.
 */
void fman_register_intr(struct fman *fman, enum fman_event_modules mod,
                        u8 mod_id, enum fman_intr_type intr_type,
                        void (*f_isr)(void *h_src_arg), void *h_src_arg);

/**
 * fman_unregister_intr
 * @fman: A Pointer to FMan device
 * @mod: Calling module
 * @mod_id: Module id (if more than 1 exists, '0' if not)
 * @intr_type: Interrupt type (error/normal) selection.
 *
 * Used to unregister an event handler to be processed by FMan
 *
 * Return: 0 on success; Error code otherwise.
 */
void fman_unregister_intr(struct fman *fman, enum fman_event_modules mod,
                          u8 mod_id, enum fman_intr_type intr_type);

/**
 * fman_set_port_params
 * @fman: A Pointer to FMan device
 * @port_params: Port parameters
 *
 * Used by FMan Port to pass parameters to the FMan
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_set_port_params(struct fman *fman,
                         struct fman_port_init_params *port_params);

/**
 * fman_reset_mac
 * @fman: A Pointer to FMan device
 * @mac_id: MAC id to be reset
 *
 * Reset a specific MAC
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_reset_mac(struct fman *fman, u8 mac_id);

/**
 * fman_get_clock_freq
 * @fman: A Pointer to FMan device
 *
 * Get FMan clock frequency
 *
 * Return: FMan clock frequency
 */

u16 fman_get_clock_freq(struct fman *fman);

/**
 * fman_get_bmi_max_fifo_size
 * @fman: A Pointer to FMan device
 *
 * Get FMan maximum FIFO size
 *
 * Return: FMan Maximum FIFO size
 */
u32 fman_get_bmi_max_fifo_size(struct fman *fman);

/**
 * fman_set_mac_max_frame
 * @fman: A Pointer to FMan device
 * @mac_id: MAC id
 * @mfl: Maximum frame length
 *
 * Set maximum frame length of specific MAC in FMan driver
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl);

/**
 * fman_get_qman_channel_id
 * @fman: A Pointer to FMan device
 * @port_id: Port id
 *
 * Get QMan channel ID associated to the Port id
 *
 * Return: QMan channel ID
 */
u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id);

/**
 * fman_get_mem_region
 * @fman: A Pointer to FMan device
 *
 * Get FMan memory region
 *
 * Return: A structure with FMan memory region information
 */
struct resource *fman_get_mem_region(struct fman *fman);

/**
 * fman_get_max_frm
 *
 * Return: Max frame length configured in the FM driver
 */
u16 fman_get_max_frm(void);

/**
 * fman_get_rx_extra_headroom
 *
 * Return: Extra headroom size configured in the FM driver
 */
int fman_get_rx_extra_headroom(void);

/**
 * fman_bind
 * @dev: FMan OF device pointer
 *
 * Bind to a specific FMan device.
 *
 * Allowed only after the port was created.
 *
 * Return: A pointer to the FMan device
 */
struct fman *fman_bind(struct device *dev);

/**
 * fman_unbind
 * @fman: Pointer to the FMan device
 *
 * Un-bind from a specific FMan device.
 *
 * Allowed only after the port was created.
 */
void fman_unbind(struct fman *fman);

/**
 * fman_get_device
 * @fman: A pointer to the FMan device.
 *
 * Get the FMan device pointer
 *
 * Return: Pointer to FMan device.
 */
struct device *fman_get_device(struct fman *fman);
#ifdef __rtems__
void fman_reset(struct fman *fman);
int fman_reset(struct fman *fman);
#endif /* __rtems__ */

#endif /* __FM_H */
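Note the __rtems__-only fman_reset() above changes its return type from void to int, so RTEMS-side callers can now check whether the FMan actually came out of reset. A minimal sketch of a caller, with hypothetical names:

/* Hypothetical caller, illustrating only the new error path. */
static int example_fman_restart(struct fman *fman)
{
    int err = fman_reset(fman);

    if (err)
        return err; /* reset failed or timed out */
    return 0;
}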
@ -36,8 +36,6 @@

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "crc_mac_addr_ext.h"

#include "fman_dtsec.h"
#include "fman.h"

@ -46,30 +44,23 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/phy.h>
#include <linux/crc32.h>
#include <linux/of_mdio.h>
#include <linux/mii.h>

/* MII Management Command Register */
#define MIIMCOM_READ_CYCLE 0x00000001
/* TBI register addresses */
#define MII_TBICON 0x11

/* MII Management Address Register */
#define MIIMADD_PHY_ADDR_SHIFT 8
/* TBICON register bit fields */
#define TBICON_SOFT_RESET 0x8000 /* Soft reset */
#define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */
#define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */
#define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */
#define TBICON_CLK_SELECT 0x0020 /* Clock select */
#define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */

/* MII Management Indicator Register */
#define MIIMIND_BUSY 0x00000001

/* PHY Control Register */
#define PHY_CR_PHY_RESET 0x8000
#define PHY_CR_SPEED0 0x2000
#define PHY_CR_ANE 0x1000
#define PHY_CR_RESET_AN 0x0200
#define PHY_CR_FULLDUPLEX 0x0100
#define PHY_CR_SPEED1 0x0040

#define PHY_TBICON_SRESET 0x8000
#define PHY_TBICON_CLK_SEL 0x0020
#define PHY_TBIANA_SGMII 0x4001
#define PHY_TBIANA_1000X 0x01a0

#define DTSEC_TO_MII_OFFSET 0x1000
#define TBIANA_SGMII 0x4001
#define TBIANA_1000X 0x01a0

/* Interrupt Mask Register (IMASK) */
#define DTSEC_IMASK_BREN 0x80000000
@ -116,9 +107,7 @@
/* Defaults */
#define DEFAULT_HALFDUP_RETRANSMIT 0xf
#define DEFAULT_HALFDUP_COLL_WINDOW 0x37
#define DEFAULT_HALFDUP_ALT_BACKOFF_VAL 0x0A
#define DEFAULT_TX_PAUSE_TIME 0xf000
#define DEFAULT_TBIPA 5
#define DEFAULT_RX_PREPEND 0
#define DEFAULT_PREAMBLE_LEN 7
#define DEFAULT_TX_PAUSE_TIME_EXTD 0
@ -127,22 +116,6 @@
#define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
#define DEFAULT_BACK_TO_BACK_IPG 0x60
#define DEFAULT_MAXIMUM_FRAME 0x600
#define DEFAULT_TBI_PHY_ADDR 5

#define DTSEC_DEFAULT_EXCEPTIONS \
    ((u32)((DTSEC_IMASK_BREN) |\
           (DTSEC_IMASK_RXCEN) |\
           (DTSEC_IMASK_BTEN) |\
           (DTSEC_IMASK_TXCEN) |\
           (DTSEC_IMASK_TXEEN) |\
           (DTSEC_IMASK_ABRTEN) |\
           (DTSEC_IMASK_LCEN) |\
           (DTSEC_IMASK_CRLEN) |\
           (DTSEC_IMASK_XFUNEN) |\
           (DTSEC_IMASK_IFERREN) |\
           (DTSEC_IMASK_MAGEN) |\
           (DTSEC_IMASK_TDPEEN) |\
           (DTSEC_IMASK_RDPEEN)))

/* register related defines (bits, field offsets..) */
#define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
@ -154,24 +127,17 @@
#define DTSEC_ECNTRL_R100M 0x00000008
#define DTSEC_ECNTRL_QSGMIIM 0x00000001

#define DTSEC_TCTRL_THDF 0x00000800
#define DTSEC_TCTRL_TTSE 0x00000040
#define DTSEC_TCTRL_GTS 0x00000020

#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_PAL_SHIFT 16
#define RCTRL_CFA 0x00008000
#define RCTRL_GHTX 0x00000400
#define RCTRL_RTSE 0x00000040
#define RCTRL_GRS 0x00000020
#define RCTRL_BC_REJ 0x00000010
#define RCTRL_MPROM 0x00000008
#define RCTRL_RSF 0x00000004
#define RCTRL_UPROM 0x00000001
#define RCTRL_PROM (RCTRL_UPROM | RCTRL_MPROM)

#define MACCFG1_SOFT_RESET 0x80000000
#define MACCFG1_LOOPBACK 0x00000100
#define MACCFG1_RX_FLOW 0x00000020
#define MACCFG1_TX_FLOW 0x00000010
#define MACCFG1_TX_EN 0x00000001
@ -179,11 +145,7 @@

#define MACCFG2_NIBBLE_MODE 0x00000100
#define MACCFG2_BYTE_MODE 0x00000200
#define MACCFG2_PRE_AM_RX_EN 0x00000080
#define MACCFG2_PRE_AM_TX_EN 0x00000040
#define MACCFG2_LENGTH_CHECK 0x00000010
#define MACCFG2_PAD_CRC_EN 0x00000004
#define MACCFG2_CRC_EN 0x00000002
#define MACCFG2_FULL_DUPLEX 0x00000001
#define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
#define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
@ -197,13 +159,8 @@
#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F

#define HAFDUP_ALT_BEB 0x00080000
#define HAFDUP_BP_NO_BACKOFF 0x00040000
#define HAFDUP_NO_BACKOFF 0x00020000
#define HAFDUP_EXCESS_DEFER 0x00010000
#define HAFDUP_COLLISION_WINDOW 0x000003ff
#define HAFDUP_ALTERNATE_BEB_TRUNCATION_MASK 0x00f00000
#define HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT 20
#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
#define HAFDUP_RETRANSMISSION_MAX 0x0000f000

@ -215,7 +172,6 @@

#define MAX_PACKET_ALIGNMENT 31
#define MAX_INTER_PACKET_GAP 0x7f
#define MAX_INTER_PALTERNATE_BEB 0x0f
#define MAX_RETRANSMISSION 0x0f
#define MAX_COLLISION_WINDOW 0x03ff

@ -224,20 +180,6 @@
/* Extended Hash table size (32 bits*16 regs) */
#define EXTENDED_HASH_TABLE_SIZE 512

/* maximum number of phys */
#define MAX_PHYS 32

/* MII Configuration Control Memory Map Registers */
struct dtsec_mii_regs {
    u32 reserved1[72];
    u32 miimcfg; /* MII Mgmt:configuration */
    u32 miimcom; /* MII Mgmt:command */
    u32 miimadd; /* MII Mgmt:address */
    u32 miimcon; /* MII Mgmt:control 3 */
    u32 miimstat; /* MII Mgmt:status */
    u32 miimind; /* MII Mgmt:indicators */
};

/* dTSEC Memory Map registers */
struct dtsec_regs {
    /* dTSEC General Control and Status Registers */
@ -340,43 +282,13 @@ struct dtsec_regs {
 * standard 512-bit slot time window. If collisions are detected after this
 * byte, the late collision event is asserted and transmission of current
 * frame is aborted.
 * rx_drop_bcast:
 * Discard broadcast frames. If set, all broadcast frames will be discarded
 * by dTSEC.
 * rx_short_frm:
 * Accept short frames. If set, dTSEC will accept frames of length 14-63 bytes.
 * rx_len_check:
 * Length check for received frames. If set, the MAC checks the frame's length
 * field on receive to ensure it matches the actual data field length.
 * This only works for received frames with length field less than 1500.
 * No check is performed for larger frames.
 * tx_pad_crc:
 * Pad and append CRC. If set, the MAC pads all transmitted short frames and
 * appends a CRC to every frame regardless of padding requirement.
 * tx_crc:
 * Transmission CRC enable. If set, the MAC appends a CRC to all frames.
 * If frames presented to the MAC have a valid length and contain a valid CRC,
 * tx_crc should be reset. This field is ignored if tx_pad_crc is set.
 * rx_ctrl_acc:
 * Control frame accept. If set, this overrides 802.3 standard control frame
 * behavior, and all Ethernet frames that have an ethertype of 0x8808 are
 * treated as normal Ethernet frames and passed up to the packet interface on
 * a DA match. Received pause control frames are passed to the packet
 * interface only if Rx flow control is also disabled.
 * See dtsec_accept_rx_pause_frames() function.
 * tx_pause_time:
 * Transmit pause time value. This pause value is used as part of the pause
 * frame to be sent when a transmit pause frame is initiated.
 * If set to 0 this disables transmission of pause frames.
 * rx_preamble:
 * Receive preamble enable. If set, the MAC recovers the received Ethernet
 * 7-byte preamble and passes it to the packet interface at the start of each
 * received frame.
 * This field should be reset for internal MAC loop-back mode.
 * tx_preamble:
 * User defined preamble enable for transmitted frames.
 * If set, a user-defined preamble must be passed to the MAC and it is
 * transmitted instead of the standard preamble.
 * preamble_len:
 * Length, in bytes, of the preamble field preceding each Ethernet
 * start-of-frame delimiter byte. The default value of 0x7 should be used in
@ -393,36 +305,14 @@ struct dtsec_regs {
 * obtained by calling set_dflts().
 */
struct dtsec_cfg {
    bool halfdup_on;
    bool halfdup_alt_backoff_en;
    bool halfdup_excess_defer;
    bool halfdup_no_backoff;
    bool halfdup_bp_no_backoff;
    u32 halfdup_alt_backoff_val;
    u16 halfdup_retransmit;
    u16 halfdup_coll_window;
    bool rx_drop_bcast;
    bool rx_short_frm;
    bool rx_len_check;
    bool tx_pad_crc;
    bool tx_crc;
    bool rx_ctrl_acc;
    u16 tx_pause_time;
    u16 tbipa;
    bool ptp_tsu_en;
    bool ptp_exception_en;
    bool rx_preamble;
    bool tx_preamble;
    u32 preamble_len;
    u32 rx_prepend;
    bool loopback;
    bool rx_time_stamp_en;
    bool tx_time_stamp_en;
    bool rx_flow;
    bool tx_flow;
    bool rx_group_hash_exd;
    bool rx_promisc;
    u8 tbi_phy_addr;
    u16 tx_pause_time_extd;
    u16 maximum_frame;
    u32 non_back_to_back_ipg1;
@ -434,10 +324,6 @@ struct dtsec_cfg {
struct fman_mac {
    /* pointer to dTSEC memory mapped registers */
    struct dtsec_regs __iomem *regs;
    /* pointer to dTSEC MII memory mapped registers */
    struct dtsec_mii_regs __iomem *mii_regs;
    /* MII management clock */
    u16 mii_mgmt_clk;
    /* MAC address of device */
    u64 addr;
    /* Ethernet physical interface */
@ -453,169 +339,38 @@ struct fman_mac {
    /* pointer to driver's individual address hash table */
    struct eth_hash_t *unicast_addr_hash;
    u8 mac_id;
    u8 tbi_phy_addr;
    u32 exceptions;
    bool ptp_tsu_enabled;
    bool en_tsu_err_exeption;
    bool en_tsu_err_exception;
    struct dtsec_cfg *dtsec_drv_param;
    void *fm;
    struct fman_rev_info fm_rev_info;
    bool basex_if;
    struct phy_device *tbiphy;
};

static u32 calc_mii_mgmt_clk(struct fman_mac *dtsec)
{
    u16 fm_clk_freq, dtsec_freq;
    u32 mgmt_clk;

    fm_clk_freq = fman_get_clock_freq(dtsec->fm);
    if (fm_clk_freq == 0) {
        pr_err("Can't get clock for MAC!\n");
        return 0;
    }

    dtsec_freq = (u16)(fm_clk_freq >> 1);

    if (dtsec_freq < 80)
        mgmt_clk = 1;
    else if (dtsec_freq < 120)
        mgmt_clk = 2;
    else if (dtsec_freq < 160)
        mgmt_clk = 3;
    else if (dtsec_freq < 200)
        mgmt_clk = 4;
    else if (dtsec_freq < 280)
        mgmt_clk = 5;
    else if (dtsec_freq < 400)
        mgmt_clk = 6;
    else
        mgmt_clk = 7;

    return mgmt_clk;
}

static int mii_write_reg(struct fman_mac *dtsec, u8 addr, u8 reg, u16 data)
{
    struct dtsec_mii_regs __iomem *regs = dtsec->mii_regs;
    u32 tmp;
    int count;

    /* Setup the MII Mgmt clock speed */
    iowrite32be(dtsec->mii_mgmt_clk, &regs->miimcfg);

    /* Stop the MII management read cycle */
    iowrite32be(0, &regs->miimcom);
    /* Dummy read to make sure MIIMCOM is written */
    tmp = ioread32be(&regs->miimcom);

    /* Setting up MII Management Address Register */
    tmp = (u32)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
    iowrite32be(tmp, &regs->miimadd);

    /* Setting up MII Management Control Register with data */
    iowrite32be((u32)data, &regs->miimcon);
    /* Dummy read to make sure MIIMCON is written */
    tmp = ioread32be(&regs->miimcon);

    /* Wait until MII management write is complete */
    count = 100;
    do {
        udelay(1);
    } while (((ioread32be(&regs->miimind)) & MIIMIND_BUSY) && count--);

    if (count == 0)
        return -EBUSY;

    return 0;
}

static int mii_read_reg(struct fman_mac *dtsec, u8 addr, u8 reg, u16 *data)
{
    struct dtsec_mii_regs __iomem *regs = dtsec->mii_regs;
    u32 tmp;
    int count;

    /* Setup the MII Mgmt clock speed */
    iowrite32be(dtsec->mii_mgmt_clk, &regs->miimcfg);

    /* Setting up the MII Management Address Register */
    tmp = (u32)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
    iowrite32be(tmp, &regs->miimadd);

    /* Perform an MII management read cycle */
    iowrite32be(MIIMCOM_READ_CYCLE, &regs->miimcom);
    /* Dummy read to make sure MIIMCOM is written */
    tmp = ioread32be(&regs->miimcom);

    /* Wait until MII management write is complete */
    count = 100;
    do {
        udelay(1);
    } while (((ioread32be(&regs->miimind)) & MIIMIND_BUSY) && count--);

    if (count == 0)
        return -EBUSY;

    /* Read MII management status */
    *data = (u16)ioread32be(&regs->miimstat);

    iowrite32be(0, &regs->miimcom);
    /* Dummy read to make sure MIIMCOM is written */
    tmp = ioread32be(&regs->miimcom);

    if (*data == 0xffff) {
        pr_warn("Read wrong data(0xffff):phy_addr 0x%x,reg 0x%x",
                addr, reg);
        return -ENXIO;
    }

    return 0;
}

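The calc_mii_mgmt_clk(), mii_write_reg() and mii_read_reg() helpers removed above polled the dTSEC's private MII management block (MIIMCFG/MIIMADD/MIIMIND) directly. The hunks that follow replace every use of them with the kernel phylib accessors on the new tbiphy device, roughly as in this sketch:

/* Sketch of the phylib replacement; phy_read() returns a negative
 * errno on failure, the raw register value otherwise.
 */
int val = phy_read(dtsec->tbiphy, MII_BMCR);      /* was mii_read_reg()  */

if (val >= 0)
    phy_write(dtsec->tbiphy, MII_BMCR, (u16)val); /* was mii_write_reg() */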
static void set_dflts(struct dtsec_cfg *cfg)
{
    cfg->halfdup_on = false;
    cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
    cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
    cfg->halfdup_excess_defer = true;
    cfg->halfdup_no_backoff = false;
    cfg->halfdup_bp_no_backoff = false;
    cfg->halfdup_alt_backoff_val = DEFAULT_HALFDUP_ALT_BACKOFF_VAL;
    cfg->halfdup_alt_backoff_en = false;
    cfg->rx_drop_bcast = false;
    cfg->rx_short_frm = true;
    cfg->rx_len_check = false;
    cfg->tx_pad_crc = true;
    cfg->tx_crc = false;
    cfg->rx_ctrl_acc = false;
    cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
    /* PHY address 0 is reserved (DPAA RM) */
    cfg->tbipa = DEFAULT_TBIPA;
    cfg->rx_prepend = DEFAULT_RX_PREPEND;
    cfg->ptp_tsu_en = true;
    cfg->ptp_exception_en = true;
    cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
    cfg->rx_preamble = false;
    cfg->tx_preamble = false;
    cfg->loopback = false;
    cfg->rx_time_stamp_en = false;
    cfg->tx_time_stamp_en = false;
    cfg->rx_flow = true;
    cfg->tx_flow = true;
    cfg->rx_group_hash_exd = false;
    cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
    cfg->rx_promisc = false;
    cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
    cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
    cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
    cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
    cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
    cfg->tbi_phy_addr = DEFAULT_TBI_PHY_ADDR;
}

static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
                phy_interface_t iface, u16 iface_speed, u8 *macaddr,
                u32 exception_mask)
                u32 exception_mask, u8 tbi_addr)
{
    bool is_rgmii, is_sgmii, is_qsgmii;
    int i;
@ -658,14 +413,6 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,

    iowrite32be(tmp, &regs->ecntrl);

    tmp = 0;
    if (cfg->halfdup_on)
        tmp |= DTSEC_TCTRL_THDF;
    if (cfg->tx_time_stamp_en)
        tmp |= DTSEC_TCTRL_TTSE;

    iowrite32be(tmp, &regs->tctrl);

    tmp = 0;

    if (cfg->tx_pause_time)
@ -676,18 +423,8 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,

    tmp = 0;
    tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
    if (cfg->rx_ctrl_acc)
        tmp |= RCTRL_CFA;
    if (cfg->rx_group_hash_exd)
        tmp |= RCTRL_GHTX;
    if (cfg->rx_time_stamp_en)
        tmp |= RCTRL_RTSE;
    if (cfg->rx_drop_bcast)
        tmp |= RCTRL_BC_REJ;
    if (cfg->rx_short_frm)
    /* Accept short frames */
    tmp |= RCTRL_RSF;
    if (cfg->rx_promisc)
        tmp |= RCTRL_PROM;

    iowrite32be(tmp, &regs->rctrl);

@ -695,7 +432,7 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
     * Done also in cases where TBI is not selected to avoid conflict with
     * the external PHY's Physical address
     */
    iowrite32be(cfg->tbipa, &regs->tbipa);
    iowrite32be(tbi_addr, &regs->tbipa);

    iowrite32be(0, &regs->tmr_ctrl);

@ -712,11 +449,7 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
    }

    tmp = 0;
    if (cfg->loopback)
        tmp |= MACCFG1_LOOPBACK;
    if (cfg->rx_flow)
        tmp |= MACCFG1_RX_FLOW;
    if (cfg->tx_flow)
        tmp |= MACCFG1_TX_FLOW;
    iowrite32be(tmp, &regs->maccfg1);

@ -729,17 +462,9 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,

    tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
           MACCFG2_PREAMBLE_LENGTH_MASK;
    if (cfg->rx_preamble)
        tmp |= MACCFG2_PRE_AM_RX_EN;
    if (cfg->tx_preamble)
        tmp |= MACCFG2_PRE_AM_TX_EN;
    if (cfg->rx_len_check)
        tmp |= MACCFG2_LENGTH_CHECK;
    if (cfg->tx_pad_crc)
        tmp |= MACCFG2_PAD_CRC_EN;
    if (cfg->tx_crc)
        tmp |= MACCFG2_CRC_EN;
    if (!cfg->halfdup_on)
    /* Full Duplex */
    tmp |= MACCFG2_FULL_DUPLEX;
    iowrite32be(tmp, &regs->maccfg2);

@ -755,18 +480,6 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
    iowrite32be(tmp, &regs->ipgifg);

    tmp = 0;

    if (cfg->halfdup_alt_backoff_en) {
        tmp = HAFDUP_ALT_BEB;
        tmp |= (cfg->halfdup_alt_backoff_val <<
                HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT) &
               HAFDUP_ALTERNATE_BEB_TRUNCATION_MASK;
    }
    if (cfg->halfdup_bp_no_backoff)
        tmp |= HAFDUP_BP_NO_BACKOFF;
    if (cfg->halfdup_no_backoff)
        tmp |= HAFDUP_NO_BACKOFF;
    if (cfg->halfdup_excess_defer)
    tmp |= HAFDUP_EXCESS_DEFER;
    tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
            & HAFDUP_RETRANSMISSION_MAX);
@ -843,35 +556,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
        pr_err("Ethernet MAC Must have a valid MAC Address\n");
        return -EINVAL;
    }
    if (dtsec->max_speed >= SPEED_1000 &&
        dtsec->dtsec_drv_param->halfdup_on) {
        pr_err("Ethernet MAC 1G can't work in half duplex\n");
        return -EINVAL;
    }

    /* FM_RX_PREAM_4_ERRATA_DTSEC_A001 Errata workaround */
    if (dtsec->dtsec_drv_param->rx_preamble) {
        pr_err("preamble_rx_en\n");
        return -EINVAL;
    }

    if (((dtsec->dtsec_drv_param)->tx_preamble ||
         (dtsec->dtsec_drv_param)->rx_preamble) &&
        ((dtsec->dtsec_drv_param)->preamble_len != 0x7)) {
        pr_err("Preamble length should be 0x7 bytes\n");
        return -EINVAL;
    }
    if ((dtsec->dtsec_drv_param)->halfdup_on &&
        (dtsec->dtsec_drv_param->tx_time_stamp_en ||
         dtsec->dtsec_drv_param->rx_time_stamp_en)) {
        pr_err("1588 timeStamp disabled in half duplex mode\n");
        return -EINVAL;
    }
    if ((dtsec->dtsec_drv_param)->rx_flow &&
        (dtsec->dtsec_drv_param)->rx_ctrl_acc) {
        pr_err("Receive control frame can not be accepted\n");
        return -EINVAL;
    }
    if ((dtsec->dtsec_drv_param)->rx_prepend >
        MAX_PACKET_ALIGNMENT) {
        pr_err("packetAlignmentPadding can't be > than %d\n",
@ -888,12 +572,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
               MAX_INTER_PACKET_GAP);
        return -EINVAL;
    }
    if ((dtsec->dtsec_drv_param)->halfdup_alt_backoff_val >
        MAX_INTER_PALTERNATE_BEB) {
        pr_err("alternateBackoffVal can't be greater than %d\n",
               MAX_INTER_PALTERNATE_BEB);
        return -EINVAL;
    }
    if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
        MAX_RETRANSMISSION) {
        pr_err("maxRetransmission can't be greater than %d\n",
@ -909,10 +587,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
     * using the MII Management Interface
     */
    }
    if (dtsec->dtsec_drv_param->tbipa > MAX_PHYS) {
        pr_err("PHY address (should be 0-%d)\n", MAX_PHYS);
        return -ERANGE;
    }
    if (!dtsec->exception_cb) {
        pr_err("uninitialized exception_cb\n");
        return -EINVAL;
@ -922,12 +596,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
        return -EINVAL;
    }

    /* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
    if (dtsec->dtsec_drv_param->rx_len_check) {
        pr_warn("Length Check!\n");
        return -EINVAL;
    }

    return 0;
}

@ -998,18 +666,6 @@ static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
    return false;
}

static u32 get_mac_addr_hash_code(u64 eth_addr)
{
    u32 crc;

    /* CRC calculation */
    GET_MAC_ADDR_CRC(eth_addr, crc);

    crc = bitrev32(crc);

    return crc;
}

static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
{
    struct dtsec_regs __iomem *regs = dtsec->regs;
@ -1059,10 +715,10 @@ static void dtsec_isr(void *handle)
     * This is a read only register
     * b. Read and save the value of TPKT
     */
    tpkt1 = in_be32(&regs->tpkt);
    tpkt1 = ioread32be(&regs->tpkt);

    /* c. Read the register at dTSEC address offset 0x32C */
    tmp_reg1 = in_be32(&regs->reserved02c0[27]);
    tmp_reg1 = ioread32be(&regs->reserved02c0[27]);

    /* d. Compare bits [9:15] to bits [25:31] of the
     * register at address offset 0x32C.
@ -1083,8 +739,8 @@ static void dtsec_isr(void *handle)
    /* e. Read and save TPKT again and read the register
     * at dTSEC address offset 0x32C again
     */
    tpkt2 = in_be32(&regs->tpkt);
    tmp_reg2 = in_be32(&regs->reserved02c0[27]);
    tpkt2 = ioread32be(&regs->tpkt);
    tmp_reg2 = ioread32be(&regs->reserved02c0[27]);

    /* f. Compare the value of TPKT saved in step b to
     * value read in step e. Also compare bits [9:15] of
@ -1100,21 +756,22 @@ static void dtsec_isr(void *handle)

    /* a.Write a 1 to RCTRL[GRS] */

    out_be32(&regs->rctrl,
             in_be32(&regs->rctrl) | RCTRL_GRS);
    iowrite32be(ioread32be(&regs->rctrl) |
                RCTRL_GRS, &regs->rctrl);

    /* b.Wait until IEVENT[GRSC]=1, or at least
     * 100 us has elapsed.
     */
    for (i = 0; i < 100; i++) {
        if (in_be32(&regs->ievent) &
        if (ioread32be(&regs->ievent) &
            DTSEC_IMASK_GRSCEN)
            break;
        udelay(1);
    }
    if (in_be32(&regs->ievent) & DTSEC_IMASK_GRSCEN)
        out_be32(&regs->ievent,
                 DTSEC_IMASK_GRSCEN);
    if (ioread32be(&regs->ievent) &
        DTSEC_IMASK_GRSCEN)
        iowrite32be(DTSEC_IMASK_GRSCEN,
                    &regs->ievent);
    else
        pr_debug("Rx lockup due to Tx lockup\n");

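The dtsec_isr() hunks above are part of a mechanical conversion from the PowerPC-only in_be32()/out_be32() accessors to the portable ioread32be()/iowrite32be() ones. Both pairs perform a 32-bit big-endian MMIO access; the one thing to watch, visible in the diff, is that the two write helpers take their arguments in opposite order:

/* Same register write, two APIs: */
out_be32(&regs->rctrl, val);    /* PowerPC-specific: (address, value) */
iowrite32be(val, &regs->rctrl); /* portable:         (value, address) */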
@ -1279,15 +936,14 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
    if (!is_init_done(dtsec->dtsec_drv_param))
        return -EINVAL;

    if (pause_time) {
    /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
    if (dtsec->fm_rev_info.major == 2)
        if (0 < pause_time && pause_time <= 320) {
    if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
        pr_warn("pause-time: %d illegal.Should be > 320\n",
                pause_time);
        return -EINVAL;
    }

    if (pause_time) {
        ptv = ioread32be(&regs->ptv);
        ptv &= PTV_PTE_MASK;
        ptv |= pause_time & PTV_PT_MASK;
@ -1341,7 +997,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
    struct eth_hash_entry *hash_entry;
    u64 addr;
    s32 bucket;
    u32 crc;
    u32 crc = 0xFFFFFFFF;
    bool mcast, ghtx;

    if (!is_init_done(dtsec->dtsec_drv_param))
@ -1357,7 +1013,8 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
        pr_err("Could not compute hash bucket\n");
        return -EINVAL;
    }
    crc = get_mac_addr_hash_code(addr);
    crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
    crc = bitrev32(crc);

    /* considering the 9 highest order bits in crc H[8:0]:
     * if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
@ -1407,7 +1064,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
    struct eth_hash_entry *hash_entry = NULL;
    u64 addr;
    s32 bucket;
    u32 crc;
    u32 crc = 0xFFFFFFFF;
    bool mcast, ghtx;

    if (!is_init_done(dtsec->dtsec_drv_param))
@ -1423,7 +1080,8 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
        pr_err("Could not compute hash bucket\n");
        return -EINVAL;
    }
    crc = get_mac_addr_hash_code(addr);
    crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
    crc = bitrev32(crc);

    if (ghtx) {
        bucket = (s32)((crc >> 23) & 0x1ff);
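In both hash hunks the removed GET_MAC_ADDR_CRC() macro is replaced by the generic crc32_le(), seeded with 0xFFFFFFFF, plus bitrev32() so the CRC ends up MSB-first the way the dTSEC hash registers expect. A worked sketch of the bucket selection the comment above describes (the non-ghtx branch is outside this hunk and is shown here as it appears in the upstream driver):

/* Sketch: from MAC address to hash bucket. ETH_ALEN is 6. */
u32 crc = crc32_le(0xFFFFFFFF, (u8 *)eth_addr, ETH_ALEN);

crc = bitrev32(crc);
if (ghtx)
    bucket = (s32)((crc >> 23) & 0x1ff); /* H[8:0]: 9 bits, 512 buckets */
else
    bucket = (s32)((crc >> 24) & 0xff);  /* 8 bits (upstream non-ghtx case) */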
@ -1532,22 +1190,17 @@ int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
int dtsec_restart_autoneg(struct fman_mac *dtsec)
{
    u16 tmp_reg16;
    int err;

    if (!is_init_done(dtsec->dtsec_drv_param))
        return -EINVAL;

    err = mii_read_reg(dtsec, dtsec->tbi_phy_addr, 0, &tmp_reg16);
    if (err) {
        pr_err("Autonegotiation restart failed\n");
        return err;
    }
    tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);

    tmp_reg16 &= ~(PHY_CR_SPEED0 | PHY_CR_SPEED1);
    tmp_reg16 |=
        (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX | PHY_CR_SPEED1);
    tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
    tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
                  BMCR_FULLDPLX | BMCR_SPEED1000);

    mii_write_reg(dtsec, dtsec->tbi_phy_addr, 0, tmp_reg16);
    phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);

    return 0;
}
@ -1598,12 +1251,12 @@ int dtsec_set_exception(struct fman_mac *dtsec,
    switch (exception) {
    case FM_MAC_EX_1G_1588_TS_RX_ERR:
        if (enable) {
            dtsec->en_tsu_err_exeption = true;
            dtsec->en_tsu_err_exception = true;
            iowrite32be(ioread32be(&regs->tmr_pemask) |
                        TMR_PEMASK_TSREEN,
                        &regs->tmr_pemask);
        } else {
            dtsec->en_tsu_err_exeption = false;
            dtsec->en_tsu_err_exception = false;
            iowrite32be(ioread32be(&regs->tmr_pemask) &
                        ~TMR_PEMASK_TSREEN,
                        &regs->tmr_pemask);
@ -1644,7 +1297,8 @@ int dtsec_init(struct fman_mac *dtsec)
    MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);

    err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
               dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions);
               dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
               dtsec->tbiphy->mdio.addr);
    if (err) {
        free_init_resources(dtsec);
        pr_err("DTSEC version doesn't support this i/f mode\n");
@ -1655,30 +1309,26 @@ int dtsec_init(struct fman_mac *dtsec)
        u16 tmp_reg16;

        /* Configure the TBI PHY Control Register */
        tmp_reg16 = PHY_TBICON_CLK_SEL | PHY_TBICON_SRESET;
        mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 17,
                      tmp_reg16);
        tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
        phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

        tmp_reg16 = PHY_TBICON_CLK_SEL;
        mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 17,
                      tmp_reg16);
        tmp_reg16 = TBICON_CLK_SELECT;
        phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

        tmp_reg16 =
            (PHY_CR_PHY_RESET | PHY_CR_ANE | PHY_CR_FULLDUPLEX |
             PHY_CR_SPEED1);
        mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 0, tmp_reg16);
        tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
                     BMCR_FULLDPLX | BMCR_SPEED1000);
        phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);

        if (dtsec->basex_if)
            tmp_reg16 = PHY_TBIANA_1000X;
            tmp_reg16 = TBIANA_1000X;
        else
            tmp_reg16 = PHY_TBIANA_SGMII;
        mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 4, tmp_reg16);
            tmp_reg16 = TBIANA_SGMII;
        phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);

        tmp_reg16 =
            (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX |
             PHY_CR_SPEED1);
        tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
                     BMCR_FULLDPLX | BMCR_SPEED1000);

        mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 0, tmp_reg16);
        phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
    }

    /* Max Frame Length */
@ -1752,34 +1402,53 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)

    set_dflts(dtsec_drv_param);

    dtsec->regs = (struct dtsec_regs __iomem *)(base_addr);
    dtsec->mii_regs = (struct dtsec_mii_regs __iomem *)
        (base_addr + DTSEC_TO_MII_OFFSET);
    dtsec->regs = base_addr;
    dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
    dtsec->max_speed = params->max_speed;
    dtsec->phy_if = params->phy_if;
    dtsec->mac_id = params->mac_id;
    dtsec->exceptions = DTSEC_DEFAULT_EXCEPTIONS;
    dtsec->exceptions = (DTSEC_IMASK_BREN |
                         DTSEC_IMASK_RXCEN |
                         DTSEC_IMASK_BTEN |
                         DTSEC_IMASK_TXCEN |
                         DTSEC_IMASK_TXEEN |
                         DTSEC_IMASK_ABRTEN |
                         DTSEC_IMASK_LCEN |
                         DTSEC_IMASK_CRLEN |
                         DTSEC_IMASK_XFUNEN |
                         DTSEC_IMASK_IFERREN |
                         DTSEC_IMASK_MAGEN |
                         DTSEC_IMASK_TDPEEN |
                         DTSEC_IMASK_RDPEEN);
    dtsec->exception_cb = params->exception_cb;
    dtsec->event_cb = params->event_cb;
    dtsec->dev_id = params->dev_id;
    dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
    dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en;
    dtsec->tbi_phy_addr = dtsec->dtsec_drv_param->tbi_phy_addr;
    dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;

    dtsec->fm = params->fm;
    dtsec->basex_if = params->basex_if;
    dtsec->mii_mgmt_clk = calc_mii_mgmt_clk(dtsec);
    if (dtsec->mii_mgmt_clk == 0) {
        pr_err("Can't calculate MII management clock\n");
        goto err_dtsec;

    if (!params->internal_phy_node) {
        pr_err("TBI PHY node is not available\n");
        goto err_dtsec_drv_param;
    }

    dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
    if (!dtsec->tbiphy) {
        pr_err("of_phy_find_device (TBI PHY) failed\n");
        goto err_dtsec_drv_param;
    }

    put_device(&dtsec->tbiphy->mdio.dev);

    /* Save FMan revision */
    fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);

    return dtsec;

err_dtsec_drv_param:
    kfree(dtsec_drv_param);
err_dtsec:
    kfree(dtsec);
    return NULL;
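In the new dtsec_config() tail above, of_phy_find_device() returns the TBI PHY with a reference held on its underlying struct device; the code drops that reference immediately with put_device() and keeps only the bare pointer. The mEMAC variant later in this commit instead holds the reference and releases it in memac_free(). Sketch of the dTSEC pattern:

/* Sketch: look up a PHY from a DT node, keep the pointer only. */
struct phy_device *phy = of_phy_find_device(np);

if (!phy)
    return NULL;
put_device(&phy->mdio.dev); /* drop the lookup reference right away */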
@ -191,10 +191,6 @@ struct fman_mac_params {
    u16 max_speed;
    /* A handle to the FM object this port related to */
    void *fm;
    /* MDIO exceptions interrupt source - not valid for all
     * MACs; MUST be set to 'NO_IRQ' for MACs that don't have
     * mdio-irq, or for polling
     */
    void *dev_id; /* device cookie used by the exception cbs */
    fman_mac_exception_cb *event_cb;    /* MDIO Events Callback Routine */
    fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
@ -204,6 +200,8 @@ struct fman_mac_params {
     * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
     */
    bool basex_if;
    /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */
    struct device_node *internal_phy_node;
};

struct eth_hash_t {
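The new internal_phy_node member is how callers hand the dTSEC TBI PHY node or the mEMAC PCS PHY node down to the MAC drivers. A hedged sketch of filling it from the device tree, assuming the "tbi-handle" and "pcsphy-handle" properties used by the Linux FMan bindings:

/* Sketch: property names assumed from the Linux FMan DT bindings. */
/* dTSEC: */
params->internal_phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
/* mEMAC: */
params->internal_phy_node = of_parse_phandle(mac_node, "pcsphy-handle", 0);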
@ -42,59 +42,55 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>

/* MII Management Registers */
#define MDIO_CFG_CLK_DIV_MASK 0x0080ff80
#define MDIO_CFG_HOLD_MASK 0x0000001c
#define MDIO_CFG_ENC45 0x00000040
#define MDIO_CFG_BSY 0x00000001
/* PCS registers */
#define MDIO_SGMII_CR 0x00
#define MDIO_SGMII_DEV_ABIL_SGMII 0x04
#define MDIO_SGMII_LINK_TMR_L 0x12
#define MDIO_SGMII_LINK_TMR_H 0x13
#define MDIO_SGMII_IF_MODE 0x14

#define MDIO_CTL_PHY_ADDR_SHIFT 5
/* SGMII Control defines */
#define SGMII_CR_AN_EN 0x1000
#define SGMII_CR_RESTART_AN 0x0200
#define SGMII_CR_FD 0x0100
#define SGMII_CR_SPEED_SEL1_1G 0x0040
#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \
                          SGMII_CR_SPEED_SEL1_1G)

#define MDIO_DATA_BSY 0x80000000
/* SGMII Device Ability for SGMII defines */
#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001
#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0

/* Internal PHY access */
#define PHY_MDIO_ADDR 0
/* Link timer define */
#define LINK_TMR_L 0xa120
#define LINK_TMR_H 0x0007
#define LINK_TMR_L_BASEX 0xaf08
#define LINK_TMR_H_BASEX 0x002f

/* Internal PHY Registers - SGMII */
#define PHY_SGMII_CR_RESET_AN 0x0200
#define PHY_SGMII_CR_AN_ENABLE 0x1000
#define PHY_SGMII_CR_DEF_VAL 0x1140
#define PHY_SGMII_DEV_ABILITY_SGMII 0x4001
#define PHY_SGMII_DEV_ABILITY_1000X 0x01A0
#define PHY_SGMII_IF_MODE_DUPLEX_FULL 0x0000
#define PHY_SGMII_IF_MODE_DUPLEX_HALF 0x0010
#define PHY_SGMII_IF_MODE_SPEED_GB 0x0008
#define PHY_SGMII_IF_MODE_SPEED_100M 0x0004
#define PHY_SGMII_IF_MODE_SPEED_10M 0x0000
#define PHY_SGMII_IF_MODE_AN 0x0002
#define PHY_SGMII_IF_MODE_SGMII 0x0001
#define PHY_SGMII_IF_MODE_1000X 0x0000
/* SGMII IF Mode defines */
#define IF_MODE_USE_SGMII_AN 0x0002
#define IF_MODE_SGMII_EN 0x0001
#define IF_MODE_SGMII_SPEED_100M 0x0004
#define IF_MODE_SGMII_SPEED_1G 0x0008
#define IF_MODE_SGMII_DUPLEX_HALF 0x0010

/* Offset from the MEM map to the MDIO mem map */
#define MEMAC_TO_MII_OFFSET 0x030
/* Num of additional exact match MAC adr regs */
#define MEMAC_NUM_OF_PADDRS 7

/* Control and Configuration Register (COMMAND_CONFIG) */
#define CMD_CFG_MG 0x80000000 /* 00 Magic Packet detection */
#define CMD_CFG_REG_LOWP_RXETY 0x01000000 /* 07 Rx low power indication */
#define CMD_CFG_TX_LOWP_ENA 0x00800000 /* 08 Tx Low Power Idle Enable */
#define CMD_CFG_SFD_ANY 0x00200000 /* 10 Disable SFD check */
#define CMD_CFG_PFC_MODE 0x00080000 /* 12 Enable PFC */
#define CMD_CFG_NO_LEN_CHK 0x00020000 /* 14 Payload length check disable */
#define CMD_CFG_SEND_IDLE 0x00010000 /* 15 Force idle generation */
#define CMD_CFG_CNT_FRM_EN 0x00002000 /* 18 Control frame rx enable */
#define CMD_CFG_SW_RESET 0x00001000 /* 19 S/W Reset, self clearing bit */
#define CMD_CFG_TX_PAD_EN 0x00000800 /* 20 Enable Tx padding of frames */
#define CMD_CFG_LOOPBACK_EN 0x00000400 /* 21 XGMII/GMII loopback enable */
#define CMD_CFG_TX_ADDR_INS 0x00000200 /* 22 Tx source MAC addr insertion */
#define CMD_CFG_PAUSE_IGNORE 0x00000100 /* 23 Ignore Pause frame quanta */
#define CMD_CFG_PAUSE_FWD 0x00000080 /* 24 Terminate/frwd Pause frames */
#define CMD_CFG_CRC_FWD 0x00000040 /* 25 Terminate/frwd CRC of frames */
#define CMD_CFG_PAD_EN 0x00000020 /* 26 Frame padding removal */
#define CMD_CFG_PROMIS_EN 0x00000010 /* 27 Promiscuous operation enable */
#define CMD_CFG_WAN_MODE 0x00000008 /* 28 WAN mode enable */
#define CMD_CFG_RX_EN 0x00000002 /* 30 MAC receive path enable */
#define CMD_CFG_TX_EN 0x00000001 /* 31 MAC transmit path enable */

@ -171,10 +167,6 @@ do { \
#define DEFAULT_FRAME_LENGTH 0x600
#define DEFAULT_TX_IPG_LENGTH 12

#define MEMAC_DEFAULT_EXCEPTIONS \
    ((u32)(MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER | \
           MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI))

#define CLXY_PAUSE_QUANTA_CLX_PQNT 0x0000FFFF
#define CLXY_PAUSE_QUANTA_CLY_PQNT 0xFFFF0000
#define CLXY_PAUSE_THRESH_CLX_QTH 0x0000FFFF
@ -187,14 +179,6 @@ struct mac_addr {
    u32 mac_addr_u;
};

/* MII Configuration Control Memory Map Registers */
struct memac_mii_regs {
    u32 mdio_cfg;  /* 0x030 */
    u32 mdio_ctrl; /* 0x034 */
    u32 mdio_data; /* 0x038 */
    u32 mdio_addr; /* 0x03c */
};

/* memory map */
struct memac_regs {
    u32 res0000[2]; /* General Control and Status */
@ -340,25 +324,8 @@ struct memac_regs {

struct memac_cfg {
    bool reset_on_init;
    bool rx_error_discard;
    bool pause_ignore;
    bool pause_forward_enable;
    bool no_length_check_enable;
    bool cmd_frame_enable;
    bool send_idle_enable;
    bool wan_mode_enable;
    bool promiscuous_mode_enable;
    bool tx_addr_ins_enable;
    bool loopback_enable;
    bool lgth_check_nostdr;
    bool time_stamp_enable;
    bool pad_enable;
    bool phy_tx_ena_on;
    bool rx_sfd_any;
    bool rx_pbl_fwd;
    bool tx_pbl_fwd;
    bool debug_mode;
    bool wake_on_lan;
    struct fixed_phy_status *fixed_link;
    u16 max_frame_length;
    u16 pause_quanta;
@ -368,8 +335,6 @@ struct memac_cfg {
struct fman_mac {
    /* Pointer to MAC memory mapped registers */
    struct memac_regs __iomem *regs;
    /* Pointer to MII memory mapped registers */
    struct memac_mii_regs __iomem *mii_regs;
    /* MAC address of device */
    u64 addr;
    /* Ethernet physical interface */
@ -382,133 +347,15 @@ struct fman_mac {
    struct eth_hash_t *multicast_addr_hash;
    /* Pointer to driver's individual address hash table */
    struct eth_hash_t *unicast_addr_hash;
    bool debug_mode;
    u8 mac_id;
    u32 exceptions;
    struct memac_cfg *memac_drv_param;
    void *fm;
    struct fman_rev_info fm_rev_info;
    bool basex_if;
    struct phy_device *pcsphy;
};

static int write_phy_reg_10g(struct memac_mii_regs __iomem *mii_regs,
                             u8 phy_addr, u8 reg, u16 data)
{
    u32 tmp_reg;
    int count;

    tmp_reg = ioread32be(&mii_regs->mdio_cfg);
    /* Leave only MDIO_CLK_DIV bits set on */
    tmp_reg &= MDIO_CFG_CLK_DIV_MASK;
    /* Set maximum MDIO_HOLD value to allow phy to see
     * change of data signal
     */
    tmp_reg |= MDIO_CFG_HOLD_MASK;
    /* Add 10G interface mode */
    tmp_reg |= MDIO_CFG_ENC45;
    iowrite32be(tmp_reg, &mii_regs->mdio_cfg);

    /* Wait for command completion */
    count = 100;
    do {
        udelay(1);
    } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);

    if (count == 0)
        return -EBUSY;

    /* Specify phy and register to be accessed */
    iowrite32be(phy_addr, &mii_regs->mdio_ctrl);
    iowrite32be(reg, &mii_regs->mdio_addr);

    count = 100;
    do {
        udelay(1);
    } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);

    if (count == 0)
        return -EBUSY;

    /* Write data */
    iowrite32be(data, &mii_regs->mdio_data);

    /* Wait for write transaction end */
    count = 100;
    do {
        udelay(1);
    } while (((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY) &&
             --count);

    if (count == 0)
        return -EBUSY;

    return 0;
}

static int write_phy_reg_1g(struct memac_mii_regs __iomem *mii_regs,
                            u8 phy_addr, u8 reg, u16 data)
{
    u32 tmp_reg;
    int count;

    /* Leave only MDIO_CLK_DIV and MDIO_HOLD bits set on */
    tmp_reg = ioread32be(&mii_regs->mdio_cfg);
    tmp_reg &= (MDIO_CFG_CLK_DIV_MASK | MDIO_CFG_HOLD_MASK);
    iowrite32be(tmp_reg, &mii_regs->mdio_cfg);

    /* Wait for command completion */
    count = 100;
    do {
        udelay(1);
    } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);

    if (count == 0)
        return -EBUSY;

    /* Write transaction */
    tmp_reg = (phy_addr << MDIO_CTL_PHY_ADDR_SHIFT);
    tmp_reg |= reg;
    iowrite32be(tmp_reg, &mii_regs->mdio_ctrl);

    /* Wait for command completion */
    count = 100;
    do {
        udelay(1);
    } while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);

    if (count == 0)
        return -EBUSY;

    iowrite32be(data, &mii_regs->mdio_data);

    /* Wait for write transaction to end */
    count = 100;
    do {
        udelay(1);
    } while (((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY) &&
             --count);

    if (count == 0)
        return -EBUSY;

    return 0;
}

static int mii_write_phy_reg(struct fman_mac *memac, u8 phy_addr, u8 reg,
                             u16 data)
{
    int err = 0;
    /* Figure out interface type - 10G vs 1G.
     * In 10G interface both phy_addr and devAddr present.
     */
    if (memac->max_speed == SPEED_10000)
        err = write_phy_reg_10g(memac->mii_regs, phy_addr, reg, data);
    else
        err = write_phy_reg_1g(memac->mii_regs, phy_addr, reg, data);

    return err;
}

static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
                              u8 paddr_num)
{
@ -571,30 +418,15 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,

    /* Config */
    tmp = 0;
    if (cfg->wan_mode_enable)
        tmp |= CMD_CFG_WAN_MODE;
    if (cfg->promiscuous_mode_enable)
        tmp |= CMD_CFG_PROMIS_EN;
    if (cfg->pause_forward_enable)
        tmp |= CMD_CFG_PAUSE_FWD;
    if (cfg->pause_ignore)
        tmp |= CMD_CFG_PAUSE_IGNORE;
    if (cfg->tx_addr_ins_enable)
        tmp |= CMD_CFG_TX_ADDR_INS;
    if (cfg->loopback_enable)
        tmp |= CMD_CFG_LOOPBACK_EN;
    if (cfg->cmd_frame_enable)
        tmp |= CMD_CFG_CNT_FRM_EN;
    if (cfg->send_idle_enable)
        tmp |= CMD_CFG_SEND_IDLE;
    if (cfg->no_length_check_enable)

        /* Payload length check disable */
        tmp |= CMD_CFG_NO_LEN_CHK;
    if (cfg->rx_sfd_any)
        tmp |= CMD_CFG_SFD_ANY;
    if (cfg->pad_enable)
        /* Enable padding of frames in transmit direction */
        tmp |= CMD_CFG_TX_PAD_EN;
    if (cfg->wake_on_lan)
        tmp |= CMD_CFG_MG;

    tmp |= CMD_CFG_CRC_FWD;

@ -615,7 +447,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
        break;
    default:
        tmp |= IF_MODE_GMII;
        if (phy_if == PHY_INTERFACE_MODE_RGMII && !cfg->loopback_enable)
        if (phy_if == PHY_INTERFACE_MODE_RGMII)
            tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
    }
    iowrite32be(tmp, &regs->if_mode);
@ -646,28 +478,11 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
static void set_dflts(struct memac_cfg *cfg)
{
    cfg->reset_on_init = false;
    cfg->wan_mode_enable = false;
    cfg->promiscuous_mode_enable = false;
    cfg->pause_forward_enable = false;
    cfg->pause_ignore = false;
    cfg->tx_addr_ins_enable = false;
    cfg->loopback_enable = false;
    cfg->cmd_frame_enable = false;
    cfg->rx_error_discard = false;
    cfg->send_idle_enable = false;
    cfg->no_length_check_enable = true;
    cfg->lgth_check_nostdr = false;
    cfg->time_stamp_enable = false;
    cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
    cfg->max_frame_length = DEFAULT_FRAME_LENGTH;
    cfg->pause_quanta = DEFAULT_PAUSE_QUANTA;
    cfg->pad_enable = true;
    cfg->phy_tx_ena_on = false;
    cfg->rx_sfd_any = false;
    cfg->rx_pbl_fwd = false;
    cfg->tx_pbl_fwd = false;
    cfg->debug_mode = false;
    cfg->wake_on_lan = false;
}

static u32 get_mac_addr_hash_code(u64 eth_addr)
@ -692,49 +507,42 @@ static u32 get_mac_addr_hash_code(u64 eth_addr)
    return xor_val;
}

static void setup_sgmii_internal_phy(struct fman_mac *memac, u8 phy_addr,
static void setup_sgmii_internal_phy(struct fman_mac *memac,
                                     struct fixed_phy_status *fixed_link)
{
    u16 tmp_reg16, speed;
    u16 tmp_reg16;

    /* In case the higher MACs are used (i.e. the MACs that should
     * support 10G), speed=10000 is provided for SGMII ports.
     * Temporary modify enet mode to 1G one, so MII functions can
     * work correctly.
     */
    speed = memac->max_speed;
    memac->max_speed = SPEED_1000;
    if (WARN_ON(!memac->pcsphy))
        return;

    /* SGMII mode */
    tmp_reg16 = PHY_SGMII_IF_MODE_SGMII;
    tmp_reg16 = IF_MODE_SGMII_EN;
    if (!fixed_link)
        /* AN enable */
        tmp_reg16 |= PHY_SGMII_IF_MODE_AN;
        tmp_reg16 |= IF_MODE_USE_SGMII_AN;
    else {
#ifndef __rtems__
        switch (fixed_link->speed) {
        case 10:
            tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_10M;
            /* For 10M: IF_MODE[SPEED_10M] = 0 */
            break;
        case 100:
            tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_100M;
            tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
            break;
        case 1000: /* fallthrough */
        default:
            tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_GB;
            tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
            break;
        }
        if (fixed_link->duplex)
            tmp_reg16 |= PHY_SGMII_IF_MODE_DUPLEX_FULL;
        else
            tmp_reg16 |= PHY_SGMII_IF_MODE_DUPLEX_HALF;
        if (!fixed_link->duplex)
            tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF;
#endif /* __rtems__ */
    }
    mii_write_phy_reg(memac, phy_addr, 0x14, tmp_reg16);
    phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16);

    /* Device ability according to SGMII specification */
    tmp_reg16 = PHY_SGMII_DEV_ABILITY_SGMII;
    mii_write_phy_reg(memac, phy_addr, 0x4, tmp_reg16);
    tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE;
    phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);

    /* Adjust link timer for SGMII -
     * According to Cisco SGMII specification the timer should be 1.6 ms.
@ -748,40 +556,25 @@ static void setup_sgmii_internal_phy(struct fman_mac *memac, u8 phy_addr,
     * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
     * we always set up here a value of 2.5 SGMII.
     */
    mii_write_phy_reg(memac, phy_addr, 0x13, 0x0007);
    mii_write_phy_reg(memac, phy_addr, 0x12, 0xa120);
    phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H);
    phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L);

    if (!fixed_link)
        /* Restart AN */
        tmp_reg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
        tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
    else
        /* AN disabled */
        tmp_reg16 = PHY_SGMII_CR_DEF_VAL & ~PHY_SGMII_CR_AN_ENABLE;
    mii_write_phy_reg(memac, phy_addr, 0x0, tmp_reg16);

    /* Restore original speed */
    memac->max_speed = speed;
        tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN;
    phy_write(memac->pcsphy, 0x0, tmp_reg16);
}

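The LINK_TMR_H/LINK_TMR_L values written above encode the link timer as a cycle count split across two 16-bit registers. A worked check of the comment's figures, assuming the 3.2 ns clock period of the 2.5G SGMII rate the comment says is always configured:

/* Link timer arithmetic (assumed 3.2 ns PCS clock period):
 *
 * SGMII:      (LINK_TMR_H << 16) | LINK_TMR_L = 0x0007a120 = 500000
 *             500000 cycles * 3.2 ns = 1.6 ms
 * 1000BASE-X: (LINK_TMR_H_BASEX << 16) | LINK_TMR_L_BASEX = 0x002faf08
 *             = 3125000; 3125000 cycles * 3.2 ns = 10 ms
 */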
static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac, u8 phy_addr)
|
||||
static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
|
||||
{
|
||||
u16 tmp_reg16, speed;
|
||||
|
||||
	/* In case the higher MACs are used (i.e. the MACs that
	 * should support 10G), speed=10000 is provided for SGMII ports.
	 * Temporarily modify enet mode to 1G so MII functions can
	 * work correctly.
	 */
	speed = memac->max_speed;
	memac->max_speed = SPEED_1000;

	/* 1000BaseX mode */
	tmp_reg16 = PHY_SGMII_IF_MODE_1000X;
	mii_write_phy_reg(memac, phy_addr, 0x14, tmp_reg16);
	u16 tmp_reg16;

	/* AN Device capability */
	tmp_reg16 = PHY_SGMII_DEV_ABILITY_1000X;
	mii_write_phy_reg(memac, phy_addr, 0x4, tmp_reg16);
	tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE;
	phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);

	/* Adjust link timer for SGMII -
	 * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
@ -795,15 +588,12 @@ static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac, u8 phy_addr)
	 * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
	 * we always set up here a value of 2.5 SGMII.
	 */
	mii_write_phy_reg(memac, phy_addr, 0x13, 0x002f);
	mii_write_phy_reg(memac, phy_addr, 0x12, 0xaf08);
	phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX);
	phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX);

	/* Restart AN */
	tmp_reg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
	mii_write_phy_reg(memac, phy_addr, 0x0, tmp_reg16);

	/* Restore original speed */
	memac->max_speed = speed;
	tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
	phy_write(memac->pcsphy, 0x0, tmp_reg16);
}

static int check_init_parameters(struct fman_mac *memac)
@ -821,12 +611,6 @@ static int check_init_parameters(struct fman_mac *memac)
		return -EINVAL;
	}

	/* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
	if (!memac->memac_drv_param->no_length_check_enable) {
		pr_err("Length Check!\n");
		return -EINVAL;
	}

	return 0;
}

@ -1216,7 +1000,7 @@ int memac_set_exception(struct fman_mac *memac,
int memac_init(struct fman_mac *memac)
{
	struct memac_cfg *memac_drv_param;
	u8 i, phy_addr;
	u8 i;
	enet_addr_t eth_addr;
	bool slow_10g_if = false;
	struct fixed_phy_status *fixed_link;
@ -1262,33 +1046,35 @@ int memac_init(struct fman_mac *memac)
		/* MAC strips CRC from received frames - this workaround
		 * should decrease the likelihood of bug appearance
		 */
		reg32 = in_be32(&memac->regs->command_config);
		reg32 = ioread32be(&memac->regs->command_config);
		reg32 &= ~CMD_CFG_CRC_FWD;
		out_be32(&memac->regs->command_config, reg32);
		iowrite32be(reg32, &memac->regs->command_config);
	}

	if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
		/* Configure internal SGMII PHY */
		if (memac->basex_if)
			setup_sgmii_internal_phy_base_x(memac, PHY_MDIO_ADDR);
			setup_sgmii_internal_phy_base_x(memac);
		else
			setup_sgmii_internal_phy(memac, PHY_MDIO_ADDR,
						 fixed_link);
			setup_sgmii_internal_phy(memac, fixed_link);
	} else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
		/* Configure 4 internal SGMII PHYs */
		for (i = 0; i < 4; i++) {
			u8 qsmgii_phy_addr, phy_addr;
			/* QSGMII PHY address occupies 3 upper bits of 5-bit
			 * phy_address; the lower 2 bits are used to extend
			 * register address space and access each one of 4
			 * ports inside QSGMII.
			 */
			phy_addr = (u8)((PHY_MDIO_ADDR << 2) | i);
			phy_addr = memac->pcsphy->mdio.addr;
			qsmgii_phy_addr = (u8)((phy_addr << 2) | i);
			memac->pcsphy->mdio.addr = qsmgii_phy_addr;
			if (memac->basex_if)
				setup_sgmii_internal_phy_base_x(memac,
								phy_addr);
				setup_sgmii_internal_phy_base_x(memac);
			else
				setup_sgmii_internal_phy(memac, phy_addr,
							 fixed_link);
				setup_sgmii_internal_phy(memac, fixed_link);

			memac->pcsphy->mdio.addr = phy_addr;
		}
	}
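For reference, the packing scheme described in the QSGMII comment above can be written as a tiny standalone helper. This is an illustrative sketch only; the helper name is ours, not the driver's:

#include <stdint.h>

/* Pack a 3-bit PCS PHY address and a 2-bit QSGMII port index (0..3)
 * into the 5-bit MDIO address used to reach one internal PHY.
 */
static uint8_t qsgmii_mdio_addr(uint8_t pcs_addr, unsigned int port)
{
	return (uint8_t)(((pcs_addr & 0x7) << 2) | (port & 0x3));
}

The loop above does exactly this, saving and restoring memac->pcsphy->mdio.addr around each call so the base address is untouched afterwards.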

@ -1330,6 +1116,9 @@ int memac_free(struct fman_mac *memac)
{
	free_init_resources(memac);

	if (memac->pcsphy)
		put_device(&memac->pcsphy->mdio.dev);

	kfree(memac->memac_drv_param);
	kfree(memac);

@ -1362,13 +1151,12 @@ struct fman_mac *memac_config(struct fman_mac_params *params)

	memac->addr = ENET_ADDR_TO_UINT64(params->addr);

	memac->regs = (struct memac_regs __iomem *)(base_addr);
	memac->mii_regs = (struct memac_mii_regs __iomem *)
		(base_addr + MEMAC_TO_MII_OFFSET);
	memac->regs = base_addr;
	memac->max_speed = params->max_speed;
	memac->phy_if = params->phy_if;
	memac->mac_id = params->mac_id;
	memac->exceptions = MEMAC_DEFAULT_EXCEPTIONS;
	memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
			     MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
	memac->exception_cb = params->exception_cb;
	memac->event_cb = params->event_cb;
	memac->dev_id = params->dev_id;
@ -1378,5 +1166,21 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
	/* Save FMan revision */
	fman_get_revision(memac->fm, &memac->fm_rev_info);

	if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
	    memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
		if (!params->internal_phy_node) {
			pr_err("PCS PHY node is not available\n");
			memac_free(memac);
			return NULL;
		}

		memac->pcsphy = of_phy_find_device(params->internal_phy_node);
		if (!memac->pcsphy) {
			pr_err("of_phy_find_device (PCS PHY) failed\n");
			memac_free(memac);
			return NULL;
		}
	}

	return memac;
}

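Note the reference-count pairing this change introduces: of_phy_find_device() takes a reference on the PCS PHY's underlying struct device, and memac_free() drops it with put_device(). A minimal sketch of the pattern, lifted from the two hunks above:

/* Acquire: of_phy_find_device() takes a reference on the phy_device. */
memac->pcsphy = of_phy_find_device(params->internal_phy_node);
if (!memac->pcsphy)
	return NULL;	/* lookup failed, nothing to release */

/* ... use memac->pcsphy ... */

/* Release: memac_free() balances the implicit get. */
if (memac->pcsphy)
	put_device(&memac->pcsphy->mdio.dev);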
@ -39,52 +39,14 @@
/* Structure for FM MURAM information */
struct muram_info;

/**
 * fman_muram_init
 * @base: Pointer to base of memory mapped FM-MURAM.
 * @size: Size of the FM-MURAM partition.
 *
 * Creates partition in the MURAM.
 * The routine returns a pointer to the MURAM partition.
 * This pointer must be passed to all other FM-MURAM function calls.
 * No actual initialization or configuration of FM_MURAM hardware is done by
 * this routine.
 *
 * Return: pointer to FM-MURAM object, or NULL for Failure.
 */
struct muram_info *fman_muram_init(phys_addr_t base, size_t size);

/**
 * fman_muram_offset_to_vbase
 * @muram: FM-MURAM module pointer.
 * @offset: the offset of the memory block
 *
 * Gives the address of the memory region at a specific offset.
 *
 * Return: The address of the memory block
 */
unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
					 unsigned long offset);

/**
 * fman_muram_alloc
 * @muram: FM-MURAM module pointer.
 * @size: Size of the memory to be allocated.
 *
 * Allocate some memory from the FM-MURAM partition.
 *
 * Return: address of the allocated memory; NULL otherwise.
 */
int fman_muram_alloc(struct muram_info *muram, size_t size);
unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);

/**
 * fman_muram_free_mem
 * muram: FM-MURAM module pointer.
 * offset: offset of the memory region to be freed.
 * size: size of the memory to be freed.
 *
 * Free an allocated memory region from the FM-MURAM partition.
 */
void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size);
void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
			 size_t size);

#endif /* __FM_MURAM_EXT */

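Taken together, the reworked MURAM interface now deals in offsets rather than raw addresses: fman_muram_alloc() returns an offset into the partition, fman_muram_offset_to_vbase() converts that offset into a usable address, and fman_muram_free_mem() frees by the same offset. A hedged usage sketch under those assumptions (muram_phys_base and muram_size are placeholders, and error handling of the allocation is elided since the header does not pin it down):

struct muram_info *muram;
unsigned long offset, vaddr;

/* Create the partition; returns NULL on failure. */
muram = fman_muram_init(muram_phys_base, muram_size);
if (!muram)
	return -ENOMEM;

offset = fman_muram_alloc(muram, 256);			/* reserve 256 bytes */
vaddr = fman_muram_offset_to_vbase(muram, offset);	/* offset -> address */

/* ... program the FMan block with the MURAM region ... */

fman_muram_free_mem(muram, offset, 256);		/* release by offset */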
@ -40,13 +40,14 @@
#include "fman.h"
#include "fman_sp.h"

#include <asm/mpc85xx.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/libfdt_env.h>

/* Queue ID */
#define DFLT_FQ_ID 0x00FFFFFF
@ -107,14 +108,10 @@
#define BMI_EBD_EN 0x80000000

#define BMI_PORT_CFG_EN 0x80000000
#define BMI_PORT_CFG_FDOVR 0x02000000

#define BMI_PORT_STATUS_BSY 0x80000000

#define BMI_DMA_ATTR_SWP_SHIFT FMAN_SP_DMA_ATTR_SWP_SHIFT
#define BMI_DMA_ATTR_IC_STASH_ON 0x10000000
#define BMI_DMA_ATTR_HDR_STASH_ON 0x04000000
#define BMI_DMA_ATTR_SG_STASH_ON 0x01000000
#define BMI_DMA_ATTR_WRITE_OPTIMIZE FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE

#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16
@ -165,16 +162,12 @@

#define BMI_TX_FIFO_MIN_FILL_SHIFT 16

#define BMI_SG_DISABLE FMAN_SP_SG_DISABLE

#define BMI_PRIORITY_ELEVATION_LEVEL ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
#define BMI_FIFO_THRESHOLD ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)

#define BMI_DEQUEUE_PIPELINE_DEPTH(_type, _speed) \
	((_type == FMAN_PORT_TYPE_TX && _speed == 10000) ? 4 : 1)

#define BMI_PORT_RFNE_FRWD_RPD 0x40000000

#define RX_ERRS_TO_ENQ \
	(FM_PORT_FRM_ERR_DMA | \
	 FM_PORT_FRM_ERR_PHYSICAL | \
@ -190,12 +183,10 @@

/* NIA defines */
#define NIA_ORDER_RESTOR 0x00800000
#define NIA_ENG_FM_CTL 0x00000000
#define NIA_ENG_BMI 0x00500000
#define NIA_ENG_QMI_ENQ 0x00540000
#define NIA_ENG_QMI_DEQ 0x00580000

#define NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME 0x00000028
#define NIA_BMI_AC_ENQ_FRAME 0x00000002
#define NIA_BMI_AC_TX_RELEASE 0x000002C0
#define NIA_BMI_AC_RELEASE 0x000000C0
@ -401,8 +392,6 @@ struct fman_port_cfg {
	u8 cheksum_last_bytes_ignore;
	u8 rx_cut_end_bytes;
	struct fman_buf_pool_depletion buf_pool_depletion;
	bool discard_override;
	bool en_buf_pool_depletion;
	struct fman_ext_pools ext_buf_pools;
	u32 tx_fifo_min_level;
	u32 tx_fifo_low_comf_level;
@ -413,32 +402,17 @@ struct fman_port_cfg {
	struct fman_sp_int_context_data_copy int_context;
	u32 discard_mask;
	u32 err_mask;
	bool forward_reuse_int_context;
	struct fman_buffer_prefix_content buffer_prefix_content;
	bool dont_release_buf;
	bool set_num_of_tasks;
	bool set_num_of_open_dmas;
	bool set_size_of_fifo;
	bool bcb_workaround;

	u8 rx_fd_bits;
	u32 tx_fifo_deq_pipeline_depth;
	bool errata_A006675;
	bool errata_A006320;
	bool excessive_threshold_register;
	bool fmbm_rebm_has_sgd;
	bool fmbm_tfne_has_features;
	bool qmi_deq_options_support;

	enum fman_port_dma_swap dma_swap_data;
	bool dma_ic_stash_on;
	bool dma_header_stash_on;
	bool dma_sg_stash_on;
	bool dma_write_optimize;
	enum fman_port_color color;
	bool sync_req;

	bool no_scatter_gather;
};

struct fman_port_rx_pools_params {
@ -458,6 +432,7 @@ struct fman_port_dts_params {

struct fman_port {
	void *fm;
	struct device *dev;
	struct fman_rev_info rev_info;
	u8 port_id;
	enum fman_port_type port_type;
@ -493,21 +468,9 @@ static int init_bmi_rx(struct fman_port *port)
	struct fman_port_cfg *cfg = port->cfg;
	u32 tmp;

	/* Rx Configuration register */
	tmp = 0;
	if (cfg->discard_override)
		tmp |= BMI_PORT_CFG_FDOVR;
	iowrite32be(tmp, &regs->fmbm_rcfg);

	/* DMA attributes */
	tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
	if (cfg->dma_ic_stash_on)
		tmp |= BMI_DMA_ATTR_IC_STASH_ON;
	if (cfg->dma_header_stash_on)
		tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
	if (cfg->dma_sg_stash_on)
		tmp |= BMI_DMA_ATTR_SG_STASH_ON;
	if (cfg->dma_write_optimize)
		/* Enable write optimization */
		tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
	iowrite32be(tmp, &regs->fmbm_rda);

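The register accesses in this file all follow the same shape after the conversion away from out_be32()/in_be32(): compose a value in a local u32 and issue a single iowrite32be(), or do a read-modify-write through ioread32be(). A small hedged helper illustrating the read-modify-write form (not part of the driver, just a sketch of the idiom):

#include <linux/io.h>

/* Clear the bits in 'clr' and set the bits in 'set'
 * in a big-endian 32-bit memory-mapped register.
 */
static inline void bits_rmw32be(u32 __iomem *reg, u32 clr, u32 set)
{
	u32 tmp;

	tmp = ioread32be(reg);
	tmp &= ~clr;
	tmp |= set;
	iowrite32be(tmp, reg);
}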
@ -548,15 +511,13 @@ static int init_bmi_rx(struct fman_port *port)
	tmp = (cfg->buf_margins.start_margins & BMI_EXT_BUF_MARG_START_MASK) <<
		BMI_EXT_BUF_MARG_START_SHIFT;
	tmp |= cfg->buf_margins.end_margins & BMI_EXT_BUF_MARG_END_MASK;
	if (cfg->fmbm_rebm_has_sgd && cfg->no_scatter_gather)
		tmp |= BMI_SG_DISABLE;
	iowrite32be(tmp, &regs->fmbm_rebm);

	/* Frame attributes */
	tmp = BMI_CMD_RX_MR_DEF;
	tmp |= BMI_CMD_ATTR_ORDER;
	tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
	if (cfg->sync_req)
		/* Synchronization request */
		tmp |= BMI_CMD_ATTR_SYNC;

	iowrite32be(tmp, &regs->fmbm_rfca);
@ -564,10 +525,6 @@ static int init_bmi_rx(struct fman_port *port)
	/* NIA */
	tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;

	if (cfg->errata_A006675)
		tmp |= NIA_ENG_FM_CTL |
		       NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME;
	else
		tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
	iowrite32be(tmp, &regs->fmbm_rfne);

@ -597,12 +554,6 @@ static int init_bmi_tx(struct fman_port *port)

	/* DMA attributes */
	tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
	if (cfg->dma_ic_stash_on)
		tmp |= BMI_DMA_ATTR_IC_STASH_ON;
	if (cfg->dma_header_stash_on)
		tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
	if (cfg->dma_sg_stash_on)
		tmp |= BMI_DMA_ATTR_SG_STASH_ON;
	iowrite32be(tmp, &regs->fmbm_tda);

	/* Tx FIFO parameters */
@ -698,7 +649,6 @@ static int init_qmi(struct fman_port *port)
		return -EINVAL;
	}

	if (cfg->qmi_deq_options_support) {
		switch (cfg->deq_prefetch_option) {
		case FMAN_PORT_DEQ_NO_PREFETCH:
			break;
@ -711,7 +661,7 @@ static int init_qmi(struct fman_port *port)
		default:
			return -EINVAL;
		}
	}

	tmp |= (cfg->deq_sp & QMI_DEQ_CFG_SP_MASK) << QMI_DEQ_CFG_SP_SHIFT;
	tmp |= cfg->deq_byte_cnt;
	iowrite32be(tmp, &regs->fmqm_pndc);
@ -883,11 +833,11 @@ static int verify_size_of_fifo(struct fman_port *port)

	/* Verify the size */
	if (port->fifo_bufs.num < min_fifo_size_required)
		pr_debug("FIFO size should be enlarged to %d bytes\n",
			 min_fifo_size_required);
		dev_dbg(port->dev, "%s: FIFO size should be enlarged to %d bytes\n",
			__func__, min_fifo_size_required);
	else if (port->fifo_bufs.num < opt_fifo_size_for_b2b)
		pr_debug("For b2b processing,FIFO may be enlarged to %d bytes\n",
			 opt_fifo_size_for_b2b);
		dev_dbg(port->dev, "%s: For b2b processing,FIFO may be enlarged to %d bytes\n",
			__func__, opt_fifo_size_for_b2b);

	return 0;
}
@ -961,7 +911,7 @@ static int set_ext_buffer_pools(struct fman_port *port)

	err = set_bpools(port, &bpools);
	if (err != 0) {
		pr_err("FMan port: set_bpools\n");
		dev_err(port->dev, "%s: set_bpools() failed\n", __func__);
		return -EINVAL;
	}

@ -976,8 +926,6 @@ static int init_low_level_driver(struct fman_port *port)
	switch (port->port_type) {
	case FMAN_PORT_TYPE_RX:
		cfg->err_mask = (RX_ERRS_TO_ENQ & ~cfg->discard_mask);
		if (cfg->forward_reuse_int_context)
			cfg->rx_fd_bits = (u8)(BMI_PORT_RFNE_FRWD_RPD >> 24);
		break;
	default:
		break;
@ -990,7 +938,8 @@ static int init_low_level_driver(struct fman_port *port)
	port->cfg->int_buf_start_margin = port->internal_buf_offset;

	if (init(port) != 0) {
		pr_err("fman_port_init\n");
		dev_err(port->dev, "%s: fman port initialization failed\n",
			__func__);
		return -ENODEV;
	}

@ -1004,9 +953,9 @@ static int init_low_level_driver(struct fman_port *port)
		 * Otherwise, if fmbm_tcfqid is 0 the FM will release
		 * buffers to BM regardless of fmbm_tfene
		 */
		out_be32(&port->bmi_regs->tx.fmbm_tcfqid, 0xFFFFFF);
		out_be32(&port->bmi_regs->tx.fmbm_tfene,
			 NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE);
		iowrite32be(0xFFFFFF, &port->bmi_regs->tx.fmbm_tcfqid);
		iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
			    &port->bmi_regs->tx.fmbm_tfene);
	}
}

@ -1038,7 +987,7 @@ static int fill_soc_specific_params(struct fman_port *port)
		break;

	default:
		pr_err("Unsupported FMan version\n");
		dev_err(port->dev, "%s: Unsupported FMan version\n", __func__);
		return -EINVAL;
	}

@ -1218,14 +1167,12 @@ static void set_dflt_cfg(struct fman_port *port,
	struct fman_port_cfg *cfg = port->cfg;

	cfg->dma_swap_data = FMAN_PORT_DMA_NO_SWAP;
	cfg->dma_write_optimize = true;
	cfg->color = FMAN_PORT_COLOR_GREEN;
	cfg->rx_cut_end_bytes = DFLT_PORT_CUT_BYTES_FROM_END;
	cfg->rx_pri_elevation = BMI_PRIORITY_ELEVATION_LEVEL;
	cfg->rx_fifo_thr = BMI_FIFO_THRESHOLD;
	cfg->tx_fifo_low_comf_level = (5 * 1024);
	cfg->deq_type = FMAN_PORT_DEQ_BY_PRI;
	cfg->sync_req = true;
	cfg->deq_prefetch_option = FMAN_PORT_DEQ_FULL_PREFETCH;
	cfg->tx_fifo_deq_pipeline_depth =
		BMI_DEQUEUE_PIPELINE_DEPTH(port->port_type, port->port_speed);
@ -1242,14 +1189,10 @@ static void set_dflt_cfg(struct fman_port *port,
	cfg->errata_A006320 = true;

	/* Excessive Threshold register - exists for pre-FMv3 chips only */
	if (port->rev_info.major < 6) {
	if (port->rev_info.major < 6)
		cfg->excessive_threshold_register = true;
	} else {
		cfg->fmbm_rebm_has_sgd = true;
	else
		cfg->fmbm_tfne_has_features = true;
	}

	cfg->qmi_deq_options_support = true;

	cfg->buffer_prefix_content.data_align =
		DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
@ -1267,15 +1210,6 @@ static void set_rx_dflt_cfg(struct fman_port *port,
		port_params->specific_params.rx_params.err_fqid;
	port->cfg->dflt_fqid =
		port_params->specific_params.rx_params.dflt_fqid;

	/* Set BCB workaround on Rx ports, only for B4860 rev1 */
	if (port->rev_info.major >= 6) {
		unsigned int svr;

		svr = mfspr(SPRN_SVR);
		if ((SVR_SOC_VER(svr) == SVR_B4860) && (SVR_MAJ(svr) == 1))
			port->cfg->bcb_workaround = true;
	}
}

static void set_tx_dflt_cfg(struct fman_port *port,
@ -1295,6 +1229,20 @@ static void set_tx_dflt_cfg(struct fman_port *port,
	port->cfg->deq_high_priority = true;
}

/**
 * fman_port_config
 * @port: Pointer to the port structure
 * @params: Pointer to data structure of parameters
 *
 * Creates a descriptor for the FM PORT module.
 * The routine returns a pointer to the FM PORT object.
 * This descriptor must be passed as first parameter to all other FM PORT
 * function calls.
 * No actual initialization or configuration of FM hardware is done by this
 * routine.
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_config(struct fman_port *port, struct fman_port_params *params)
{
	void __iomem *base_addr = port->dts_params.base_addr;
@ -1330,10 +1278,8 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)

	/* Continue with other parameters */
	/* set memory map pointers */
	port->bmi_regs = (union fman_port_bmi_regs __iomem *)
		(base_addr + BMI_PORT_REGS_OFFSET);
	port->qmi_regs = (struct fman_port_qmi_regs __iomem *)
		(base_addr + QMI_PORT_REGS_OFFSET);
	port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
	port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;

	port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
	/* resource distribution. */
@ -1377,7 +1323,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
		u32 reg;

		reg = 0x00001013;
		out_be32(&port->bmi_regs->tx.fmbm_tfp, reg);
		iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp);
	}
}

@ -1391,6 +1337,14 @@ err_params:
}
EXPORT_SYMBOL(fman_port_config);

/**
 * fman_port_init
 * port: A pointer to a FM Port module.
 * Initializes the FM PORT module by defining the software structure and
 * configuring the hardware registers.
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_init(struct fman_port *port)
{
	struct fman_port_cfg *cfg;
@ -1408,14 +1362,6 @@ int fman_port_init(struct fman_port *port)
	if (err)
		return err;

	/* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 Errata workaround */
	if (port->rev_info.major >= 6 && (port->cfg->bcb_workaround) &&
	    ((port->port_type == FMAN_PORT_TYPE_RX) &&
	     (port->port_speed == 1000))) {
		port->cfg->discard_mask |= FM_PORT_FRM_ERR_PHYSICAL;
		port->fifo_bufs.num += 4 * 1024;
	}

	cfg = port->cfg;

	if (port->port_type == FMAN_PORT_TYPE_RX) {
@ -1430,8 +1376,8 @@ int fman_port_init(struct fman_port *port)
		if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE +
		    cfg->buf_margins.end_margins >
		    port->rx_pools_params.largest_buf_size) {
			pr_err("buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
			       cfg->buf_margins.start_margins,
			dev_err(port->dev, "%s: buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
				__func__, cfg->buf_margins.start_margins,
				cfg->buf_margins.end_margins,
				port->rx_pools_params.largest_buf_size);
			return -EINVAL;
@ -1473,6 +1419,31 @@ int fman_port_init(struct fman_port *port)
}
EXPORT_SYMBOL(fman_port_init);
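The doc comments above pin down the ordering contract: fman_port_config() first (descriptor only, no hardware touched), any fman_port_cfg_* adjustments next, then fman_port_init() to program the registers. A hedged usage sketch; port_dev, the params contents, and buf_prefix are placeholders for values a real caller would supply:

struct fman_port *port = fman_port_bind(port_dev);	/* port created by the probe */
struct fman_port_params params = { 0 };			/* fill in FQ IDs, etc. */
int err;

err = fman_port_config(port, &params);		/* build the descriptor */
if (err)
	return err;

err = fman_port_cfg_buf_prefix_content(port, &buf_prefix);	/* optional, pre-init only */
if (err)
	return err;

err = fman_port_init(port);			/* now the hardware is configured */
if (err)
	return err;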

/**
 * fman_port_cfg_buf_prefix_content
 * @port A pointer to a FM Port module.
 * @buffer_prefix_content A structure of parameters describing
 *                        the structure of the buffer.
 *                        Out parameter:
 *                        Start margin - offset of data from
 *                        start of external buffer.
 * Defines the structure, size and content of the application buffer.
 * In Tx ports, if 'pass_prs_result' is set, the application should set
 * a value to their offsets in the prefix; the FM will save the first
 * 'priv_data_size', then, depending on 'pass_prs_result' and
 * 'pass_time_stamp', copy the parse result and timestamp, and the packet
 * itself (in this order), to the application buffer at that offset.
 * Calling this routine changes the buffer margins definitions in the internal
 * driver database from its default configuration:
 * Data size: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
 * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
 * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
 * May be used for all ports.
 *
 * Allowed only following fman_port_config() and before fman_port_init().
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_cfg_buf_prefix_content(struct fman_port *port,
				     struct fman_buffer_prefix_content *
				     buffer_prefix_content)
@ -1494,9 +1465,24 @@ int fman_port_cfg_buf_prefix_content(struct fman_port *port,
}
EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);

/**
 * fman_port_disable
 * port: A pointer to a FM Port module.
 *
 * Gracefully disable an FM port. The port will not start new tasks after all
 * tasks associated with the port are terminated.
 *
 * This is a blocking routine; it returns after the port is gracefully stopped,
 * i.e. the port will not accept new frames, but it will finish all frames
 * or tasks which were already begun.
 * Allowed only following fman_port_init().
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_disable(struct fman_port *port)
{
	u32 __iomem *bmi_cfg_reg, *bmi_status_reg, tmp;
	u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
	u32 tmp;
	bool rx_port, failure = false;
	int count;

@ -1553,16 +1539,27 @@ int fman_port_disable(struct fman_port *port)
	}

	if (failure)
		pr_debug("FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
			 port->port_id);
		dev_dbg(port->dev, "%s: FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
			__func__, port->port_id);

	return 0;
}
EXPORT_SYMBOL(fman_port_disable);

/**
 * fman_port_enable
 * port: A pointer to a FM Port module.
 *
 * A runtime routine provided to allow disable/enable of port.
 *
 * Allowed only following fman_port_init().
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_enable(struct fman_port *port)
{
	u32 __iomem *bmi_cfg_reg, tmp;
	u32 __iomem *bmi_cfg_reg;
	u32 tmp;
	bool rx_port;

	if (!is_init_done(port->cfg))
@ -1595,12 +1592,30 @@ int fman_port_enable(struct fman_port *port)
}
EXPORT_SYMBOL(fman_port_enable);

|
||||
* fman_port_bind
|
||||
* dev: FMan Port OF device pointer
|
||||
*
|
||||
* Bind to a specific FMan Port.
|
||||
*
|
||||
* Allowed only after the port was created.
|
||||
*
|
||||
* Return: A pointer to the FMan port device.
|
||||
*/
|
||||
struct fman_port *fman_port_bind(struct device *dev)
|
||||
{
|
||||
return (struct fman_port *)(dev_get_drvdata(get_device(dev)));
|
||||
}
|
||||
EXPORT_SYMBOL(fman_port_bind);
|
||||
|
||||
/**
|
||||
* fman_port_get_qman_channel_id
|
||||
* port: Pointer to the FMan port devuce
|
||||
*
|
||||
* Get the QMan channel ID for the specific port
|
||||
*
|
||||
* Return: QMan channel ID
|
||||
*/
|
||||
u32 fman_port_get_qman_channel_id(struct fman_port *port)
|
||||
{
|
||||
return port->dts_params.qman_channel_id;
|
||||
@ -1624,7 +1639,7 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
|
||||
#ifndef __rtems__
|
||||
struct resource *dev_res;
|
||||
#endif /* __rtems__ */
|
||||
const u32 *u32_prop;
|
||||
u32 val;
|
||||
int err = 0, lenp;
|
||||
enum fman_port_type port_type;
|
||||
u16 port_speed;
|
||||
@ -1634,13 +1649,15 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
|
||||
if (!port)
|
||||
return -ENOMEM;
|
||||
|
||||
port->dev = &of_dev->dev;
|
||||
|
||||
port_node = of_node_get(of_dev->dev.of_node);
|
||||
|
||||
/* Get the FM node */
|
||||
#ifndef __rtems__
|
||||
fm_node = of_get_parent(port_node);
|
||||
if (!fm_node) {
|
||||
pr_err("of_get_parent() failed\n");
|
||||
dev_err(port->dev, "%s: of_get_parent() failed\n", __func__);
|
||||
err = -ENODEV;
|
||||
goto return_err;
|
||||
}
|
||||
@ -1653,28 +1670,20 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
|
||||
}
|
||||
#endif /* __rtems__ */
|
||||
|
||||
u32_prop = (const u32 *)of_get_property(port_node, "cell-index", &lenp);
|
||||
if (!u32_prop) {
|
||||
pr_err("of_get_property(%s, cell-index) failed\n",
|
||||
port_node->full_name);
|
||||
err = of_property_read_u32(port_node, "cell-index", &val);
|
||||
if (err) {
|
||||
dev_err(port->dev, "%s: reading cell-index for %s failed\n",
|
||||
__func__, port_node->full_name);
|
||||
err = -EINVAL;
|
||||
goto return_err;
|
||||
}
|
||||
if (WARN_ON(lenp != sizeof(u32))) {
|
||||
err = -EINVAL;
|
||||
goto return_err;
|
||||
}
|
||||
port_id = (u8)*u32_prop;
|
||||
|
||||
port_id = (u8)val;
|
||||
port->dts_params.id = port_id;
|
||||
|
||||
if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
|
||||
port_type = FMAN_PORT_TYPE_TX;
|
||||
port_speed = 1000;
|
||||
u32_prop = (const u32 *)of_get_property(port_node,
|
||||
"fsl,fman-10g-port",
|
||||
&lenp);
|
||||
if (u32_prop)
|
||||
if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
|
||||
port_speed = 10000;
|
||||
|
||||
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
|
||||
@ -1687,9 +1696,7 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
|
||||
} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
|
||||
port_type = FMAN_PORT_TYPE_RX;
|
||||
port_speed = 1000;
|
||||
u32_prop = (const u32 *)of_get_property(port_node,
|
||||
"fsl,fman-10g-port", &lenp);
|
||||
if (u32_prop)
|
||||
if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
|
||||
port_speed = 10000;
|
||||
|
||||
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
|
||||
@ -1700,7 +1707,7 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
|
||||
port_type = FMAN_PORT_TYPE_RX;
|
||||
|
||||
} else {
|
||||
pr_err("Illegal port type\n");
|
||||
dev_err(port->dev, "%s: Illegal port type\n", __func__);
|
||||
err = -EINVAL;
|
||||
goto return_err;
|
||||
}
|
||||
@ -1713,7 +1720,8 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
|
||||
|
||||
qman_channel_id = fman_get_qman_channel_id(fman, port_id);
|
||||
if (qman_channel_id == 0) {
|
||||
pr_err("incorrect qman-channel-id\n");
|
||||
dev_err(port->dev, "%s: incorrect qman-channel-id\n",
|
||||
__func__);
|
||||
err = -EINVAL;
|
||||
goto return_err;
|
||||
}
|
||||
@ -1722,7 +1730,8 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
|
||||
|
||||
err = of_address_to_resource(port_node, 0, &res);
|
||||
if (err < 0) {
|
||||
pr_err("of_address_to_resource() failed\n");
|
||||
dev_err(port->dev, "%s: of_address_to_resource() failed\n",
|
||||
__func__);
|
||||
err = -ENOMEM;
|
||||
goto return_err;
|
||||
}
|
||||
@ -1732,21 +1741,20 @@ static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
|
||||
of_node_put(port_node);
|
||||
|
||||
#ifndef __rtems__
|
||||
dev_res = __devm_request_region(fman_get_device(fman), &res,
|
||||
res.start, (res.end + 1 - res.start),
|
||||
"fman-port");
|
||||
dev_res = __devm_request_region(port->dev, &res, res.start,
|
||||
resource_size(&res), "fman-port");
|
||||
if (!dev_res) {
|
||||
pr_err("__devm_request_region() failed\n");
|
||||
dev_err(port->dev, "%s: __devm_request_region() failed\n",
|
||||
__func__);
|
||||
err = -EINVAL;
|
||||
goto free_port;
|
||||
}
|
||||
#endif /* __rtems__ */
|
||||
|
||||
port->dts_params.base_addr = devm_ioremap(fman_get_device(fman),
|
||||
res.start,
|
||||
(res.end + 1 - res.start));
|
||||
if (port->dts_params.base_addr == 0)
|
||||
pr_err("devm_ioremap() failed\n");
|
||||
port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
|
||||
resource_size(&res));
|
||||
if (!port->dts_params.base_addr)
|
||||
dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
|
||||
|
||||
dev_set_drvdata(&of_dev->dev, port);
|
||||
|
||||
@ -1780,8 +1788,28 @@ static struct platform_driver fman_port_driver = {
|
||||
.probe = fman_port_probe,
|
||||
};
|
||||
|
||||
builtin_platform_driver(fman_port_driver);
|
||||
static int __init fman_port_load(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
pr_debug("FSL DPAA FMan driver\n");
|
||||
|
||||
err = platform_driver_register(&fman_port_driver);
|
||||
if (err < 0)
|
||||
pr_err("Error, platform_driver_register() = %d\n", err);
|
||||
|
||||
return err;
|
||||
}
|
||||
module_init(fman_port_load);
|
||||
|
||||
static void __exit fman_port_unload(void)
|
||||
{
|
||||
platform_driver_unregister(&fman_port_driver);
|
||||
}
|
||||
module_exit(fman_port_unload);
|
||||
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
MODULE_DESCRIPTION("Freescale DPAA Frame Manager Port driver");
|
||||
#else /* __rtems__ */
|
||||
#include <sys/cdefs.h>
|
||||
#include <sys/param.h>
|
||||
|
@ -132,109 +132,20 @@ struct fman_port_params {
	/* Additional parameters depending on port type. */
};

/**
 * fman_port_config
 * @port: Pointer to the port structure
 * @params: Pointer to data structure of parameters
 *
 * Creates a descriptor for the FM PORT module.
 * The routine returns a pointer to the FM PORT object.
 * This descriptor must be passed as first parameter to all other FM PORT
 * function calls.
 * No actual initialization or configuration of FM hardware is done by this
 * routine.
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_config(struct fman_port *port, struct fman_port_params *params);

/**
 * fman_port_init
 * port: A pointer to a FM Port module.
 * Initializes the FM PORT module by defining the software structure and
 * configuring the hardware registers.
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_init(struct fman_port *port);

/**
 * fman_port_cfg_buf_prefix_content
 * @port A pointer to a FM Port module.
 * @buffer_prefix_content A structure of parameters describing
 *                        the structure of the buffer.
 *                        Out parameter:
 *                        Start margin - offset of data from
 *                        start of external buffer.
 * Defines the structure, size and content of the application buffer.
 * In Tx ports, if 'pass_prs_result' is set, the application should set
 * a value to their offsets in the prefix; the FM will save the first
 * 'priv_data_size', then, depending on 'pass_prs_result' and
 * 'pass_time_stamp', copy the parse result and timestamp, and the packet
 * itself (in this order), to the application buffer at that offset.
 * Calling this routine changes the buffer margins definitions in the internal
 * driver database from its default configuration:
 * Data size: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
 * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
 * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
 * May be used for all ports.
 *
 * Allowed only following fman_port_config() and before fman_port_init().
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_cfg_buf_prefix_content(struct fman_port *port,
				     struct fman_buffer_prefix_content
				     *buffer_prefix_content);

/**
 * fman_port_disable
 * port: A pointer to a FM Port module.
 *
 * Gracefully disable an FM port. The port will not start new tasks after all
 * tasks associated with the port are terminated.
 *
 * This is a blocking routine; it returns after the port is gracefully stopped,
 * i.e. the port will not accept new frames, but it will finish all frames
 * or tasks which were already begun.
 * Allowed only following fman_port_init().
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_disable(struct fman_port *port);

/**
 * fman_port_enable
 * port: A pointer to a FM Port module.
 *
 * A runtime routine provided to allow disable/enable of port.
 *
 * Allowed only following fman_port_init().
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_enable(struct fman_port *port);

/**
 * fman_port_get_qman_channel_id
 * port: Pointer to the FMan port device
 *
 * Get the QMan channel ID for the specific port
 *
 * Return: QMan channel ID
 */
u32 fman_port_get_qman_channel_id(struct fman_port *port);

/**
 * fman_port_bind
 * dev: FMan Port OF device pointer
 *
 * Bind to a specific FMan Port.
 *
 * Allowed only after the port was created.
 *
 * Return: A pointer to the FMan port device.
 */
struct fman_port *fman_port_bind(struct device *dev);

#endif /* __FMAN_PORT_H */

@ -84,6 +84,7 @@ void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
		}
	}
}
EXPORT_SYMBOL(fman_sp_set_buf_pools_in_asc_order_of_buf_sizes);

int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
				int_context_data_copy,
@ -168,4 +169,5 @@ int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *

	return 0;
}
EXPORT_SYMBOL(fman_sp_build_buffer_struct);

@ -36,31 +36,22 @@

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "crc_mac_addr_ext.h"

#include "fman_tgec.h"
#include "fman.h"

#include <linux/slab.h>
#include <linux/bitrev.h>
#include <linux/io.h>
#include <linux/crc32.h>

/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff

/* Command and Configuration Register (COMMAND_CONFIG) */
#define CMD_CFG_EN_TIMESTAMP 0x00100000
#define CMD_CFG_NO_LEN_CHK 0x00020000
#define CMD_CFG_SEND_IDLE 0x00010000
#define CMD_CFG_RX_ER_DISC 0x00004000
#define CMD_CFG_CMD_FRM_EN 0x00002000
#define CMD_CFG_LOOPBACK_EN 0x00000400
#define CMD_CFG_TX_ADDR_INS 0x00000200
#define CMD_CFG_PAUSE_IGNORE 0x00000100
#define CMD_CFG_PAUSE_FWD 0x00000080
#define CMF_CFG_CRC_FWD 0x00000040
#define CMD_CFG_PROMIS_EN 0x00000010
#define CMD_CFG_WAN_MODE 0x00000008
#define CMD_CFG_RX_EN 0x00000002
#define CMD_CFG_TX_EN 0x00000001

@ -92,23 +83,6 @@
#define DEFAULT_MAX_FRAME_LENGTH 0x600
#define DEFAULT_PAUSE_QUANT 0xf000

#define TGEC_DEFAULT_EXCEPTIONS \
	((u32)((TGEC_IMASK_MDIO_SCAN_EVENT) |\
	(TGEC_IMASK_REM_FAULT) |\
	(TGEC_IMASK_LOC_FAULT) |\
	(TGEC_IMASK_TX_ECC_ER) |\
	(TGEC_IMASK_TX_FIFO_UNFL) |\
	(TGEC_IMASK_TX_FIFO_OVFL) |\
	(TGEC_IMASK_TX_ER) |\
	(TGEC_IMASK_RX_FIFO_OVFL) |\
	(TGEC_IMASK_RX_ECC_ER) |\
	(TGEC_IMASK_RX_JAB_FRM) |\
	(TGEC_IMASK_RX_OVRSZ_FRM) |\
	(TGEC_IMASK_RX_RUNT_FRM) |\
	(TGEC_IMASK_RX_FRAG_FRM) |\
	(TGEC_IMASK_RX_CRC_ER) |\
	(TGEC_IMASK_RX_ALIGN_ER)))

/* number of pattern match registers (entries) */
#define TGEC_NUM_OF_PADDRS 1

@ -222,17 +196,8 @@ struct tgec_regs {
};

struct tgec_cfg {
	bool rx_error_discard;
	bool pause_ignore;
	bool pause_forward_enable;
	bool no_length_check_enable;
	bool cmd_frame_enable;
	bool send_idle_enable;
	bool wan_mode_enable;
	bool promiscuous_mode_enable;
	bool tx_addr_ins_enable;
	bool loopback_enable;
	bool time_stamp_enable;
	u16 max_frame_length;
	u16 pause_quant;
	u32 tx_ipg_length;
@ -270,17 +235,8 @@ static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)

static void set_dflts(struct tgec_cfg *cfg)
{
	cfg->wan_mode_enable = false;
	cfg->promiscuous_mode_enable = false;
	cfg->pause_forward_enable = false;
	cfg->pause_ignore = false;
	cfg->tx_addr_ins_enable = false;
	cfg->loopback_enable = false;
	cfg->cmd_frame_enable = false;
	cfg->rx_error_discard = false;
	cfg->send_idle_enable = false;
	cfg->no_length_check_enable = true;
	cfg->time_stamp_enable = false;
	cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
	cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH;
	cfg->pause_quant = DEFAULT_PAUSE_QUANT;
@ -293,28 +249,12 @@ static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg,

	/* Config */
	tmp = CMF_CFG_CRC_FWD;
	if (cfg->wan_mode_enable)
		tmp |= CMD_CFG_WAN_MODE;
	if (cfg->promiscuous_mode_enable)
		tmp |= CMD_CFG_PROMIS_EN;
	if (cfg->pause_forward_enable)
		tmp |= CMD_CFG_PAUSE_FWD;
	if (cfg->pause_ignore)
		tmp |= CMD_CFG_PAUSE_IGNORE;
	if (cfg->tx_addr_ins_enable)
		tmp |= CMD_CFG_TX_ADDR_INS;
	if (cfg->loopback_enable)
		tmp |= CMD_CFG_LOOPBACK_EN;
	if (cfg->cmd_frame_enable)
		tmp |= CMD_CFG_CMD_FRM_EN;
	if (cfg->rx_error_discard)
		tmp |= CMD_CFG_RX_ER_DISC;
	if (cfg->send_idle_enable)
		tmp |= CMD_CFG_SEND_IDLE;
	if (cfg->no_length_check_enable)
		/* Payload length check disable */
		tmp |= CMD_CFG_NO_LEN_CHK;
	if (cfg->time_stamp_enable)
		tmp |= CMD_CFG_EN_TIMESTAMP;
	iowrite32be(tmp, &regs->command_config);

	/* Max Frame Length */
@ -348,12 +288,6 @@ static int check_init_parameters(struct fman_mac *tgec)
		return -EINVAL;
	}

	/* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
	if (!tgec->cfg->no_length_check_enable) {
		pr_warn("Length Check!\n");
		return -EINVAL;
	}

	return 0;
}

@ -421,18 +355,6 @@ static int get_exception_flag(enum fman_mac_exceptions exception)
	return bit_mask;
}

static u32 get_mac_addr_hash_code(u64 eth_addr)
{
	u32 crc;

	/* CRC calculation */
	GET_MAC_ADDR_CRC(eth_addr, crc);

	crc = bitrev32(crc);

	return crc;
}

static void tgec_err_exception(void *handle)
{
	struct fman_mac *tgec = (struct fman_mac *)handle;
@ -613,7 +535,7 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
{
	struct tgec_regs __iomem *regs = tgec->regs;
	struct eth_hash_entry *hash_entry;
	u32 crc, hash;
	u32 crc = 0xFFFFFFFF, hash;
	u64 addr;

	if (!is_init_done(tgec->cfg))
@ -627,8 +549,8 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
		return -EINVAL;
	}
	/* CRC calculation */
	crc = get_mac_addr_hash_code(addr);

	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
	crc = bitrev32(crc);
	/* Take 9 MSB bits */
	hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;

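The change above replaces the private GET_MAC_ADDR_CRC macro with the generic kernel helpers crc32_le() and bitrev32(). The same bucket calculation can be reproduced standalone; in this sketch the shift and mask values (23 and 0x1ff) are our assumption, chosen to match the "Take 9 MSB bits" comment rather than copied from the header:

#include <stddef.h>
#include <stdint.h>

/* CRC-32, reflected, poly 0xEDB88320; matches kernel crc32_le() semantics. */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	size_t i;
	int j;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

/* Reverse the bit order of a 32-bit word, like kernel bitrev32(). */
static uint32_t bitrev32(uint32_t x)
{
	x = ((x & 0x55555555u) << 1) | ((x >> 1) & 0x55555555u);
	x = ((x & 0x33333333u) << 2) | ((x >> 2) & 0x33333333u);
	x = ((x & 0x0F0F0F0Fu) << 4) | ((x >> 4) & 0x0F0F0F0Fu);
	x = (x << 24) | ((x & 0xFF00u) << 8) | ((x >> 8) & 0xFF00u) | (x >> 24);
	return x;
}

/* Hash bucket for a 6-byte MAC address: 9 MSBs of the bit-reversed CRC. */
static uint32_t tgec_hash_bucket(const uint8_t mac[6])
{
	uint32_t crc = bitrev32(crc32_le(0xFFFFFFFFu, mac, 6));

	return (crc >> 23) & 0x1FFu;	/* assumed shift/mask, see lead-in */
}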
@ -651,7 +573,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
	struct tgec_regs __iomem *regs = tgec->regs;
	struct eth_hash_entry *hash_entry = NULL;
	struct list_head *pos;
	u32 crc, hash;
	u32 crc = 0xFFFFFFFF, hash;
	u64 addr;

	if (!is_init_done(tgec->cfg))
@ -660,7 +582,8 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
	addr = ((*(u64 *)eth_addr) >> 16);

	/* CRC calculation */
	crc = get_mac_addr_hash_code(addr);
	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
	crc = bitrev32(crc);
	/* Take 9 MSB bits */
	hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;

@ -803,9 +726,6 @@ int tgec_free(struct fman_mac *tgec)
{
	free_init_resources(tgec);

	if (tgec->cfg)
		tgec->cfg = NULL;

	kfree(tgec->cfg);
	kfree(tgec);

@ -836,11 +756,25 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)

	set_dflts(cfg);

	tgec->regs = (struct tgec_regs __iomem *)(base_addr);
	tgec->regs = base_addr;
	tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
	tgec->max_speed = params->max_speed;
	tgec->mac_id = params->mac_id;
	tgec->exceptions = TGEC_DEFAULT_EXCEPTIONS;
	tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
			    TGEC_IMASK_REM_FAULT |
			    TGEC_IMASK_LOC_FAULT |
			    TGEC_IMASK_TX_ECC_ER |
			    TGEC_IMASK_TX_FIFO_UNFL |
			    TGEC_IMASK_TX_FIFO_OVFL |
			    TGEC_IMASK_TX_ER |
			    TGEC_IMASK_RX_FIFO_OVFL |
			    TGEC_IMASK_RX_ECC_ER |
			    TGEC_IMASK_RX_JAB_FRM |
			    TGEC_IMASK_RX_OVRSZ_FRM |
			    TGEC_IMASK_RX_RUNT_FRM |
			    TGEC_IMASK_RX_FRAG_FRM |
			    TGEC_IMASK_RX_CRC_ER |
			    TGEC_IMASK_RX_ALIGN_ER);
	tgec->exception_cb = params->exception_cb;
	tgec->event_cb = params->event_cb;
	tgec->dev_id = params->dev_id;

@ -37,6 +37,7 @@

#ifdef __rtems__
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if_dl.h>
#include <bsp/fdt.h>
#include "../../../../../../rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.h"
@ -52,6 +53,7 @@
#include <linux/netdevice.h>
#include <linux/phy_fixed.h>
#include <linux/etherdevice.h>
#include <linux/libfdt_env.h>

#include "mac.h"
#include "fman_mac.h"
@ -59,13 +61,8 @@
#include "fman_tgec.h"
#include "fman_memac.h"

#define MAC_DESCRIPTION "FSL FMan MAC API based driver"

MODULE_LICENSE("Dual BSD/GPL");

MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");

MODULE_DESCRIPTION(MAC_DESCRIPTION);
MODULE_DESCRIPTION("FSL FMan MAC API based driver");

struct mac_priv_s {
	struct device *dev;
@ -74,6 +71,11 @@ struct mac_priv_s {
	phy_interface_t phy_if;
	struct fman *fman;
	struct device_node *phy_node;
	struct device_node *internal_phy_node;
#ifdef __rtems__
	struct device_node phy_node_storage;
	struct device_node internal_phy_node_storage;
#endif /* __rtems__ */
	/* List of multicast addresses */
	struct list_head mc_addr_list;
	struct platform_device *eth_dev;
@ -90,15 +92,15 @@ struct mac_address {
	struct list_head list;
};

static void mac_exception(void *_mac_dev, enum fman_mac_exceptions ex)
static void mac_exception(void *handle, enum fman_mac_exceptions ex)
{
	struct mac_device *mac_dev;
	struct mac_priv_s *priv;

	mac_dev = (struct mac_device *)_mac_dev;
	mac_dev = handle;
	priv = mac_dev->priv;

	if (FM_MAC_EX_10G_RX_FIFO_OVFL == ex) {
	if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
		/* don't flag RX FIFO after the first */
		mac_dev->set_exception(mac_dev->fman_mac,
				       FM_MAC_EX_10G_RX_FIFO_OVFL, false);
@ -118,7 +120,8 @@ static void set_fman_mac_params(struct mac_device *mac_dev,

#ifndef __rtems__
	params->base_addr = (typeof(params->base_addr))
		devm_ioremap(priv->dev, mac_dev->res->start, 0x2000);
		devm_ioremap(priv->dev, mac_dev->res->start,
			     resource_size(mac_dev->res));
#else /* __rtems__ */
	params->base_addr = priv->vaddr;
#endif /* __rtems__ */
@ -131,6 +134,7 @@ static void set_fman_mac_params(struct mac_device *mac_dev,
	params->exception_cb = mac_exception;
	params->event_cb = mac_exception;
	params->dev_id = mac_dev;
	params->internal_phy_node = priv->internal_phy_node;
}

static int tgec_initialization(struct mac_device *mac_dev)
@ -362,9 +366,19 @@ static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
	return 0;
}

/* Avoid redundant calls to FMD, if the MAC driver already contains the desired
/**
 * fman_set_mac_active_pause
 * @mac_dev: A pointer to the MAC device
 * @rx: Pause frame setting for RX
 * @tx: Pause frame setting for TX
 *
 * Set the MAC RX/TX PAUSE frames settings
 *
 * Avoid redundant calls to FMD, if the MAC driver already contains the desired
 * active PAUSE settings. Otherwise, the new active settings should be reflected
 * in FMan.
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
{
@ -392,8 +406,16 @@ int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
EXPORT_SYMBOL(fman_set_mac_active_pause);

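The "avoid redundant calls to FMD" promise in the comment implies caching the last values pushed to the MAC and skipping the hardware call when nothing changed. The function body is elided from this hunk, so the following is only a sketch of that guard; the field and callback names mirror the surrounding code but are an assumption:

/* Only touch the hardware when the requested RX setting changed. */
if (rx != mac_dev->rx_pause_active) {
	err = mac_dev->set_rx_pause(fman_mac, rx);	/* assumed callback */
	if (err < 0)
		return err;
	mac_dev->rx_pause_active = rx;	/* remember the active setting */
}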
#ifndef __rtems__
/* Determine the MAC RX/TX PAUSE frames settings based on PHY
/**
 * fman_get_pause_cfg
 * @mac_dev: A pointer to the MAC device
 * @rx: Return value for RX setting
 * @tx: Return value for TX setting
 *
 * Determine the MAC RX/TX PAUSE frames settings based on PHY
 * autonegotiation or values set by ethtool.
 *
 * Return: Pointer to FMan device.
 */
void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
			bool *tx_pause)
@ -495,7 +517,7 @@ static void adjust_link_memac(struct net_device *net_dev)
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *net_dev,
static struct phy_device *init_phy(struct net_device *net_dev,
				   struct mac_device *mac_dev,
				   void (*adj_lnk)(struct net_device *))
{
@ -506,7 +528,7 @@ static int init_phy(struct net_device *net_dev,
				 priv->phy_if);
	if (!phy_dev) {
		netdev_err(net_dev, "Could not connect to PHY\n");
		return -ENODEV;
		return NULL;
	}

	/* Remove any features not supported by the controller */
@ -519,22 +541,22 @@ static int init_phy(struct net_device *net_dev,

	mac_dev->phy_dev = phy_dev;

	return 0;
	return phy_dev;
}

static int dtsec_init_phy(struct net_device *net_dev,
static struct phy_device *dtsec_init_phy(struct net_device *net_dev,
					 struct mac_device *mac_dev)
{
	return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
}

static int tgec_init_phy(struct net_device *net_dev,
static struct phy_device *tgec_init_phy(struct net_device *net_dev,
					struct mac_device *mac_dev)
{
	return init_phy(net_dev, mac_dev, adjust_link_void);
}

static int memac_init_phy(struct net_device *net_dev,
static struct phy_device *memac_init_phy(struct net_device *net_dev,
					 struct mac_device *mac_dev)
{
	return init_phy(net_dev, mac_dev, &adjust_link_memac);
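With this refactor the per-MAC init_phy hooks return the attached phy_device (or NULL) instead of an errno, so callers now check the pointer and choose their own error code. A hedged sketch of the new calling convention (the init_phy member and the caller context are assumptions based on the hunks above):

struct phy_device *phy_dev;

phy_dev = mac_dev->init_phy(net_dev, mac_dev);	/* e.g. memac_init_phy() */
if (!phy_dev) {
	netdev_err(net_dev, "init_phy() failed\n");
	return -ENODEV;		/* the caller picks the errno now */
}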
@ -639,31 +661,6 @@ static void setup_memac(struct mac_device *mac_dev)
|
||||
static DEFINE_MUTEX(eth_lock);
|
||||
#endif /* __rtems__ */
|
||||
|
||||
static const char phy_str[][11] = {
|
||||
[PHY_INTERFACE_MODE_MII] = "mii",
|
||||
[PHY_INTERFACE_MODE_GMII] = "gmii",
|
||||
[PHY_INTERFACE_MODE_SGMII] = "sgmii",
|
||||
[PHY_INTERFACE_MODE_TBI] = "tbi",
|
||||
[PHY_INTERFACE_MODE_RMII] = "rmii",
|
||||
[PHY_INTERFACE_MODE_RGMII] = "rgmii",
|
||||
[PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
|
||||
[PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
|
||||
[PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
|
||||
[PHY_INTERFACE_MODE_RTBI] = "rtbi",
|
||||
[PHY_INTERFACE_MODE_XGMII] = "xgmii"
|
||||
};
|
||||
|
||||
static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(phy_str); i++)
|
||||
if (strcmp(str, phy_str[i]) == 0)
|
||||
return (phy_interface_t)i;
|
||||
|
||||
return PHY_INTERFACE_MODE_MII;
|
||||
}
|
||||
|
||||
static const u16 phy2speed[] = {
|
||||
[PHY_INTERFACE_MODE_MII] = SPEED_100,
|
||||
[PHY_INTERFACE_MODE_GMII] = SPEED_1000,
|
||||
@ -675,6 +672,7 @@ static const u16 phy2speed[] = {
|
||||
[PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
|
||||
[PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
|
||||
[PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
|
||||
[PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
|
||||
[PHY_INTERFACE_MODE_XGMII] = SPEED_10000
|
||||
};
|
||||
|
||||
@ -754,13 +752,9 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
|
||||
#ifdef __rtems__
|
||||
struct fman_mac_softc *sc = device_get_softc(_dev);
|
||||
#endif /* __rtems__ */
|
||||
int err, i, lenp;
|
||||
int err, i, nph;
|
||||
struct device *dev;
|
||||
#ifndef __rtems__
|
||||
struct device_node *mac_node, *dev_node, *tbi_node;
|
||||
#else /* __rtems__ */
|
||||
struct device_node *mac_node;
|
||||
#endif /* __rtems__ */
|
||||
struct device_node *mac_node, *dev_node;
|
||||
struct mac_device *mac_dev;
|
||||
#ifndef __rtems__
|
||||
struct platform_device *of_dev;
|
||||
@ -768,10 +762,9 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
|
||||
struct resource res;
|
||||
struct mac_priv_s *priv;
|
||||
const u8 *mac_addr;
|
||||
const char *char_prop;
|
||||
const u32 *u32_prop;
|
||||
u32 val;
|
||||
u8 fman_id;
|
||||
const phandle *phandle_prop;
|
||||
int phy_if;
|
||||
|
||||
dev = &_of_dev->dev;
|
||||
mac_node = dev->of_node;
|
||||
@ -798,10 +791,26 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
|
||||
|
||||
if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
|
||||
setup_dtsec(mac_dev);
|
||||
#ifndef __rtems__
|
||||
priv->internal_phy_node = of_parse_phandle(mac_node,
|
||||
"tbi-handle", 0);
|
||||
#else /* __rtems__ */
|
||||
priv->internal_phy_node = of_parse_phandle(
|
||||
&priv->internal_phy_node_storage, mac_node, "tbi-handle",
|
||||
0);
|
||||
#endif /* __rtems__ */
|
||||
} else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
|
||||
setup_tgec(mac_dev);
|
||||
} else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
|
||||
setup_memac(mac_dev);
|
||||
#ifndef __rtems__
|
||||
priv->internal_phy_node = of_parse_phandle(mac_node,
|
||||
"pcsphy-handle", 0);
|
||||
#else /* __rtems__ */
|
||||
priv->internal_phy_node = of_parse_phandle(
|
||||
&priv->internal_phy_node_storage, mac_node, "pcsphy-handle",
|
||||
0);
|
||||
#endif /* __rtems__ */
|
||||
} else {
|
||||
#ifndef __rtems__
|
||||
dev_err(dev, "MAC node (%s) contains unsupported MAC\n",
|
||||
@ -835,15 +844,15 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
|
||||
}
|
||||
|
||||
/* Get the FMan cell-index */
|
||||
u32_prop = of_get_property(dev_node, "cell-index", &lenp);
|
||||
if (!u32_prop) {
|
||||
dev_err(dev, "of_get_property(%s, cell-index) failed\n",
|
||||
err = of_property_read_u32(dev_node, "cell-index", &val);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to read cell-index for %s\n",
|
||||
dev_node->full_name);
|
||||
err = -EINVAL;
|
||||
goto _return_of_node_put;
|
||||
}
|
||||
WARN_ON(lenp != sizeof(u32));
|
||||
fman_id = (u8)*u32_prop + 1; /* cell-index 0 => FMan id 1 */
|
||||
/* cell-index 0 => FMan id 1 */
|
||||
fman_id = (u8)(val + 1);
|
||||
|
||||
priv->fman = fman_bind(&of_dev->dev);
|
||||
if (!priv->fman) {
|
||||
@ -888,26 +897,11 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
|
||||
priv->vaddr = devm_ioremap(dev, res.start, res.end + 1 - res.start);
|
||||
#endif /* __rtems__ */
|
||||
|
||||
#ifndef __rtems__
|
||||
#define TBIPA_OFFSET 0x1c
|
||||
#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. */
|
||||
tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
|
||||
if (tbi_node) {
|
||||
u32 tbiaddr = TBIPA_DEFAULT_ADDR;
|
||||
|
||||
u32_prop = of_get_property(tbi_node, "reg", NULL);
|
||||
if (u32_prop)
|
||||
tbiaddr = *u32_prop;
|
||||
out_be32(priv->vaddr + TBIPA_OFFSET, tbiaddr);
|
||||
}
|
||||
#endif /* __rtems__ */
|
||||
|
||||
if (!of_device_is_available(mac_node)) {
|
||||
#ifndef __rtems__
|
||||
devm_iounmap(dev, priv->vaddr);
|
||||
__devm_release_region(dev, fman_get_mem_region(priv->fman),
|
||||
res.start, res.end + 1 - res.start);
|
||||
fman_unbind(priv->fman);
|
||||
devm_kfree(dev, mac_dev);
|
||||
#endif /* __rtems__ */
|
||||
dev_set_drvdata(dev, NULL);
|
||||
@ -915,15 +909,14 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
|
||||
}
|
||||
|
||||
/* Get the cell-index */
|
	u32_prop = of_get_property(mac_node, "cell-index", &lenp);
	if (!u32_prop) {
		dev_err(dev, "of_get_property(%s, cell-index) failed\n",
	err = of_property_read_u32(mac_node, "cell-index", &val);
	if (err) {
		dev_err(dev, "failed to read cell-index for %s\n",
			mac_node->full_name);
		err = -EINVAL;
		goto _return_dev_set_drvdata;
	}
	WARN_ON(lenp != sizeof(u32));
	priv->cell_index = (u8)*u32_prop;
	priv->cell_index = (u8)val;

	/* Get the MAC address */
	mac_addr = of_get_mac_address(mac_node);
@ -936,25 +929,43 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
	memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));

	/* Get the port handles */
	phandle_prop = of_get_property(mac_node, "fsl,fman-ports", &lenp);
	if (!phandle_prop) {
		dev_err(dev, "of_get_property(%s, fsl,fman-ports) failed\n",
	nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
	if (unlikely(nph < 0)) {
		dev_err(dev, "of_count_phandle_with_args(%s, fsl,fman-ports) failed\n",
			mac_node->full_name);
		err = nph;
		goto _return_dev_set_drvdata;
	}

	if (nph != ARRAY_SIZE(mac_dev->port)) {
		dev_err(dev, "Not supported number of fman-ports handles of mac node %s from device tree\n",
			mac_node->full_name);
		err = -EINVAL;
		goto _return_dev_set_drvdata;
	}
	BUG_ON(lenp != sizeof(phandle) * ARRAY_SIZE(mac_dev->port));

	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
#ifndef __rtems__
#ifdef __rtems__
		struct fman_ivars *ivars;
		device_t child;

		ivars = &mac_dev->ivars[i];
#endif /* __rtems__ */
		/* Find the port node */
		dev_node = of_find_node_by_phandle(phandle_prop[i]);
#ifndef __rtems__
		dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
#else /* __rtems__ */
		dev_node = of_parse_phandle(&ivars->dn, mac_node,
					    "fsl,fman-ports", i);
#endif /* __rtems__ */
		if (!dev_node) {
			dev_err(dev, "of_find_node_by_phandle() failed\n");
			dev_err(dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n",
				mac_node->full_name);
			err = -EINVAL;
			goto _return_of_node_put;
		}

#ifndef __rtems__
		of_dev = of_find_device_by_node(dev_node);
		if (!of_dev) {
			dev_err(dev, "of_find_device_by_node(%s) failed\n",
@ -972,22 +983,7 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
		}
		of_node_put(dev_node);
#else /* __rtems__ */
		int node;
		struct fman_ivars *ivars;
		device_t child;

		node = fdt_node_offset_by_phandle(bsp_fdt_get(), phandle_prop[i]);
		if (node < 0) {
			goto _return_of_node_put;
		}

		ivars = kzalloc(sizeof(*ivars), GFP_KERNEL);
		if (ivars == NULL) {
			goto _return_of_node_put;
		}

		ivars->dn.offset = node;
		ivars->of_dev.dev.of_node = &ivars->dn;
		ivars->of_dev.dev.of_node = dev_node;
		ivars->of_dev.dev.base = _of_dev->dev.base;
		ivars->fman = fman;

@ -1010,23 +1006,20 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
	}

	/* Get the PHY connection type */
	char_prop = (const char *)of_get_property(mac_node,
						  "phy-connection-type", NULL);
	if (!char_prop) {
	phy_if = of_get_phy_mode(mac_node);
	if (phy_if < 0) {
		dev_warn(dev,
			 "of_get_property(%s, phy-connection-type) failed. Defaulting to MII\n",
			 "of_get_phy_mode() for %s failed. Defaulting to SGMII\n",
			 mac_node->full_name);
		priv->phy_if = PHY_INTERFACE_MODE_MII;
	} else {
		priv->phy_if = str2phy(char_prop);
		phy_if = PHY_INTERFACE_MODE_SGMII;
	}
	priv->phy_if = phy_if;

	priv->speed = phy2speed[priv->phy_if];
	priv->max_speed = priv->speed;
#ifndef __rtems__
	mac_dev->if_support = DTSEC_SUPPORTED;
	/* We don't support half-duplex in SGMII mode */
	if (strstr(char_prop, "sgmii"))
	if (priv->phy_if == PHY_INTERFACE_MODE_SGMII)
		mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
					 SUPPORTED_100baseT_Half);

@ -1035,9 +1028,8 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
		mac_dev->if_support |= SUPPORTED_1000baseT_Full;

	/* The 10G interface only supports one mode */
	if (strstr(char_prop, "xgmii"))
	if (priv->phy_if == PHY_INTERFACE_MODE_XGMII)
		mac_dev->if_support = SUPPORTED_10000baseT_Full;
#endif /* __rtems__ */

	/* Get the rest of the PHY information */
#ifndef __rtems__
@ -1051,20 +1043,30 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman

		priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
					   GFP_KERNEL);
		if (!priv->fixed_link)
		if (!priv->fixed_link) {
			err = -ENOMEM;
			goto _return_dev_set_drvdata;
		}

		priv->phy_node = of_node_get(mac_node);
		phy = of_phy_find_device(priv->phy_node);
		if (!phy)
		if (!phy) {
			err = -EINVAL;
			goto _return_dev_set_drvdata;
		}

		priv->fixed_link->link = phy->link;
		priv->fixed_link->speed = phy->speed;
		priv->fixed_link->duplex = phy->duplex;
		priv->fixed_link->pause = phy->pause;
		priv->fixed_link->asym_pause = phy->asym_pause;

		put_device(&phy->mdio.dev);
	}
#else /* __rtems__ */
	priv->phy_node = of_parse_phandle(&priv->phy_node_storage, mac_node,
					  "phy-handle", 0);
	mac_dev->phy_dev = of_phy_find_device(priv->phy_node);
#endif /* __rtems__ */

	err = mac_dev->init(mac_dev);
@ -1077,7 +1079,7 @@ static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman
	/* pause frame autonegotiation enabled */
	mac_dev->autoneg_pause = true;

	/* by intializing the values to false, force FMD to enable PAUSE frames
	/* By intializing the values to false, force FMD to enable PAUSE frames
	 * on RX and TX
	 */
	mac_dev->rx_pause_req = true;
@ -1107,7 +1109,6 @@ _return_of_node_put:
#endif /* __rtems__ */
_return_dev_set_drvdata:
	kfree(priv->fixed_link);
	kfree(priv);
	dev_set_drvdata(dev, NULL);
_return:
	return err;

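The hunks above replace open-coded of_get_property() parsing with the checked helpers of_property_read_u32() and of_count_phandle_with_args(). A minimal sketch of that pattern, outside the diff (the wrapper function and its reduced error handling are illustrative, not part of the commit):

    #include <linux/of.h>

    /* Sketch: read "cell-index" and count the "fsl,fman-ports" phandles
     * the way the updated mac_probe() does. */
    static int read_mac_props(struct device_node *mac_node, u8 *cell_index,
                              int expected_ports)
    {
        u32 val;
        int err, nph;

        err = of_property_read_u32(mac_node, "cell-index", &val);
        if (err)
            return -EINVAL;
        *cell_index = (u8)val;

        nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
        if (nph < 0)
            return nph;         /* helper already returns -errno */
        if (nph != expected_ports)
            return -EINVAL;
        return 0;
    }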
@ -65,11 +65,10 @@ struct mac_device {
#endif /* __rtems__ */
	u8 addr[ETH_ALEN];
	struct fman_port *port[2];
#ifndef __rtems__
	u32 if_support;
	struct phy_device *phy_dev;
#endif /* __rtems__ */
#ifdef __rtems__
	struct fman_ivars ivars[2];
	struct platform_device pdev;
	struct dpaa_eth_data data;
	struct net_device net_dev;
@ -83,7 +82,8 @@ struct mac_device {
	bool promisc;

#ifndef __rtems__
	int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
	struct phy_device *(*init_phy)(struct net_device *net_dev,
				       struct mac_device *mac_dev);
#else /* __rtems__ */
	void (*adjust_link)(struct mac_device *mac_dev, u16 speed);
#endif /* __rtems__ */
@ -119,28 +119,8 @@ struct dpaa_eth_data {

extern const char *mac_driver_description;

/**
 * fman_set_mac_active_pause
 * @mac_dev: A pointer to the MAC device
 * @rx: Pause frame setting for RX
 * @tx: Pause frame setting for TX
 *
 * Set the MAC RX/TX PAUSE frames settings
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);

/**
 * fman_get_pause_cfg
 * @mac_dev: A pointer to the MAC device
 * @rx: Return value for RX setting
 * @tx: Return value for TX setting
 *
 * Determine the MAC RX/TX PAUSE frames settings
 *
 * Return: Pointer to FMan device.
 */
void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
			bool *tx_pause);

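The two declarations above are paired in practice: fman_get_pause_cfg() reads back the negotiated RX/TX PAUSE settings and fman_set_mac_active_pause() applies them to the MAC. A hedged usage sketch (the wrapper function is invented for illustration):

    static void apply_pause(struct mac_device *mac_dev)
    {
        bool rx_pause, tx_pause;
        int err;

        fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
        err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
        if (err < 0)
            pr_err("fman_set_mac_active_pause() = %d\n", err);
    }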
File diff suppressed because it is too large
File diff suppressed because it is too large
392 linux/drivers/soc/fsl/qbman/bman_ccsr.c Normal file
@ -0,0 +1,392 @@
#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * * Neither the name of Freescale Semiconductor nor the
 * names of its contributors may be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bman_priv.h"

u16 bman_ip_rev;
EXPORT_SYMBOL(bman_ip_rev);

/* Register offsets */
#define REG_FBPR_FPC	0x0800
#define REG_ECSR	0x0a00
#define REG_ECIR	0x0a04
#define REG_EADR	0x0a08
#define REG_EDATA(n)	(0x0a10 + ((n) * 0x04))
#define REG_SBEC(n)	(0x0a80 + ((n) * 0x04))
#define REG_IP_REV_1	0x0bf8
#define REG_IP_REV_2	0x0bfc
#define REG_FBPR_BARE	0x0c00
#define REG_FBPR_BAR	0x0c04
#define REG_FBPR_AR	0x0c10
#define REG_SRCIDR	0x0d04
#define REG_LIODNR	0x0d08
#define REG_ERR_ISR	0x0e00
#define REG_ERR_IER	0x0e04
#define REG_ERR_ISDR	0x0e08

/* Used by all error interrupt registers except 'inhibit' */
#define BM_EIRQ_IVCI	0x00000010	/* Invalid Command Verb */
#define BM_EIRQ_FLWI	0x00000008	/* FBPR Low Watermark */
#define BM_EIRQ_MBEI	0x00000004	/* Multi-bit ECC Error */
#define BM_EIRQ_SBEI	0x00000002	/* Single-bit ECC Error */
#define BM_EIRQ_BSCN	0x00000001	/* pool State Change Notification */

struct bman_hwerr_txt {
	u32 mask;
	const char *txt;
};

static const struct bman_hwerr_txt bman_hwerr_txts[] = {
	{ BM_EIRQ_IVCI, "Invalid Command Verb" },
	{ BM_EIRQ_FLWI, "FBPR Low Watermark" },
	{ BM_EIRQ_MBEI, "Multi-bit ECC Error" },
	{ BM_EIRQ_SBEI, "Single-bit ECC Error" },
	{ BM_EIRQ_BSCN, "Pool State Change Notification" },
};

/* Only trigger low water mark interrupt once only */
#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI

/* Pointer to the start of the BMan's CCSR space */
static u32 __iomem *bm_ccsr_start;

static inline u32 bm_ccsr_in(u32 offset)
{
	return ioread32be(bm_ccsr_start + offset/4);
}
static inline void bm_ccsr_out(u32 offset, u32 val)
{
	iowrite32be(val, bm_ccsr_start + offset/4);
}

static void bm_get_version(u16 *id, u8 *major, u8 *minor)
{
	u32 v = bm_ccsr_in(REG_IP_REV_1);
	*id = (v >> 16);
	*major = (v >> 8) & 0xff;
	*minor = v & 0xff;
}

/* signal transactions for FBPRs with higher priority */
#define FBPR_AR_RPRIO_HI BIT(30)

static void bm_set_memory(u64 ba, u32 size)
{
	u32 exp = ilog2(size);
	/* choke if size isn't within range */
	DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
		    is_power_of_2(size));
	/* choke if '[e]ba' has lower-alignment than 'size' */
	DPAA_ASSERT(!(ba & (size - 1)));
	bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
	bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
	bm_ccsr_out(REG_FBPR_AR, exp - 1);
}

/*
 * Location and size of BMan private memory
 *
 * Ideally we would use the DMA API to turn rmem->base into a DMA address
 * (especially if iommu translations ever get involved). Unfortunately, the
 * DMA API currently does not allow mapping anything that is not backed with
 * a struct page.
 */
#ifndef __rtems__
static dma_addr_t fbpr_a;
static size_t fbpr_sz;

static int bman_fbpr(struct reserved_mem *rmem)
{
	fbpr_a = rmem->base;
	fbpr_sz = rmem->size;

	WARN_ON(!(fbpr_a && fbpr_sz));

	return 0;
}
RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
#else /* __rtems__ */
static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fbpr, 16777216);
#define fbpr_a ((uintptr_t)&fbpr[0])
#define fbpr_sz sizeof(fbpr)
#endif /* __rtems__ */

static irqreturn_t bman_isr(int irq, void *ptr)
{
	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
	struct device *dev = ptr;

	ier_val = bm_ccsr_in(REG_ERR_IER);
	isr_val = bm_ccsr_in(REG_ERR_ISR);
	ecsr_val = bm_ccsr_in(REG_ECSR);
	isr_mask = isr_val & ier_val;

	if (!isr_mask)
		return IRQ_NONE;

	for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
		if (bman_hwerr_txts[i].mask & isr_mask) {
#ifndef __rtems__
			dev_err_ratelimited(dev, "ErrInt: %s\n",
					    bman_hwerr_txts[i].txt);
#endif /* __rtems__ */
			if (bman_hwerr_txts[i].mask & ecsr_val) {
				/* Re-arm error capture registers */
				bm_ccsr_out(REG_ECSR, ecsr_val);
			}
			if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
				dev_dbg(dev, "Disabling error 0x%x\n",
					bman_hwerr_txts[i].mask);
				ier_val &= ~bman_hwerr_txts[i].mask;
				bm_ccsr_out(REG_ERR_IER, ier_val);
			}
		}
	}
	bm_ccsr_out(REG_ERR_ISR, isr_val);

	return IRQ_HANDLED;
}

static int fsl_bman_probe(struct platform_device *pdev)
{
	int ret, err_irq;
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
#ifdef __rtems__
	struct resource res_storage;
#endif /* __rtems__ */
	struct resource *res;
	u16 id, bm_pool_cnt;
	u8 major, minor;

#ifndef __rtems__
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
#else /* __rtems__ */
	res = platform_get_resource(&res_storage, pdev, IORESOURCE_MEM, 0);
#endif /* __rtems__ */
	if (!res) {
		dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
			node->full_name);
		return -ENXIO;
	}
	bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
	if (!bm_ccsr_start)
		return -ENXIO;

	bm_get_version(&id, &major, &minor);
	if (major == 1 && minor == 0) {
		bman_ip_rev = BMAN_REV10;
		bm_pool_cnt = BM_POOL_MAX;
	} else if (major == 2 && minor == 0) {
		bman_ip_rev = BMAN_REV20;
		bm_pool_cnt = 8;
	} else if (major == 2 && minor == 1) {
		bman_ip_rev = BMAN_REV21;
		bm_pool_cnt = BM_POOL_MAX;
	} else {
		dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
			id, major, minor);
		return -ENODEV;
	}

	bm_set_memory(fbpr_a, fbpr_sz);

	err_irq = platform_get_irq(pdev, 0);
	if (err_irq <= 0) {
		dev_info(dev, "Can't get %s IRQ\n", node->full_name);
		return -ENODEV;
	}
	ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
			       dev);
	if (ret) {
		dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
			ret, node->full_name);
		return ret;
	}
	/* Disable Buffer Pool State Change */
	bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
	/*
	 * Write-to-clear any stale bits, (eg. starvation being asserted prior
	 * to resource allocation during driver init).
	 */
	bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
	/* Enable Error Interrupts */
	bm_ccsr_out(REG_ERR_IER, 0xffffffff);

	bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
	if (IS_ERR(bm_bpalloc)) {
		ret = PTR_ERR(bm_bpalloc);
		dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
		return ret;
	}

	/* seed BMan resource pool */
	ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
	if (ret) {
		dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
			0, bm_pool_cnt - 1, ret);
		return ret;
	}

	return 0;
};

#ifndef __rtems__
static const struct of_device_id fsl_bman_ids[] = {
	{
		.compatible = "fsl,bman",
	},
	{}
};

static struct platform_driver fsl_bman_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = fsl_bman_ids,
		.suppress_bind_attrs = true,
	},
	.probe = fsl_bman_probe,
};

builtin_platform_driver(fsl_bman_driver);
#else /* __rtems__ */
#include <sys/types.h>
#include <sys/kernel.h>
#include <rtems.h>
#include <bsp/fdt.h>
#include <bsp/qoriq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

static struct bm_portal_config bman_configs[NR_CPUS];

u16 bman_pool_max;

SYSINIT_REFERENCE(irqs);

static void
bman_sysinit(void)
{
	const char *fdt = bsp_fdt_get();
	struct device_node dn;
	struct platform_device ofdev = {
		.dev = {
			.of_node = &dn,
			.base = (uintptr_t)&qoriq
		}
	};
	const char *name;
	int cpu_count = (int)rtems_get_processor_count();
	int cpu;
	int ret;
	int node;
	int parent;

	qoriq_reset_qman_and_bman();
	qoriq_clear_ce_portal(&qoriq_bman_portal[0][0],
	    sizeof(qoriq_bman_portal[0]));
	qoriq_clear_ci_portal(&qoriq_bman_portal[1][0],
	    sizeof(qoriq_bman_portal[1]));

	memset(&dn, 0, sizeof(dn));

	name = "fsl,bman";
	node = fdt_node_offset_by_compatible(fdt, 0, name);
	if (node < 0)
		panic("bman: no bman in FDT");

	dn.full_name = name;
	dn.offset = node;
	ret = fsl_bman_probe(&ofdev);
	if (ret != 0)
		panic("bman: probe failed");

	name = "fsl,bman-portal";
	node = fdt_node_offset_by_compatible(fdt, 0, name);
	if (node < 0)
		panic("bman: no portals in FDT");
	parent = fdt_parent_offset(fdt, node);
	if (parent < 0)
		panic("bman: no parent of portals in FDT");
	node = fdt_first_subnode(fdt, parent);

	dn.full_name = name;
	dn.offset = node;

	for (cpu = 0; cpu < cpu_count; ++cpu) {
		struct bm_portal_config *pcfg = &bman_configs[cpu];
		struct bman_portal *portal;
		struct resource res;

		if (node < 0)
			panic("bman: missing portal in FDT");

		ret = of_address_to_resource(&dn, 0, &res);
		if (ret != 0)
			panic("bman: no portal CE address");
		pcfg->addr_virt[0] = (__iomem void *)
		    ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start);
		BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] >=
		    (uintptr_t)&qoriq_bman_portal[0][0]);
		BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] <
		    (uintptr_t)&qoriq_bman_portal[1][0]);

		ret = of_address_to_resource(&dn, 1, &res);
		if (ret != 0)
			panic("bman: no portal CI address");
		pcfg->addr_virt[1] = (__iomem void *)
		    ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start);
		BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] >=
		    (uintptr_t)&qoriq_bman_portal[1][0]);
		BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] <
		    (uintptr_t)&qoriq_bman_portal[2][0]);

		pcfg->irq = of_irq_to_resource(&dn, 0, NULL);
		if (pcfg->irq == NO_IRQ)
			panic("bman: no portal interrupt");

		pcfg->cpu = cpu;

		portal = bman_create_affine_portal(pcfg);
		if (portal == NULL)
			panic("bman: cannot create portal");

		bman_p_irqsource_add(portal, BM_PIRQ_RCRI);

		node = fdt_next_subnode(fdt, node);
		dn.offset = node;
	}
}
SYSINIT(bman, SI_SUB_CPU, SI_ORDER_FIRST, bman_sysinit, NULL);
#endif /* __rtems__ */
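bm_set_memory() in the new file asserts a power-of-two FBPR size between 4 KiB and 1 GiB, aligned at its own size, and writes log2(size) - 1 into REG_FBPR_AR. A sketch of just that encoding (the helper name is invented; the checks mirror the DPAA_ASSERT calls above):

    #include <linux/log2.h>

    static u32 fbpr_ar_value(u64 ba, u32 size)
    {
        BUG_ON(!is_power_of_2(size));
        BUG_ON(size < 4096 || size > 1024 * 1024 * 1024);
        BUG_ON(ba & (size - 1));    /* base must be size-aligned */
        return ilog2(size) - 1;     /* value written to REG_FBPR_AR */
    }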
@ -1,399 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * * Neither the name of Freescale Semiconductor nor the
 * names of its contributors may be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bman_priv.h"

/*
 * Global variables of the max portal/pool number this BMan version supported
 */
u16 bman_ip_rev;
EXPORT_SYMBOL(bman_ip_rev);

u16 bman_pool_max;
EXPORT_SYMBOL(bman_pool_max);

/* After initialising cpus that own shared portal configs, we cache the
 * resulting portals (ie. not just the configs) in this array. Then we
 * initialise slave cpus that don't have their own portals, redirecting them to
 * portals from this cache in a round-robin assignment. */
static struct bman_portal *shared_portals[NR_CPUS] __initdata;
static int num_shared_portals __initdata;
static int shared_portals_idx __initdata;

static LIST_HEAD(unused_pcfgs);
static void *affine_bportals[NR_CPUS];

#ifndef __rtems__
static const int flags[] = {0, _PAGE_GUARDED | _PAGE_NO_CACHE};
#else /* __rtems__ */
static const int flags[] = {0, 0};
#endif /* __rtems__ */

static struct bm_portal_config * __init get_pcfg(struct list_head *list)
{
	struct bm_portal_config *pcfg;

	if (list_empty(list))
		return NULL;
	pcfg = list_entry(list->prev, struct bm_portal_config, list);
	list_del(&pcfg->list);

	return pcfg;
}

static struct bman_portal * __init init_pcfg(struct bm_portal_config *pcfg)
{
	struct bman_portal *p = bman_create_affine_portal(pcfg);

	if (p) {
#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
		bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
#endif
		pr_info("Portal %sinitialised, cpu %d\n",
			pcfg->public_cfg.is_shared ? "(shared) " : "",
			pcfg->public_cfg.cpu);
		affine_bportals[pcfg->public_cfg.cpu] = p;
	} else
		pr_crit("Portal failure on cpu %d\n", pcfg->public_cfg.cpu);

	return p;
}

static void __init init_slave(int cpu)
{
	struct bman_portal *p;

	p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
	if (!p)
		pr_err("Slave portal failure on cpu %d\n", cpu);
	else
		pr_info("Portal %s initialised, cpu %d\n", "(slave) ", cpu);
	if (shared_portals_idx >= num_shared_portals)
		shared_portals_idx = 0;
	affine_bportals[cpu] = p;
}

/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the
 * parsing is in dpaa_sys.h. The syntax is a comma-separated list of indexes
 * and/or ranges of indexes, with each being optionally prefixed by "s" to
 * explicitly mark it or them for sharing.
 * Eg;
 * bportals=s0,1-3,s4
 * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared"
 * portals, and any remaining cpus share the portals that are assigned to cpus 0
 * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share
 * cpu 0's portal, cpu 6 would share cpu4's portal, and cpu 7 would share cpu
 * 0's portal.) */
static struct cpumask want_unshared __initdata; /* cpus requested without "s" */
static struct cpumask want_shared __initdata; /* cpus requested with "s" */

static int __init parse_bportals(char *str)
{
	return parse_portals_bootarg(str, &want_shared, &want_unshared,
				     "bportals");
}
__setup("bportals=", parse_bportals);

static void __cold bman_offline_cpu(unsigned int cpu)
{
	struct bman_portal *p = (struct bman_portal *)affine_bportals[cpu];
	const struct bm_portal_config *pcfg;

	if (p) {
		pcfg = bman_get_bm_portal_config(p);
		if (pcfg)
			irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static void __cold bman_online_cpu(unsigned int cpu)
{
	struct bman_portal *p = (struct bman_portal *)affine_bportals[cpu];
	const struct bm_portal_config *pcfg;

	if (p) {
		pcfg = bman_get_bm_portal_config(p);
		if (pcfg)
			irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
	}
}

static int __cold bman_hotplug_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		bman_online_cpu(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		bman_offline_cpu(cpu);
	}

	return NOTIFY_OK;
}

static struct notifier_block bman_hotplug_cpu_notifier = {
	.notifier_call = bman_hotplug_cpu_callback,
};
#endif /* CONFIG_HOTPLUG_CPU */

static int __cold bman_portal_probe(struct platform_device *of_dev)
{
	struct device *dev = &of_dev->dev;
	struct device_node *node = dev->of_node;
	struct bm_portal_config *pcfg;
	int i, irq, ret;

	if (!of_device_is_available(node))
		return -ENODEV;

	if (of_device_is_compatible(node, "fsl,bman-portal-1.0") ||
	    of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) {
		bman_ip_rev = BMAN_REV10;
		bman_pool_max = 64;
	} else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") ||
		   of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) {
		bman_ip_rev = BMAN_REV20;
		bman_pool_max = 8;
	} else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.0") ||
		   of_device_is_compatible(node, "fsl,bman-portal-2.1.1") ||
		   of_device_is_compatible(node, "fsl,bman-portal-2.1.2") ||
		   of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) {
		bman_ip_rev = BMAN_REV21;
		bman_pool_max = 64;
	}

	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
	if (!pcfg) {
		dev_err(dev, "Can't allocate portal config\n");
		return -ENOMEM;
	}

	for (i = DPA_PORTAL_CE; i <= DPA_PORTAL_CI; i++) {
		ret = of_address_to_resource(node, i, pcfg->addr_phys + i);
		if (ret < 0) {
			dev_err(dev, "Can't get %s property 'reg::%d'\n",
				node->full_name, i);
			return ret;
		}
		ret = devm_request_resource(dev, &iomem_resource,
					    pcfg->addr_phys + i);
		if (ret < 0)
			return ret;
		pcfg->addr_virt[i] = devm_ioremap_prot(dev,
					pcfg->addr_phys[i].start,
					resource_size(pcfg->addr_phys + i),
					flags[i]);
		if (!pcfg->addr_virt[i])
			return -ENXIO;
	}

	pcfg->public_cfg.cpu = -1;

	irq = irq_of_parse_and_map(node, 0);
	if (irq == NO_IRQ) {
		dev_err(dev, "Can't get %s property 'interrupts'\n",
			node->full_name);
		return -ENXIO;
	}
	pcfg->public_cfg.irq = irq;

	bman_depletion_fill(&pcfg->public_cfg.mask);

	list_add_tail(&pcfg->list, &unused_pcfgs);

	return 0;
};

static int __cold bman_portal_remove(struct platform_device *of_dev)
{
	return 0;
};

static const struct of_device_id bman_portal_ids[] = {
	{
		.compatible = "fsl,bman-portal",
	},
	{}
};
MODULE_DEVICE_TABLE(of, bman_portal_ids);

static struct platform_driver bman_portal_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = bman_portal_ids,
	},
	.probe = bman_portal_probe,
	.remove = bman_portal_remove,
};

static int __init bman_portal_driver_register(struct platform_driver *drv)
{
	int _errno;
	struct cpumask slave_cpus;
	struct cpumask unshared_cpus = *cpu_none_mask;
	struct cpumask shared_cpus = *cpu_none_mask;
	LIST_HEAD(unshared_pcfgs);
	LIST_HEAD(shared_pcfgs);
	struct bm_portal_config *pcfg;
	struct bman_portal *p;
	int cpu;
	struct cpumask offline_cpus;

	_errno = platform_driver_register(drv);
	if (_errno < 0)
		return _errno;

	/* Initialise the BMan driver. The meat of this function deals with portals. The
	 * following describes the flow of portal-handling, the code "steps" refer to
	 * this description;
	 * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with
	 *    ::cpu==-1. Regions and interrupts are mapped (but interrupts are not
	 *    bound).
	 * 2. The "want_shared" and "want_unshared" lists (as filled by the
	 *    "bportals=[...]" bootarg) are processed, allocating portals and assigning
	 *    them to cpus, placing them in the relevant list and setting ::cpu as
	 *    appropriate. If no "bportals" bootarg was present, the defaut is to try to
	 *    assign portals to all online cpus at the time of driver initialisation.
	 *    Any failure to allocate portals (when parsing the "want" lists or when
	 *    using default behaviour) will be silently tolerated (the "fixup" logic in
	 *    step 3 will determine what happens in this case).
	 * 3. Do fixups relative to cpu_online_mask(). If no portals are marked for
	 *    sharing and sharing is required (because not all cpus have been assigned
	 *    portals), then one portal will marked for sharing. Conversely if no
	 *    sharing is required, any portals marked for sharing will not be shared. It
	 *    may be that sharing occurs when it wasn't expected, if portal allocation
	 *    failed to honour all the requested assignments (including the default
	 *    assignments if no bootarg is present).
	 * 4. Unshared portals are initialised on their respective cpus.
	 * 5. Shared portals are initialised on their respective cpus.
	 * 6. Each remaining cpu is initialised to slave to one of the shared portals,
	 *    which are selected in a round-robin fashion.
	 */
	/* Step 2. */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, &want_shared)) {
			pcfg = get_pcfg(&unused_pcfgs);
			if (!pcfg)
				break;
			pcfg->public_cfg.cpu = cpu;
			list_add_tail(&pcfg->list, &shared_pcfgs);
			cpumask_set_cpu(cpu, &shared_cpus);
		}
		if (cpumask_test_cpu(cpu, &want_unshared)) {
			if (cpumask_test_cpu(cpu, &shared_cpus))
				continue;
			pcfg = get_pcfg(&unused_pcfgs);
			if (!pcfg)
				break;
			pcfg->public_cfg.cpu = cpu;
			list_add_tail(&pcfg->list, &unshared_pcfgs);
			cpumask_set_cpu(cpu, &unshared_cpus);
		}
	}
	if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
		/* Default, give an unshared portal to each online cpu */
		for_each_possible_cpu(cpu) {
			pcfg = get_pcfg(&unused_pcfgs);
			if (!pcfg)
				break;
			pcfg->public_cfg.cpu = cpu;
			list_add_tail(&pcfg->list, &unshared_pcfgs);
			cpumask_set_cpu(cpu, &unshared_cpus);
		}
	}
	/* Step 3. */
	cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
	cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
	if (cpumask_empty(&slave_cpus)) {
		/* No sharing required */
		if (!list_empty(&shared_pcfgs)) {
			/* Migrate "shared" to "unshared" */
			cpumask_or(&unshared_cpus, &unshared_cpus,
				   &shared_cpus);
			cpumask_clear(&shared_cpus);
			list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
			INIT_LIST_HEAD(&shared_pcfgs);
		}
	} else {
		/* Sharing required */
		if (list_empty(&shared_pcfgs)) {
			/* Migrate one "unshared" to "shared" */
			pcfg = get_pcfg(&unshared_pcfgs);
			if (!pcfg) {
				pr_crit("No portals available!\n");
				return 0;
			}
			cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
			cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
			list_add_tail(&pcfg->list, &shared_pcfgs);
		}
	}
	/* Step 4. */
	list_for_each_entry(pcfg, &unshared_pcfgs, list) {
		pcfg->public_cfg.is_shared = 0;
		p = init_pcfg(pcfg);
	}
	/* Step 5. */
	list_for_each_entry(pcfg, &shared_pcfgs, list) {
		pcfg->public_cfg.is_shared = 1;
		p = init_pcfg(pcfg);
		if (p)
			shared_portals[num_shared_portals++] = p;
	}
	/* Step 6. */
	if (!cpumask_empty(&slave_cpus))
		for_each_cpu(cpu, &slave_cpus)
			init_slave(cpu);
	pr_info("Portals initialised\n");
	cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
	for_each_cpu(cpu, &offline_cpus)
		bman_offline_cpu(cpu);

#ifdef CONFIG_HOTPLUG_CPU
	register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
#endif

	bman_seed_bpid_range(0, bman_pool_max);

	return 0;
}

module_driver(bman_portal_driver,
	      bman_portal_driver_register, platform_driver_unregister);
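The deleted driver's slave setup in step 6 relied on a simple round-robin over the shared portals cached above. Extracted as a sketch (the helper name is invented; the indexing logic is lifted from init_slave()):

    static struct bman_portal *next_shared_portal(void)
    {
        struct bman_portal *p = shared_portals[shared_portals_idx++];

        if (shared_portals_idx >= num_shared_portals)
            shared_portals_idx = 0;    /* wrap around, round-robin */
        return p;
    }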
@ -1,4 +1,4 @@
/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@ -34,103 +34,49 @@

#include <soc/fsl/bman.h>

/* used by CCSR and portal interrupt code */
enum bm_isr_reg {
	bm_isr_status = 0,
	bm_isr_enable = 1,
	bm_isr_disable = 2,
	bm_isr_inhibit = 3
};
/* Portal processing (interrupt) sources */
#define BM_PIRQ_RCRI	0x00000002	/* RCR Ring (below threshold) */

#ifdef CONFIG_FSL_BMAN
/* Set depletion thresholds associated with a buffer pool. Requires that the
 * operating system have access to BMan CCSR (ie. compiled in support and
 * run-time access courtesy of the device-tree). */
int bm_pool_set(u32 bpid, const u32 *thresholds);
#define BM_POOL_THRESH_SW_ENTER 0
#define BM_POOL_THRESH_SW_EXIT 1
#define BM_POOL_THRESH_HW_ENTER 2
#define BM_POOL_THRESH_HW_EXIT 3

/* Read the free buffer count for a given buffer */
u32 bm_pool_free_buffers(u32 bpid);

#endif /* CONFIG_FSL_BMAN */

#if defined(CONFIG_FSL_BMAN_PORTAL) || defined(CONFIG_FSL_BMAN_PORTAL_MODULE)
/* Revision info (for errata and feature handling) */
#define BMAN_REV10 0x0100
#define BMAN_REV20 0x0200
#define BMAN_REV21 0x0201
extern u16 bman_ip_rev;	/* 0 if uninitialised, otherwise BMAN_REVx */

extern struct gen_pool *bm_bpalloc;

struct bm_portal_config {
	/* Corenet portal addresses;
	 * [0]==cache-enabled, [1]==cache-inhibited. */
	__iomem void *addr_virt[2];
	/*
	 * Corenet portal addresses;
	 * [0]==cache-enabled, [1]==cache-inhibited.
	 */
	void __iomem *addr_virt[2];
#ifndef __rtems__
	struct resource addr_phys[2];
	/* Allow these to be joined in lists */
	struct list_head list;
#endif /* __rtems__ */
	struct device *dev;
	/* User-visible portal configuration settings */
	struct bman_portal_config public_cfg;
	/* portal is affined to this cpu */
	int cpu;
	/* portal interrupt line */
	int irq;
};

/* Hooks from bman_driver.c in to bman_high.c */
struct bman_portal *bman_create_portal(
			struct bman_portal *portal,
			const struct bm_portal_config *config);
struct bman_portal *bman_create_affine_portal(
			const struct bm_portal_config *config);
struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
			int cpu);
void bman_destroy_portal(struct bman_portal *bm);

const struct bm_portal_config *bman_destroy_affine_portal(void);

/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
 * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
 * might fail (if the buffer pool is depleted). So this value provides some
 * "stagger" in that the bman_acquire() function will only fail if lots of bufs
 * are requested at once or if h/w has been tested a couple of times without
 * luck. The _HIGH value: when bman_release() is called and the stockpile
 * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
 * the release ring is full). So this value provides some "stagger" so that
 * ring-access is retried a couple of times prior to the API returning a
 * failure. The following *must* be true;
 * BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8
 *   (to avoid thrashing)
 * BMAN_STOCKPILE_SZ >= 16
 *   (as the release logic expects to either send 8 buffers to hw prior to
 *   adding the given buffers to the stockpile or add the buffers to the
 *   stockpile before sending 8 to hw, as the API must be an all-or-nothing
 *   success/fail.)
/*
 * The below bman_p_***() variant might be called in a situation that the cpu
 * which the portal affine to is not online yet.
 * @bman_portal specifies which portal the API will use.
 */
#define BMAN_STOCKPILE_SZ 16u /* number of bufs in per-pool cache */
#define BMAN_STOCKPILE_LOW 2u /* when fill is <= this, acquire from hw */
#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
int bman_p_irqsource_add(struct bman_portal *p, u32 bits);

/*************************************************/
/* BMan s/w corenet portal, low-level i/face	 */
/*************************************************/

/* Used by all portal interrupt registers except 'inhibit'
/*
 * Used by all portal interrupt registers except 'inhibit'
 * This mask contains all the "irqsource" bits visible to API users
 */
#define BM_PIRQ_VISIBLE	(BM_PIRQ_RCRI | BM_PIRQ_BSCN)

/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
 * the disable register" rather than "disable the ability to write". */
#define bm_isr_status_read(bm)		__bm_isr_read(bm, bm_isr_status)
#define bm_isr_status_clear(bm, m)	__bm_isr_write(bm, bm_isr_status, m)
#define bm_isr_enable_read(bm)		__bm_isr_read(bm, bm_isr_enable)
#define bm_isr_enable_write(bm, v)	__bm_isr_write(bm, bm_isr_enable, v)
#define bm_isr_disable_read(bm)		__bm_isr_read(bm, bm_isr_disable)
#define bm_isr_disable_write(bm, v)	__bm_isr_write(bm, bm_isr_disable, v)
#define bm_isr_inhibit(bm)		__bm_isr_write(bm, bm_isr_inhibit, 1)
#define bm_isr_uninhibit(bm)		__bm_isr_write(bm, bm_isr_inhibit, 0)
#define BM_PIRQ_VISIBLE	BM_PIRQ_RCRI

const struct bm_portal_config *
bman_get_bm_portal_config(const struct bman_portal *portal);
#endif /* CONFIG_FSL_BMAN_PORTAL* */

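The (now removed) stockpile comment imposes two invariants on the constants it documents. Stated as compile-time checks, assuming a C11 toolchain (sketch only, not part of the header):

    #define BMAN_STOCKPILE_SZ   16u
    #define BMAN_STOCKPILE_LOW  2u
    #define BMAN_STOCKPILE_HIGH 14u

    _Static_assert(BMAN_STOCKPILE_HIGH - BMAN_STOCKPILE_LOW > 8,
                   "stagger required to avoid thrashing");
    _Static_assert(BMAN_STOCKPILE_SZ >= 16,
                   "release logic moves buffers to/from hw in batches of 8");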
@ -1,60 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * * Neither the name of Freescale Semiconductor nor the
 * names of its contributors may be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bman_test.h"

MODULE_AUTHOR("Geoff Thorpe");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("BMan testing");

static int test_init(void)
{
#ifdef CONFIG_FSL_BMAN_TEST_API
	int loop = 1;

	while (loop--)
		bman_test_api();
#endif
#ifdef CONFIG_FSL_BMAN_TEST_THRESH
	bman_test_thresh();
#endif
	return 0;
}

static void test_exit(void)
{
}

module_init(test_init);
module_exit(test_exit);
@ -1,4 +1,4 @@
/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@ -30,5 +30,6 @@

#include "bman_priv.h"

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

void bman_test_api(void);
void bman_test_thresh(void);

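With the pr_fmt() definition added above, every pr_*() call compiled after it is prefixed with the module name. A sketch of the effect (the exact prefix depends on KBUILD_MODNAME for the translation unit, and pr_fmt is conventionally defined before any includes):

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    static void announce(void)
    {
        /* prints e.g. "bman_test: announce(): Starting" */
        pr_info("%s(): Starting\n", __func__);
    }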
@ -2,7 +2,7 @@

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@ -34,33 +34,15 @@

#include "bman_test.h"

/*************/
/* constants */
/*************/

#define PORTAL_OPAQUE	((void *)0xf00dbeef)
#define POOL_OPAQUE	((void *)0xdeadabba)
#define NUM_BUFS	93
#define LOOPS		3
#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU

/***************/
/* global vars */
/***************/

static struct bman_pool *pool;
static int depleted;
static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
static int bufs_received;

/* Predeclare the callback so we can instantiate pool parameters */
static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int);

/**********************/
/* internal functions */
/**********************/

static void bufs_init(void)
{
	int i;
@ -72,9 +54,10 @@ static void bufs_init(void)

static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
{
	if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) {
	if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {

		/* On SoCs with BMan revison 2.0, BMan only respects the 40
		/*
		 * On SoCs with BMan revison 2.0, BMan only respects the 40
		 * LS-bits of buffer addresses, masking off the upper 8-bits on
		 * release commands. The API provides for 48-bit addresses
		 * because some SoCs support all 48-bits. When generating
@ -84,11 +67,11 @@ static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
		 * don't match), or we need to mask the upper 8-bits off when
		 * comparing. We do the latter.
		 */
		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
				< (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) <
		    (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
			return -1;
		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
				> (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) >
		    (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
			return 1;
	} else {
		if (bm_buffer_get64(a) < bm_buffer_get64(b))
@ -110,79 +93,63 @@ static void bufs_confirm(void)
		for (j = 0; j < NUM_BUFS; j++)
			if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
				matches++;
		BUG_ON(matches != 1);
		WARN_ON(matches != 1);
	}
}

/********/
/* test */
/********/

static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool,
			 void *pool_ctx, int __depleted)
{
	BUG_ON(__pool != pool);
	BUG_ON(pool_ctx != POOL_OPAQUE);
	depleted = __depleted;
}

void bman_test_api(void)
{
	struct bman_pool_params pparams = {
		.flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID,
		.cb = depletion_cb,
		.cb_ctx = POOL_OPAQUE,
	};
	int i, loops = LOOPS;

	bufs_init();

	pr_info(" --- Starting high-level test ---\n");
	pr_info("%s(): Starting\n", __func__);

	pool = bman_new_pool(&pparams);
	BUG_ON(!pool);
	pool = bman_new_pool();
	if (!pool) {
		pr_crit("bman_new_pool() failed\n");
		goto failed;
	}

	/*******************/
	/* Release buffers */
	/*******************/
do_loop:
	i = 0;
	while (i < NUM_BUFS) {
		u32 flags = BMAN_RELEASE_FLAG_WAIT;
		int num = 8;

		if ((i + num) > NUM_BUFS)
		if (i + num > NUM_BUFS)
			num = NUM_BUFS - i;
		if ((i + num) == NUM_BUFS)
			flags |= BMAN_RELEASE_FLAG_WAIT_SYNC;
		if (bman_release(pool, bufs_in + i, num, flags))
			panic("bman_release() failed\n");
		if (bman_release(pool, bufs_in + i, num)) {
			pr_crit("bman_release() failed\n");
			goto failed;
		}
		i += num;
	}

	/*******************/
	/* Acquire buffers */
	/*******************/
	while (i > 0) {
		int tmp, num = 8;

		if (num > i)
			num = i;
		tmp = bman_acquire(pool, bufs_out + i - num, num, 0);
		BUG_ON(tmp != num);
		tmp = bman_acquire(pool, bufs_out + i - num, num);
		WARN_ON(tmp != num);
		i -= num;
	}
	i = bman_acquire(pool, NULL, 1, 0);
	BUG_ON(i > 0);
	i = bman_acquire(pool, NULL, 1);
	WARN_ON(i > 0);

	bufs_confirm();

	if (--loops)
		goto do_loop;

	/************/
	/* Clean up */
	/************/
	bman_free_pool(pool);
	pr_info(" --- Finished high-level test ---\n");
	pr_info("%s(): Finished\n", __func__);
	return;

failed:
	WARN_ON(1);
}

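The hunks above capture the API reduction: bman_new_pool() no longer takes a parameter block, and bman_release()/bman_acquire() lose their flags arguments. A sketch of a round trip against the new signatures (the function and error values are illustrative):

    static int bman_roundtrip(struct bm_buffer *bufs, u8 num)
    {
        struct bman_pool *pool = bman_new_pool();
        int ret;

        if (!pool)
            return -ENODEV;
        ret = bman_release(pool, bufs, num);        /* 0 on success */
        if (!ret)
            ret = bman_acquire(pool, bufs, num);    /* count acquired */
        bman_free_pool(pool);
        return ret;
    }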
@ -1,216 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2010 - 2015 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * * Neither the name of Freescale Semiconductor nor the
 * names of its contributors may be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bman_test.h"

/* Test constants */
#define TEST_NUMBUFS 129728
#define TEST_EXIT 129536
#define TEST_ENTRY 129024

struct affine_test_data {
	struct task_struct *t;
	int cpu;
#ifndef __rtems__
	int expect_affinity;
#endif /* __rtems__ */
	int drain;
	int num_enter;
	int num_exit;
	struct list_head node;
	struct completion wakethread;
	struct completion wakeparent;
};

static void cb_depletion(struct bman_portal *portal,
			 struct bman_pool *pool,
			 void *opaque,
			 int depleted)
{
	struct affine_test_data *data = opaque;
	int c = smp_processor_id();

	pr_info("%s(): bpid=%d, depleted=%d, cpu=%d, original=%d\n", __func__,
		bman_get_params(pool)->bpid, !!depleted, c, data->cpu);
	/* We should be executing on the CPU of the thread that owns the pool if
	 * and that CPU has an affine portal (ie. it isn't slaved). */
#ifndef __rtems__
	BUG_ON((c != data->cpu) && data->expect_affinity);
	BUG_ON((c == data->cpu) && !data->expect_affinity);
#endif /* __rtems__ */
	if (depleted)
		data->num_enter++;
	else
		data->num_exit++;
}

/* Params used to set up a pool, this also dynamically allocates a BPID */
static const struct bman_pool_params params_nocb = {
	.flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH,
	.thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 }
};

/* Params used to set up each cpu's pool with callbacks enabled */
static struct bman_pool_params params_cb = {
	.bpid = 0, /* will be replaced to match pool_nocb */
	.flags = BMAN_POOL_FLAG_DEPLETION,
	.cb = cb_depletion
};

static struct bman_pool *pool_nocb;
static LIST_HEAD(threads);

static int affine_test(void *__data)
{
	struct bman_pool *pool;
	struct affine_test_data *data = __data;
	struct bman_pool_params my_params = params_cb;

	pr_info("Thread %d: starting\n", data->cpu);
	/* create the pool */
	my_params.cb_ctx = data;
	pool = bman_new_pool(&my_params);
	BUG_ON(!pool);
	complete(&data->wakeparent);
	wait_for_completion(&data->wakethread);
	init_completion(&data->wakethread);

	/* if we're the drainer, we get signalled for that */
	if (data->drain) {
		struct bm_buffer buf;
		int ret;

		pr_info("Thread %d: draining...\n", data->cpu);
		do {
			ret = bman_acquire(pool, &buf, 1, 0);
		} while (ret > 0);
		pr_info("Thread %d: draining done.\n", data->cpu);
		complete(&data->wakeparent);
		wait_for_completion(&data->wakethread);
		init_completion(&data->wakethread);
	}

	/* cleanup */
	bman_free_pool(pool);
	while (!kthread_should_stop())
		cpu_relax();
	pr_info("Thread %d: exiting\n", data->cpu);
	return 0;
}

static struct affine_test_data *start_affine_test(int cpu, int drain)
{
	struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return NULL;
	data->cpu = cpu;
#ifndef __rtems__
	data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus());
#endif /* __rtems__ */
	data->drain = drain;
	data->num_enter = 0;
	data->num_exit = 0;
	init_completion(&data->wakethread);
	init_completion(&data->wakeparent);
	list_add_tail(&data->node, &threads);
	data->t = kthread_create(affine_test, data, "threshtest%d", cpu);
#ifndef __rtems__
	BUG_ON(IS_ERR(data->t));
#else /* __rtems__ */
	BUG_ON(data->t == NULL);
#endif /* __rtems__ */
	kthread_bind(data->t, cpu);
	wake_up_process(data->t);
	return data;
}

void bman_test_thresh(void)
{
	int loop = TEST_NUMBUFS;
	int ret, num_cpus = 0;
	struct affine_test_data *data, *drainer = NULL;

	pr_info("%s(): Start\n", __func__);

	/* allocate a BPID and seed it */
	pool_nocb = bman_new_pool(&params_nocb);
	BUG_ON(!pool_nocb);
	while (loop--) {
		struct bm_buffer buf;

		bm_buffer_set64(&buf, 0x0badbeef + loop);
		ret = bman_release(pool_nocb, &buf, 1,
				   BMAN_RELEASE_FLAG_WAIT);
		BUG_ON(ret);
	}
	while (!bman_rcr_is_empty())
		cpu_relax();
	pr_info("%s(): Buffers are in\n", __func__);

	/* create threads and wait for them to create pools */
	params_cb.bpid = bman_get_params(pool_nocb)->bpid;
#ifndef __rtems__
	for_each_cpu(loop, cpu_online_mask) {
#else /* __rtems__ */
	for (loop = 0; loop < rtems_get_processor_count(); ++loop) {
#endif /* __rtems__ */
		data = start_affine_test(loop, drainer ? 0 : 1);
		BUG_ON(!data);
		if (!drainer)
			drainer = data;
		num_cpus++;
		wait_for_completion(&data->wakeparent);
	}

	/* signal the drainer to start draining */
	complete(&drainer->wakethread);
	wait_for_completion(&drainer->wakeparent);
	init_completion(&drainer->wakeparent);

	/* tear down */
	list_for_each_entry_safe(data, drainer, &threads, node) {
		complete(&data->wakethread);
		ret = kthread_stop(data->t);
		BUG_ON(ret);
		list_del(&data->node);
		/* check that we get the expected callbacks (and no others) */
		BUG_ON(data->num_enter != 1);
		BUG_ON(data->num_exit != 0);
		kfree(data);
	}
	bman_free_pool(pool_nocb);

	pr_info("%s(): Done\n", __func__);
}
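The deleted test paired its threshold constants with the BM_POOL_THRESH_* indices from the old bman_priv.h above. The same array written with designated initializers makes the mapping explicit (sketch only):

    static const u32 thresholds[4] = {
        [BM_POOL_THRESH_SW_ENTER] = TEST_ENTRY,    /* 129024 */
        [BM_POOL_THRESH_SW_EXIT]  = TEST_EXIT,     /* 129536 */
        [BM_POOL_THRESH_HW_ENTER] = 0,
        [BM_POOL_THRESH_HW_EXIT]  = 0,
    };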
@ -1,76 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "dpaa_sys.h"

#include <soc/fsl/bman.h>

/* BMan APIs are front-ends to the common code */

static DECLARE_DPAA_RESOURCE(bpalloc); /* BPID allocator */

/* BPID allocator front-end */

int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
{
	return dpaa_resource_new(&bpalloc, result, count, align, partial);
}
EXPORT_SYMBOL(bman_alloc_bpid_range);

static int bp_cleanup(u32 bpid)
{
	return bman_shutdown_pool(bpid) == 0;
}
void bman_release_bpid_range(u32 bpid, u32 count)
{
	u32 total_invalid = dpaa_resource_release(&bpalloc,
						  bpid, count, bp_cleanup);

	if (total_invalid)
		pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
		       bpid, bpid + count - 1, count, total_invalid);
}
EXPORT_SYMBOL(bman_release_bpid_range);

void bman_seed_bpid_range(u32 bpid, u32 count)
{
	dpaa_resource_seed(&bpalloc, bpid, count);
}
EXPORT_SYMBOL(bman_seed_bpid_range);

int bman_reserve_bpid_range(u32 bpid, u32 count)
{
	return dpaa_resource_reserve(&bpalloc, bpid, count);
}
EXPORT_SYMBOL(bman_reserve_bpid_range);
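As a usage note (not part of the commit), a driver-side sketch of the BPID front-ends above: the allocator is seeded once with the IDs that exist on the SoC, then ranges are allocated and released as pools come and go. Release routes each ID through bp_cleanup(), so leaked pools are reported rather than silently recycled:

/* Illustrative only; uses just the functions defined above. */
static int bpid_alloc_sketch(void)
{
	u32 bpid;

	bman_seed_bpid_range(0, 64);	/* BPIDs 0..63 become allocatable */
	/* ask for exactly one ID; the return value is the count obtained */
	if (bman_alloc_bpid_range(&bpid, 1, 0, 0) != 1)
		return -ENOMEM;
	/* ... create and use a pool with this BPID ... */
	bman_release_bpid_range(bpid, 1);
	return 0;
}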
@ -1,363 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(CONFIG_FSL_BMAN_PORTAL) || \
    defined(CONFIG_FSL_BMAN_PORTAL_MODULE) || \
    defined(CONFIG_FSL_QMAN_PORTAL) || \
    defined(CONFIG_FSL_QMAN_PORTAL_MODULE)
#include "dpaa_sys.h"

/* The allocator is a (possibly-empty) list of these */
struct dpaa_resource_node {
	struct list_head list;
	u32 base;
	u32 num;
	/* refcount and is_alloced are only set
	   when the node is in the used list */
	unsigned int refcount;
	int is_alloced;
};

#ifdef DPAA_RESOURCE_DEBUG
#define DPRINT pr_info
static void DUMP(struct dpaa_resource *alloc)
{
	int off = 0;
	char buf[256];
	struct dpaa_resource_node *p;

	pr_info("Free Nodes\n");
	list_for_each_entry(p, &alloc->free, list) {
		if (off < 255)
			off += snprintf(buf + off, 255-off, "{%d,%d}",
					p->base, p->base + p->num - 1);
	}
	pr_info("%s\n", buf);

	off = 0;
	pr_info("Used Nodes\n");
	list_for_each_entry(p, &alloc->used, list) {
		if (off < 255)
			off += snprintf(buf + off, 255-off, "{%d,%d}",
					p->base, p->base + p->num - 1);
	}
	pr_info("%s\n", buf);
}
#else
#define DPRINT(x...)
#define DUMP(a)
#endif

int dpaa_resource_new(struct dpaa_resource *alloc, u32 *result,
		      u32 count, u32 align, int partial)
{
	struct dpaa_resource_node *i = NULL, *next_best = NULL,
		*used_node = NULL;
	u32 base, next_best_base = 0, num = 0, next_best_num = 0;
	struct dpaa_resource_node *margin_left, *margin_right;

	*result = (u32)-1;
	DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial);
	DUMP(alloc);
	/* If 'align' is 0, it should behave as though it was 1 */
	if (!align)
		align = 1;
	margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
	if (!margin_left)
		goto err;
	margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
	if (!margin_right) {
		kfree(margin_left);
		goto err;
	}
	spin_lock_irq(&alloc->lock);
	list_for_each_entry(i, &alloc->free, list) {
		base = (i->base + align - 1) / align;
		base *= align;
		if ((base - i->base) >= i->num)
			/* alignment is impossible, regardless of count */
			continue;
		num = i->num - (base - i->base);
		if (num >= count) {
			/* this one will do nicely */
			num = count;
			goto done;
		}
		if (num > next_best_num) {
			next_best = i;
			next_best_base = base;
			next_best_num = num;
		}
	}
	if (partial && next_best) {
		i = next_best;
		base = next_best_base;
		num = next_best_num;
	} else
		i = NULL;
done:
	if (i) {
		if (base != i->base) {
			margin_left->base = i->base;
			margin_left->num = base - i->base;
			list_add_tail(&margin_left->list, &i->list);
		} else
			kfree(margin_left);
		if ((base + num) < (i->base + i->num)) {
			margin_right->base = base + num;
			margin_right->num = (i->base + i->num) -
					(base + num);
			list_add(&margin_right->list, &i->list);
		} else
			kfree(margin_right);
		list_del(&i->list);
		kfree(i);
		*result = base;
	}
	spin_unlock_irq(&alloc->lock);
err:
	DPRINT("returning %d\n", i ? num : -ENOMEM);
	DUMP(alloc);
	if (!i)
		return -ENOMEM;

	/* Add the allocation to the used list with a refcount of 1 */
	used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
	if (!used_node)
		return -ENOMEM;
	used_node->base = *result;
	used_node->num = num;
	used_node->refcount = 1;
	used_node->is_alloced = 1;
	list_add_tail(&used_node->list, &alloc->used);
	return (int)num;
}
EXPORT_SYMBOL(dpaa_resource_new);

/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid
 * forcing error-handling on to users in the deallocation path. */
static void _dpaa_resource_free(struct dpaa_resource *alloc, u32 base_id,
				u32 count)
{
	struct dpaa_resource_node *i,
		*node = kmalloc(sizeof(*node), GFP_ATOMIC);

	BUG_ON(!node);
	DPRINT("release_range(%d,%d)\n", base_id, count);
	DUMP(alloc);
	BUG_ON(!count);
	spin_lock_irq(&alloc->lock);

	node->base = base_id;
	node->num = count;
	list_for_each_entry(i, &alloc->free, list) {
		if (i->base >= node->base) {
			/* BUG_ON(any overlapping) */
			BUG_ON(i->base < (node->base + node->num));
			list_add_tail(&node->list, &i->list);
			goto done;
		}
	}
	list_add_tail(&node->list, &alloc->free);
done:
	/* Merge to the left */
	i = list_entry(node->list.prev, struct dpaa_resource_node, list);
	if (node->list.prev != &alloc->free) {
		BUG_ON((i->base + i->num) > node->base);
		if ((i->base + i->num) == node->base) {
			node->base = i->base;
			node->num += i->num;
			list_del(&i->list);
			kfree(i);
		}
	}
	/* Merge to the right */
	i = list_entry(node->list.next, struct dpaa_resource_node, list);
	if (node->list.next != &alloc->free) {
		BUG_ON((node->base + node->num) > i->base);
		if ((node->base + node->num) == i->base) {
			node->num += i->num;
			list_del(&i->list);
			kfree(i);
		}
	}
	spin_unlock_irq(&alloc->lock);
	DUMP(alloc);
}

static void dpaa_resource_free(struct dpaa_resource *alloc, u32 base_id,
			       u32 count)
{
	struct dpaa_resource_node *i = NULL;

	spin_lock_irq(&alloc->lock);

	/* First find the node in the used list and decrement its ref count */
	list_for_each_entry(i, &alloc->used, list) {
		if (i->base == base_id && i->num == count) {
			--i->refcount;
			if (i->refcount == 0) {
				list_del(&i->list);
				spin_unlock_irq(&alloc->lock);
				if (i->is_alloced)
					_dpaa_resource_free(alloc, base_id,
							    count);
				kfree(i);
				return;
			}
			spin_unlock_irq(&alloc->lock);
			return;
		}
	}
	/* Couldn't find the allocation */
	pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n",
	       base_id, count);
	spin_unlock_irq(&alloc->lock);
}

/* Same as free but no previous allocation checking is needed */
void dpaa_resource_seed(struct dpaa_resource *alloc, u32 base_id, u32 count)
{
	_dpaa_resource_free(alloc, base_id, count);
}
EXPORT_SYMBOL(dpaa_resource_seed);

/* Like 'new' but specifies the desired range, returns -ENOMEM if the entire
 * desired range is not available, or 0 for success
 */
int dpaa_resource_reserve(struct dpaa_resource *alloc, u32 base, u32 num)
{
	struct dpaa_resource_node *i = NULL, *used_node;

	DPRINT("alloc_reserve(%d,%d)\n", base, num);
	DUMP(alloc);

	spin_lock_irq(&alloc->lock);

	/* Check for the node in the used list.
	   If found, increase its refcount */
	list_for_each_entry(i, &alloc->used, list) {
		if ((i->base == base) && (i->num == num)) {
			++i->refcount;
			spin_unlock_irq(&alloc->lock);
			return 0;
		}
		if ((base >= i->base) && (base < (i->base + i->num))) {
			/* This is an attempt to reserve a region that was
			   already reserved or alloced with a different
			   base or num */
			pr_err("Cannot reserve %d - %d, it overlaps with"
			       " existing reservation from %d - %d\n",
			       base, base + num - 1, i->base,
			       i->base + i->num - 1);
			spin_unlock_irq(&alloc->lock);
			return -1;
		}
	}
	/* Check to make sure this ID isn't in the free list */
	list_for_each_entry(i, &alloc->free, list) {
		if ((base >= i->base) && (base < (i->base + i->num))) {
			/* yep, the reservation is within this node */
			pr_err("Cannot reserve %d - %d, it overlaps with"
			       " free range %d - %d and must be alloced\n",
			       base, base + num - 1,
			       i->base, i->base + i->num - 1);
			spin_unlock_irq(&alloc->lock);
			return -1;
		}
	}
	/* Add the allocation to the used list with a refcount of 1 */
	used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
	if (!used_node) {
		spin_unlock_irq(&alloc->lock);
		return -ENOMEM;
	}
	used_node->base = base;
	used_node->num = num;
	used_node->refcount = 1;
	used_node->is_alloced = 0;
	list_add_tail(&used_node->list, &alloc->used);
	spin_unlock_irq(&alloc->lock);
	return 0;
}
EXPORT_SYMBOL(dpaa_resource_reserve);

/* This is a sort-of-conditional dpaa_resource_free() routine. E.g. when
 * releasing FQIDs (probably from user-space), it can filter out those
 * that aren't in the OOS state (better to leak a h/w resource than to
 * crash). This function returns the number of invalid IDs that were not
 * released.
 */
u32 dpaa_resource_release(struct dpaa_resource *alloc,
			  u32 id, u32 count, int (*is_valid)(u32 id))
{
	int valid_mode = 0;
	u32 loop = id, total_invalid = 0;

	while (loop < (id + count)) {
		int isvalid = is_valid ? is_valid(loop) : 1;

		if (!valid_mode) {
			/* We're looking for a valid ID to terminate an invalid
			 * range */
			if (isvalid) {
				/* We finished a range of invalid IDs, a valid
				 * range is now underway */
				valid_mode = 1;
				count -= (loop - id);
				id = loop;
			} else
				total_invalid++;
		} else {
			/* We're looking for an invalid ID to terminate a
			 * valid range */
			if (!isvalid) {
				/* Release the range of valid IDs, an invalid
				 * range is now underway */
				if (loop > id)
					dpaa_resource_free(alloc, id,
							   loop - id);
				valid_mode = 0;
			}
		}
		loop++;
	}
	/* Release any unterminated range of valid IDs */
	if (valid_mode && count)
		dpaa_resource_free(alloc, id, count);
	return total_invalid;
}
EXPORT_SYMBOL(dpaa_resource_release);
#endif /* CONFIG_FSL_*MAN_PORTAL* */
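For reference (not part of the commit), a minimal sketch of how the allocator above is meant to be driven. DECLARE_DPAA_RESOURCE() comes from dpaa_sys.h, shown in the next hunk:

/*
 * Illustrative only: seed a range, carve an aligned sub-range out of it,
 * and give it back. The allocator and IDs here are made up for the example.
 */
static DECLARE_DPAA_RESOURCE(example_ids);	/* empty free/used lists */

static int dpaa_resource_sketch(void)
{
	u32 base;
	int got;

	dpaa_resource_seed(&example_ids, 256, 64);	/* IDs 256..319 free */
	/* ask for 8 IDs aligned to 8; partial=0 means all-or-nothing */
	got = dpaa_resource_new(&example_ids, &base, 8, 8, 0);
	if (got < 0)
		return got;
	/* is_valid == NULL: release all 8 back unconditionally */
	dpaa_resource_release(&example_ids, base, 8, NULL);
	return 0;
}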
@ -1,4 +1,4 @@
/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@ -31,23 +31,19 @@
#ifndef __DPAA_SYS_H
#define __DPAA_SYS_H

#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_reserved_mem.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
#include <linux/ctype.h>
#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpu.h>
#endif

#include <asm/pgtable.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/prefetch.h>
#include <linux/genalloc.h>
#include <asm/cacheflush.h>
#ifdef __rtems__
#include <asm/cache.h>
#include <asm/mpc85xx.h>
@ -55,238 +51,91 @@
#include <linux/io.h>
#include <linux/rbtree.h>
#include <bsp/linker-symbols.h>

#define DPAA_NOCACHENOLOAD_ALIGNED_REGION(designator, size) \
	BSP_NOCACHENOLOAD_SUBSECTION(designator) __aligned(size) \
	uint8_t designator[size]

#ifdef __PPC_CPU_E6500__
#define dma_wmb() ppc_light_weight_synchronize()
#else
#define dma_wmb() ppc_enforce_in_order_execution_of_io()
#endif

#define prefetch(x) ppc_data_cache_block_touch(x)
#endif /* __rtems__ */

struct dpaa_resource {
	struct list_head free;
	spinlock_t lock;
	struct list_head used;
};

#define DECLARE_DPAA_RESOURCE(name) \
struct dpaa_resource name = { \
	.free = { \
		.prev = &name.free, \
		.next = &name.free \
	}, \
	.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
	.used = { \
		.prev = &name.used, \
		.next = &name.used \
	} \
}

int dpaa_resource_new(struct dpaa_resource *alloc, u32 *result,
		      u32 count, u32 align, int partial);
u32 dpaa_resource_release(struct dpaa_resource *alloc,
			  u32 id, u32 count, int (*is_valid)(u32 id));
void dpaa_resource_seed(struct dpaa_resource *alloc, u32 base_id, u32 count);
int dpaa_resource_reserve(struct dpaa_resource *alloc, u32 base, u32 num);

/* When copying aligned words or shorts, try to avoid memcpy() */
#define CONFIG_TRY_BETTER_MEMCPY

/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
#define DPA_PORTAL_CE 0
#define DPA_PORTAL_CI 1
#define DPAA_PORTAL_CE 0
#define DPAA_PORTAL_CI 1

/* Misc inline assists */
#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
#error "Unsupported Cacheline Size"
#endif

/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
 * barriers and that dcb*() won't fall victim to compiler or execution
 * reordering with respect to other code/instructions that manipulate the same
 * cacheline. */
#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
static inline void dpaa_flush(void *p)
{
#ifndef __rtems__
#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
#ifdef CONFIG_PPC
	flush_dcache_range((unsigned long)p, (unsigned long)p+64);
#elif defined(CONFIG_ARM32)
	__cpuc_flush_dcache_area(p, 64);
#elif defined(CONFIG_ARM64)
	__flush_dcache_area(p, 64);
#endif
#else /* __rtems__ */
#ifdef __PPC_CPU_E6500__
#define lwsync() ppc_light_weight_synchronize()
	ppc_data_cache_block_flush(p);
#else
#define lwsync() ppc_synchronize_data()
#error "Unsupported platform"
#endif
#endif /* __rtems__ */
#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
#define dcbi(p) dcbf(p)
#ifdef CONFIG_PPC_E500MC
#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p))
#define dcbz_64(p) dcbzl(p)
#define dcbf_64(p) dcbf(p)
/* Commonly used combo */
#define dcbit_ro(p) \
	do { \
		dcbi(p); \
		dcbt_ro(p); \
	} while (0)
#else
#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p))
#define dcbz_64(p) \
	do { \
		dcbz((u32)p + 32); \
		dcbz(p); \
	} while (0)
#define dcbf_64(p) \
	do { \
		dcbf((u32)p + 32); \
		dcbf(p); \
	} while (0)
/* Commonly used combo */
#define dcbit_ro(p) \
	do { \
		dcbi(p); \
		dcbi((u32)p + 32); \
		dcbt_ro(p); \
		dcbt_ro((u32)p + 32); \
	} while (0)
#endif /* CONFIG_PPC_E500MC */

static inline u64 mfatb(void)
{
	u32 hi, lo, chk;

	do {
		hi = mfspr(SPRN_ATBU);
		lo = mfspr(SPRN_ATBL);
		chk = mfspr(SPRN_ATBU);
	} while (unlikely(hi != chk));
	return ((u64)hi << 32) | (u64)lo;
}

#ifdef CONFIG_FSL_DPA_CHECKING
#define DPA_ASSERT(x) WARN_ON(!(x))
#else
#define DPA_ASSERT(x)
#endif

#ifdef CONFIG_TRY_BETTER_MEMCPY
static inline void copy_words(void *dest, const void *src, size_t sz)
{
	u32 *__dest = dest;
	const u32 *__src = src;
	size_t __sz = sz >> 2;

	BUG_ON((unsigned long)dest & 0x3);
	BUG_ON((unsigned long)src & 0x3);
	BUG_ON(sz & 0x3);
	while (__sz--)
		*(__dest++) = *(__src++);
}
#else
#define copy_words memcpy
#endif

/* RB-trees */

/* We encapsulate RB-trees so that it's easier to use non-linux forms in
 * non-linux systems. This also encapsulates the extra plumbing that linux code
 * usually provides when using RB-trees. This encapsulation assumes that the
 * data type held by the tree is u32. */

struct dpa_rbtree {
	struct rb_root root;
};
#define DPA_RBTREE { .root = RB_ROOT }

static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
{
	tree->root = RB_ROOT;
}

#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \
static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
{ \
	struct rb_node *parent = NULL, **p = &tree->root.rb_node; \
	while (*p) { \
		u32 item; \
		parent = *p; \
		item = rb_entry(parent, type, node_field)->val_field; \
		if (obj->val_field < item) \
			p = &parent->rb_left; \
		else if (obj->val_field > item) \
			p = &parent->rb_right; \
		else \
			return -EBUSY; \
	} \
	rb_link_node(&obj->node_field, parent, p); \
	rb_insert_color(&obj->node_field, &tree->root); \
	return 0; \
} \
static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
{ \
	rb_erase(&obj->node_field, &tree->root); \
} \
static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
{ \
	type *ret; \
	struct rb_node *p = tree->root.rb_node; \
	while (p) { \
		ret = rb_entry(p, type, node_field); \
		if (val < ret->val_field) \
			p = p->rb_left; \
		else if (val > ret->val_field) \
			p = p->rb_right; \
		else \
			return ret; \
	} \
	return NULL; \
}
#define dpaa_invalidate(p) dpaa_flush(p)

#ifndef __rtems__
/* Bootargs */

/* QMan has "qportals=" and BMan has "bportals=", they use the same syntax
 * though; a comma-separated list of items, each item being a cpu index and/or a
 * range of cpu indices, and each item optionally prefixed by "s" to indicate
 * that the portal associated with that cpu should be shared. See bman_driver.c
 * for more specifics. */
static int __parse_portals_cpu(const char **s, unsigned int *cpu)
{
	*cpu = 0;
	if (!isdigit(**s))
		return -EINVAL;
	while (isdigit(**s))
		*cpu = *cpu * 10 + (*((*s)++) - '0');
	return 0;
}
static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared,
					struct cpumask *want_unshared,
					const char *argname)
{
	const char *s = str;
	unsigned int shared, cpu1, cpu2, loop;

keep_going:
	if (*s == 's') {
		shared = 1;
		s++;
	} else
		shared = 0;
	if (__parse_portals_cpu(&s, &cpu1))
		goto err;
	if (*s == '-') {
		s++;
		if (__parse_portals_cpu(&s, &cpu2))
			goto err;
		if (cpu2 < cpu1)
			goto err;
	} else
		cpu2 = cpu1;
	for (loop = cpu1; loop <= cpu2; loop++)
		cpumask_set_cpu(loop, shared ? want_shared : want_unshared);
	if (*s == ',') {
		s++;
		goto keep_going;
	} else if ((*s == '\0') || isspace(*s))
		return 0;
err:
	pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str,
		(unsigned long)s - (unsigned long)str);
	return -EINVAL;
}
#define dpaa_zero(p) memset(p, 0, 64)
#else /* __rtems__ */
#ifdef __PPC_CPU_E6500__
#define dpaa_zero(p) ppc_data_cache_block_clear_to_zero(p)
#else
#define dpaa_zero(p) memset(p, 0, 64)
#endif
#endif /* __rtems__ */

static inline void dpaa_touch_ro(void *p)
{
#if (L1_CACHE_BYTES == 32)
	prefetch(p+32);
#endif
	prefetch(p);
}

/* Commonly used combo */
static inline void dpaa_invalidate_touch_ro(void *p)
{
	dpaa_invalidate(p);
	dpaa_touch_ro(p);
}

#ifdef CONFIG_FSL_DPAA_CHECKING
#define DPAA_ASSERT(x) WARN_ON(!(x))
#else
#define DPAA_ASSERT(x)
#endif

/* cyclic helper for rings */
static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
{
	/* 'first' is included, 'last' is excluded */
	if (first <= last)
		return last - first;
	return ringsize + last - first;
}

/* Offset applied to genalloc pools due to zero being an error return */
#define DPAA_GENALLOC_OFF 0x80000000

#endif /* __DPAA_SYS_H */
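Two small clarifications, added for illustration only, for the ring helper and the bootarg syntax described in the hunk above:

/*
 * dpaa_cyc_diff(8, 6, 2): 'first' = 6 is included, 'last' = 2 is excluded,
 * so the live ring entries are 6, 7, 0, 1 and the wrap branch yields
 * 8 + 2 - 6 == 4. Without wrap-around, dpaa_cyc_diff(8, 2, 6) == 4 as well.
 * A portal bootarg such as "bportals=s0,2-3" asks for a shared portal on
 * cpu 0 and private portals on cpus 2 and 3.
 */
static inline void dpaa_sys_examples(void)
{
	WARN_ON(dpaa_cyc_diff(8, 6, 2) != 4);	/* wrap-around case */
	WARN_ON(dpaa_cyc_diff(8, 2, 6) != 4);	/* linear case */
}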
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
883 linux/drivers/soc/fsl/qbman/qman_ccsr.c Normal file
@ -0,0 +1,883 @@
#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"
#ifdef __rtems__
#undef dev_crit
#undef dev_dbg
#undef dev_err
#define dev_crit(dev, fmt, ...) printf(fmt, ##__VA_ARGS__)
#define dev_dbg dev_crit
#define dev_err dev_crit
#endif /* __rtems__ */

u16 qman_ip_rev;
EXPORT_SYMBOL(qman_ip_rev);
u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
EXPORT_SYMBOL(qm_channel_pool1);

/* Register offsets */
#define REG_QCSP_LIO_CFG(n)	(0x0000 + ((n) * 0x10))
#define REG_QCSP_IO_CFG(n)	(0x0004 + ((n) * 0x10))
#define REG_QCSP_DD_CFG(n)	(0x000c + ((n) * 0x10))
#define REG_DD_CFG		0x0200
#define REG_DCP_CFG(n)		(0x0300 + ((n) * 0x10))
#define REG_DCP_DD_CFG(n)	(0x0304 + ((n) * 0x10))
#define REG_DCP_DLM_AVG(n)	(0x030c + ((n) * 0x10))
#define REG_PFDR_FPC		0x0400
#define REG_PFDR_FP_HEAD	0x0404
#define REG_PFDR_FP_TAIL	0x0408
#define REG_PFDR_FP_LWIT	0x0410
#define REG_PFDR_CFG		0x0414
#define REG_SFDR_CFG		0x0500
#define REG_SFDR_IN_USE		0x0504
#define REG_WQ_CS_CFG(n)	(0x0600 + ((n) * 0x04))
#define REG_WQ_DEF_ENC_WQID	0x0630
#define REG_WQ_SC_DD_CFG(n)	(0x640 + ((n) * 0x04))
#define REG_WQ_PC_DD_CFG(n)	(0x680 + ((n) * 0x04))
#define REG_WQ_DC0_DD_CFG(n)	(0x6c0 + ((n) * 0x04))
#define REG_WQ_DC1_DD_CFG(n)	(0x700 + ((n) * 0x04))
#define REG_WQ_DCn_DD_CFG(n)	(0x6c0 + ((n) * 0x40)) /* n=2,3 */
#define REG_CM_CFG		0x0800
#define REG_ECSR		0x0a00
#define REG_ECIR		0x0a04
#define REG_EADR		0x0a08
#define REG_ECIR2		0x0a0c
#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
#define REG_MCR			0x0b00
#define REG_MCP(n)		(0x0b04 + ((n) * 0x04))
#define REG_MISC_CFG		0x0be0
#define REG_HID_CFG		0x0bf0
#define REG_IDLE_STAT		0x0bf4
#define REG_IP_REV_1		0x0bf8
#define REG_IP_REV_2		0x0bfc
#define REG_FQD_BARE		0x0c00
#define REG_PFDR_BARE		0x0c20
#define REG_offset_BAR		0x0004	/* relative to REG_[FQD|PFDR]_BARE */
#define REG_offset_AR		0x0010	/* relative to REG_[FQD|PFDR]_BARE */
#define REG_QCSP_BARE		0x0c80
#define REG_QCSP_BAR		0x0c84
#define REG_CI_SCHED_CFG	0x0d00
#define REG_SRCIDR		0x0d04
#define REG_LIODNR		0x0d08
#define REG_CI_RLM_AVG		0x0d14
#define REG_ERR_ISR		0x0e00
#define REG_ERR_IER		0x0e04
#define REG_REV3_QCSP_LIO_CFG(n)	(0x1000 + ((n) * 0x10))
#define REG_REV3_QCSP_IO_CFG(n)	(0x1004 + ((n) * 0x10))
#define REG_REV3_QCSP_DD_CFG(n)	(0x100c + ((n) * 0x10))

/* Assists for QMAN_MCR */
#define MCR_INIT_PFDR		0x01000000
#define MCR_get_rslt(v)		(u8)((v) >> 24)
#define MCR_rslt_idle(r)	(!(r) || ((r) >= 0xf0))
#define MCR_rslt_ok(r)		((r) == 0xf0)
#define MCR_rslt_eaccess(r)	((r) == 0xf8)
#define MCR_rslt_inval(r)	((r) == 0xff)

/*
 * Corenet initiator settings. Stash request queues are 4-deep to match the
 * cores' ability to snarf. Stash priority is 3, other priorities are 2.
 */
#define QM_CI_SCHED_CFG_SRCCIV	4
#define QM_CI_SCHED_CFG_SRQ_W	3
#define QM_CI_SCHED_CFG_RW_W	2
#define QM_CI_SCHED_CFG_BMAN_W	2
/* write SRCCIV enable */
#define QM_CI_SCHED_CFG_SRCCIV_EN	BIT(31)

/* Follows WQ_CS_CFG0-5 */
enum qm_wq_class {
	qm_wq_portal = 0,
	qm_wq_pool = 1,
	qm_wq_fman0 = 2,
	qm_wq_fman1 = 3,
	qm_wq_caam = 4,
	qm_wq_pme = 5,
	qm_wq_first = qm_wq_portal,
	qm_wq_last = qm_wq_pme
};

/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
enum qm_memory {
	qm_memory_fqd,
	qm_memory_pfdr
};

/* Used by all error interrupt registers except 'inhibit' */
#define QM_EIRQ_CIDE	0x20000000	/* Corenet Initiator Data Error */
#define QM_EIRQ_CTDE	0x10000000	/* Corenet Target Data Error */
#define QM_EIRQ_CITT	0x08000000	/* Corenet Invalid Target Transaction */
#define QM_EIRQ_PLWI	0x04000000	/* PFDR Low Watermark */
#define QM_EIRQ_MBEI	0x02000000	/* Multi-bit ECC Error */
#define QM_EIRQ_SBEI	0x01000000	/* Single-bit ECC Error */
#define QM_EIRQ_PEBI	0x00800000	/* PFDR Enqueues Blocked Interrupt */
#define QM_EIRQ_IFSI	0x00020000	/* Invalid FQ Flow Control State */
#define QM_EIRQ_ICVI	0x00010000	/* Invalid Command Verb */
#define QM_EIRQ_IDDI	0x00000800	/* Invalid Dequeue (Direct-connect) */
#define QM_EIRQ_IDFI	0x00000400	/* Invalid Dequeue FQ */
#define QM_EIRQ_IDSI	0x00000200	/* Invalid Dequeue Source */
#define QM_EIRQ_IDQI	0x00000100	/* Invalid Dequeue Queue */
#define QM_EIRQ_IECE	0x00000010	/* Invalid Enqueue Configuration */
#define QM_EIRQ_IEOI	0x00000008	/* Invalid Enqueue Overflow */
#define QM_EIRQ_IESI	0x00000004	/* Invalid Enqueue State */
#define QM_EIRQ_IECI	0x00000002	/* Invalid Enqueue Channel */
#define QM_EIRQ_IEQI	0x00000001	/* Invalid Enqueue Queue */

/* QMAN_ECIR valid error bit */
#define PORTAL_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
			 QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
			 QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
#define FQID_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
			 QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
			 QM_EIRQ_IFSI)

struct qm_ecir {
	u32 info;	/* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
};

static bool qm_ecir_is_dcp(const struct qm_ecir *p)
{
	return p->info & BIT(29);
}

static int qm_ecir_get_pnum(const struct qm_ecir *p)
{
	return (p->info >> 24) & 0x1f;
}

static int qm_ecir_get_fqid(const struct qm_ecir *p)
{
	return p->info & (BIT(24) - 1);
}

struct qm_ecir2 {
	u32 info;	/* ptyp[31], res[10-30], pnum[0-9] */
};

static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
{
	return p->info & BIT(31);
}

static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
{
	return p->info & (BIT(10) - 1);
}

struct qm_eadr {
	u32 info;	/* memid[24-27], eadr[0-11] */
			/* v3: memid[24-28], eadr[0-15] */
};

static int qm_eadr_get_memid(const struct qm_eadr *p)
{
	return (p->info >> 24) & 0xf;
}

static int qm_eadr_get_eadr(const struct qm_eadr *p)
{
	return p->info & (BIT(12) - 1);
}

static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
{
	return (p->info >> 24) & 0x1f;
}

static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
{
	return p->info & (BIT(16) - 1);
}

struct qman_hwerr_txt {
	u32 mask;
	const char *txt;
};

static const struct qman_hwerr_txt qman_hwerr_txts[] = {
	{ QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
	{ QM_EIRQ_CTDE, "Corenet Target Data Error" },
	{ QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
	{ QM_EIRQ_PLWI, "PFDR Low Watermark" },
	{ QM_EIRQ_MBEI, "Multi-bit ECC Error" },
	{ QM_EIRQ_SBEI, "Single-bit ECC Error" },
	{ QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
	{ QM_EIRQ_ICVI, "Invalid Command Verb" },
	{ QM_EIRQ_IFSI, "Invalid Flow Control State" },
	{ QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
	{ QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
	{ QM_EIRQ_IDSI, "Invalid Dequeue Source" },
	{ QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
	{ QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
	{ QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
	{ QM_EIRQ_IESI, "Invalid Enqueue State" },
	{ QM_EIRQ_IECI, "Invalid Enqueue Channel" },
	{ QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
};

struct qman_error_info_mdata {
	u16 addr_mask;
	u16 bits;
	const char *txt;
};

static const struct qman_error_info_mdata error_mdata[] = {
	{ 0x01FF, 24, "FQD cache tag memory 0" },
	{ 0x01FF, 24, "FQD cache tag memory 1" },
	{ 0x01FF, 24, "FQD cache tag memory 2" },
	{ 0x01FF, 24, "FQD cache tag memory 3" },
	{ 0x0FFF, 512, "FQD cache memory" },
	{ 0x07FF, 128, "SFDR memory" },
	{ 0x01FF, 72, "WQ context memory" },
	{ 0x00FF, 240, "CGR memory" },
	{ 0x00FF, 302, "Internal Order Restoration List memory" },
	{ 0x01FF, 256, "SW portal ring memory" },
};

#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)

/*
 * TODO: unimplemented registers
 *
 * Keeping a list here of QMan registers I have not yet covered;
 * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
 * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
 * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
 */

/* Pointer to the start of the QMan's CCSR space */
static u32 __iomem *qm_ccsr_start;
/* A SDQCR mask comprising all the available/visible pool channels */
static u32 qm_pools_sdqcr;

static inline u32 qm_ccsr_in(u32 offset)
{
	return ioread32be(qm_ccsr_start + offset/4);
}

static inline void qm_ccsr_out(u32 offset, u32 val)
{
	iowrite32be(val, qm_ccsr_start + offset/4);
}

u32 qm_get_pools_sdqcr(void)
{
	return qm_pools_sdqcr;
}

enum qm_dc_portal {
	qm_dc_portal_fman0 = 0,
	qm_dc_portal_fman1 = 1
};

static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
{
	DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
		    portal == qm_dc_portal_fman1);
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		qm_ccsr_out(REG_DCP_CFG(portal),
			    (ed ? 0x1000 : 0) | (sernd & 0x3ff));
	else
		qm_ccsr_out(REG_DCP_CFG(portal),
			    (ed ? 0x100 : 0) | (sernd & 0x1f));
}

static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
				 u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
				 u8 csw5, u8 csw6, u8 csw7)
{
	qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
		    ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
		    ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
		    ((csw6 & 0x7) << 4) | (csw7 & 0x7));
}

static void qm_set_hid(void)
{
	qm_ccsr_out(REG_HID_CFG, 0);
}

static void qm_set_corenet_initiator(void)
{
	qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
		    (QM_CI_SCHED_CFG_SRCCIV << 24) |
		    (QM_CI_SCHED_CFG_SRQ_W << 8) |
		    (QM_CI_SCHED_CFG_RW_W << 4) |
		    QM_CI_SCHED_CFG_BMAN_W);
}

static void qm_get_version(u16 *id, u8 *major, u8 *minor)
{
	u32 v = qm_ccsr_in(REG_IP_REV_1);
	*id = (v >> 16);
	*major = (v >> 8) & 0xff;
	*minor = v & 0xff;
}

#define PFDR_AR_EN BIT(31)
static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
{
	u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
	u32 exp = ilog2(size);

	/* choke if size isn't within range */
	DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
		    is_power_of_2(size));
	/* choke if 'ba' has lower-alignment than 'size' */
	DPAA_ASSERT(!(ba & (size - 1)));
	qm_ccsr_out(offset, upper_32_bits(ba));
	qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
	qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
}

static void qm_set_pfdr_threshold(u32 th, u8 k)
{
	qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
	qm_ccsr_out(REG_PFDR_CFG, k);
}

static void qm_set_sfdr_threshold(u16 th)
{
	qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
}

static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
{
	u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));

	DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
	/* Make sure the command interface is 'idle' */
	if (!MCR_rslt_idle(rslt)) {
		dev_crit(dev, "QMAN_MCR isn't idle");
		WARN_ON(1);
	}

	/* Write the MCR command params then the verb */
	qm_ccsr_out(REG_MCP(0), pfdr_start);
	/*
	 * TODO: remove this - it's a workaround for a model bug that is
	 * corrected in more recent versions. We use the workaround until
	 * everyone has upgraded.
	 */
	qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
	dma_wmb();
	qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
	/* Poll for the result */
	do {
		rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
	} while (!MCR_rslt_idle(rslt));
	if (MCR_rslt_ok(rslt))
		return 0;
	if (MCR_rslt_eaccess(rslt))
		return -EACCES;
	if (MCR_rslt_inval(rslt))
		return -EINVAL;
	dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
	return -ENODEV;
}

/*
 * Ideally we would use the DMA API to turn rmem->base into a DMA address
 * (especially if iommu translations ever get involved). Unfortunately, the
 * DMA API currently does not allow mapping anything that is not backed with
 * a struct page.
 */
#ifndef __rtems__
static dma_addr_t fqd_a, pfdr_a;
static size_t fqd_sz, pfdr_sz;

static int qman_fqd(struct reserved_mem *rmem)
{
	fqd_a = rmem->base;
	fqd_sz = rmem->size;

	WARN_ON(!(fqd_a && fqd_sz));

	return 0;
}
RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);

static int qman_pfdr(struct reserved_mem *rmem)
{
	pfdr_a = rmem->base;
	pfdr_sz = rmem->size;

	WARN_ON(!(pfdr_a && pfdr_sz));

	return 0;
}
RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
#else /* __rtems__ */
static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fqd, 4194304);
#define fqd_a ((uintptr_t)&fqd[0])
#define fqd_sz sizeof(fqd)
static DPAA_NOCACHENOLOAD_ALIGNED_REGION(pfdr, 33554432);
#define pfdr_a ((uintptr_t)&pfdr[0])
#define pfdr_sz sizeof(pfdr)
#endif /* __rtems__ */

static unsigned int qm_get_fqid_maxcnt(void)
{
	return fqd_sz / 64;
}

/*
 * Flush this memory range from data cache so that QMAN originated
 * transactions for this memory region could be marked non-coherent.
 */
static int zero_priv_mem(struct device *dev, struct device_node *node,
			 phys_addr_t addr, size_t sz)
{
#ifndef __rtems__
	/* map as cacheable, non-guarded */
	void __iomem *tmpp = ioremap_prot(addr, sz, 0);

	if (!tmpp)
		return -ENOMEM;

	memset_io(tmpp, 0, sz);
	flush_dcache_range((unsigned long)tmpp,
			   (unsigned long)tmpp + sz);
	iounmap(tmpp);

#else /* __rtems__ */
	memset((void *)(uintptr_t)addr, 0, sz);
#endif /* __rtems__ */
	return 0;
}

static void log_edata_bits(struct device *dev, u32 bit_count)
{
	u32 i, j, mask = 0xffffffff;

	dev_warn(dev, "ErrInt, EDATA:\n");
	i = bit_count / 32;
	if (bit_count % 32) {
		i++;
		mask = ~(mask << bit_count % 32);
	}
	j = 16 - i;
	dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
	j++;
	for (; j < 16; j++)
		dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
}

static void log_additional_error_info(struct device *dev, u32 isr_val,
				      u32 ecsr_val)
{
	struct qm_ecir ecir_val;
	struct qm_eadr eadr_val;
	int memid;

	ecir_val.info = qm_ccsr_in(REG_ECIR);
	/* Is portal info valid? */
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
		struct qm_ecir2 ecir2_val;

		ecir2_val.info = qm_ccsr_in(REG_ECIR2);
		if (ecsr_val & PORTAL_ECSR_ERR) {
			dev_warn(dev, "ErrInt: %s id %d\n",
				 qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
				 qm_ecir2_get_pnum(&ecir2_val));
		}
		if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
				 qm_ecir_get_fqid(&ecir_val));

		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
			eadr_val.info = qm_ccsr_in(REG_EADR);
			memid = qm_eadr_v3_get_memid(&eadr_val);
			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
				 error_mdata[memid].txt,
				 error_mdata[memid].addr_mask
				 & qm_eadr_v3_get_eadr(&eadr_val));
			log_edata_bits(dev, error_mdata[memid].bits);
		}
	} else {
		if (ecsr_val & PORTAL_ECSR_ERR) {
			dev_warn(dev, "ErrInt: %s id %d\n",
				 qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
				 qm_ecir_get_pnum(&ecir_val));
		}
		if (ecsr_val & FQID_ECSR_ERR)
			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
				 qm_ecir_get_fqid(&ecir_val));

		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
			eadr_val.info = qm_ccsr_in(REG_EADR);
			memid = qm_eadr_get_memid(&eadr_val);
			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
				 error_mdata[memid].txt,
				 error_mdata[memid].addr_mask
				 & qm_eadr_get_eadr(&eadr_val));
			log_edata_bits(dev, error_mdata[memid].bits);
		}
	}
}

static irqreturn_t qman_isr(int irq, void *ptr)
{
	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
	struct device *dev = ptr;

	ier_val = qm_ccsr_in(REG_ERR_IER);
	isr_val = qm_ccsr_in(REG_ERR_ISR);
	ecsr_val = qm_ccsr_in(REG_ECSR);
	isr_mask = isr_val & ier_val;

	if (!isr_mask)
		return IRQ_NONE;

	for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
		if (qman_hwerr_txts[i].mask & isr_mask) {
#ifndef __rtems__
			dev_err_ratelimited(dev, "ErrInt: %s\n",
					    qman_hwerr_txts[i].txt);
#endif /* __rtems__ */
			if (qman_hwerr_txts[i].mask & ecsr_val) {
				log_additional_error_info(dev, isr_mask,
							  ecsr_val);
				/* Re-arm error capture registers */
				qm_ccsr_out(REG_ECSR, ecsr_val);
			}
			if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
				dev_dbg(dev, "Disabling error 0x%x\n",
					qman_hwerr_txts[i].mask);
				ier_val &= ~qman_hwerr_txts[i].mask;
				qm_ccsr_out(REG_ERR_IER, ier_val);
			}
		}
	}
	qm_ccsr_out(REG_ERR_ISR, isr_val);

	return IRQ_HANDLED;
}

static int qman_init_ccsr(struct device *dev)
{
	int i, err;

	/* FQD memory */
	qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
	/* PFDR memory */
	qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
	err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
	if (err)
		return err;
	/* thresholds */
	qm_set_pfdr_threshold(512, 64);
	qm_set_sfdr_threshold(128);
	/* clear stale PEBI bit from interrupt status register */
	qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
	/* corenet initiator settings */
	qm_set_corenet_initiator();
	/* HID settings */
	qm_set_hid();
	/* Set scheduling weights to defaults */
	for (i = qm_wq_first; i <= qm_wq_last; i++)
		qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
	/* We are not prepared to accept ERNs for hardware enqueues */
	qm_set_dc(qm_dc_portal_fman0, 1, 0);
	qm_set_dc(qm_dc_portal_fman1, 1, 0);
	return 0;
}

#define LIO_CFG_LIODN_MASK 0x0fff0000
void qman_liodn_fixup(u16 channel)
{
	static int done;
	static u32 liodn_offset;
	u32 before, after;
	int idx = channel - QM_CHANNEL_SWPORTAL0;

	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
	else
		before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
	if (!done) {
		liodn_offset = before & LIO_CFG_LIODN_MASK;
		done = 1;
		return;
	}
	after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
	else
		qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
}

#define IO_CFG_SDEST_MASK 0x00ff0000
void qman_set_sdest(u16 channel, unsigned int cpu_idx)
{
	int idx = channel - QM_CHANNEL_SWPORTAL0;
	u32 before, after;

	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
		before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
		/* Each pair of vCPUs shares the same SRQ (SDEST) */
		cpu_idx /= 2;
		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
		qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
	} else {
		before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
		qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
	}
}

static int qman_resource_init(struct device *dev)
{
	int pool_chan_num, cgrid_num;
	int ret, i;

	switch (qman_ip_rev >> 8) {
	case 1:
		pool_chan_num = 15;
		cgrid_num = 256;
		break;
	case 2:
		pool_chan_num = 3;
		cgrid_num = 64;
		break;
	case 3:
		pool_chan_num = 15;
		cgrid_num = 256;
		break;
	default:
		return -ENODEV;
	}

	ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
			   pool_chan_num, -1);
	if (ret) {
		dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
		return ret;
	}

	ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
	if (ret) {
		dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
		return ret;
	}

	/* parse pool channels into the SDQCR mask */
	for (i = 0; i < cgrid_num; i++)
		qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);

	ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
			   qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
	if (ret) {
		dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
		return ret;
	}

	return 0;
}

static int fsl_qman_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
#ifdef __rtems__
	struct resource res_storage;
#endif /* __rtems__ */
	struct resource *res;
	int ret, err_irq;
	u16 id;
	u8 major, minor;

#ifndef __rtems__
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
#else /* __rtems__ */
	res = platform_get_resource(&res_storage, pdev, IORESOURCE_MEM, 0);
#endif /* __rtems__ */
	if (!res) {
		dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
			node->full_name);
		return -ENXIO;
	}
	qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
	if (!qm_ccsr_start)
		return -ENXIO;

	qm_get_version(&id, &major, &minor);
	if (major == 1 && minor == 0) {
		dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
		return -ENODEV;
	} else if (major == 1 && minor == 1)
		qman_ip_rev = QMAN_REV11;
	else if (major == 1 && minor == 2)
		qman_ip_rev = QMAN_REV12;
	else if (major == 2 && minor == 0)
		qman_ip_rev = QMAN_REV20;
	else if (major == 3 && minor == 0)
		qman_ip_rev = QMAN_REV30;
	else if (major == 3 && minor == 1)
		qman_ip_rev = QMAN_REV31;
	else {
		dev_err(dev, "Unknown QMan version\n");
		return -ENODEV;
	}

	if ((qman_ip_rev & 0xff00) >= QMAN_REV30)
		qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;

	ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
	WARN_ON(ret);
	if (ret)
		return -ENODEV;

	ret = qman_init_ccsr(dev);
	if (ret) {
		dev_err(dev, "CCSR setup failed\n");
		return ret;
	}

	err_irq = platform_get_irq(pdev, 0);
	if (err_irq <= 0) {
		dev_info(dev, "Can't get %s property 'interrupts'\n",
			 node->full_name);
		return -ENODEV;
	}
	ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
			       dev);
	if (ret) {
		dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
			ret, node->full_name);
		return ret;
	}

	/*
	 * Write-to-clear any stale bits, (eg. starvation being asserted prior
	 * to resource allocation during driver init).
	 */
	qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
	/* Enable Error Interrupts */
	qm_ccsr_out(REG_ERR_IER, 0xffffffff);

	qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
	if (IS_ERR(qm_fqalloc)) {
		ret = PTR_ERR(qm_fqalloc);
		dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
		return ret;
	}

	qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
	if (IS_ERR(qm_qpalloc)) {
		ret = PTR_ERR(qm_qpalloc);
		dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
		return ret;
	}

	qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
	if (IS_ERR(qm_cgralloc)) {
		ret = PTR_ERR(qm_cgralloc);
		dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
		return ret;
	}

	ret = qman_resource_init(dev);
	if (ret)
		return ret;

	ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
	if (ret)
		return ret;

	ret = qman_wq_alloc();
	if (ret)
		return ret;

	return 0;
}

#ifndef __rtems__
static const struct of_device_id fsl_qman_ids[] = {
	{
		.compatible = "fsl,qman",
	},
	{}
};

static struct platform_driver fsl_qman_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = fsl_qman_ids,
		.suppress_bind_attrs = true,
	},
	.probe = fsl_qman_probe,
};

builtin_platform_driver(fsl_qman_driver);
#else /* __rtems__ */
#include <bsp/fdt.h>
#include <bsp/qoriq.h>

SYSINIT_REFERENCE(bman);

static void
qman_sysinit(void)
{
	const char *fdt = bsp_fdt_get();
	struct {
		struct platform_device pdev;
		struct device_node of_node;
	} dev;
	const char *name;
	int node;
	int ret;

	name = "fsl,qman";
	node = fdt_node_offset_by_compatible(fdt, 0, name);
	if (node < 0)
		panic("qman: no qman in FDT");

	memset(&dev, 0, sizeof(dev));
	dev.pdev.dev.of_node = &dev.of_node;
	dev.pdev.dev.base = (uintptr_t)&qoriq;
	dev.of_node.offset = node;
	dev.of_node.full_name = name;

	ret = fsl_qman_probe(&dev.pdev);
	if (ret != 0)
		panic("qman: init failed");

	qman_sysinit_portals();
}
SYSINIT(qman, SI_SUB_CPU, SI_ORDER_SECOND, qman_sysinit, NULL);
#endif /* __rtems__ */
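For reference (not part of the commit), the MCR handshake that qm_init_pfdr() above performs, distilled into a generic sketch: parameters first, then a barrier, then the verb, then poll the result byte until the command interface goes idle. Only names defined in the file above are used:

/* Illustrative only; error mapping simplified to -EIO. */
static int qm_mcr_exec_sketch(u32 verb, u32 p0, u32 p1)
{
	u8 rslt;

	qm_ccsr_out(REG_MCP(0), p0);
	qm_ccsr_out(REG_MCP(1), p1);
	dma_wmb();			/* params must land before the verb */
	qm_ccsr_out(REG_MCR, verb);
	do {
		rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
	} while (!MCR_rslt_idle(rslt));
	return MCR_rslt_ok(rslt) ? 0 : -EIO;
}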
@ -1,87 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2013 - 2015 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

#include <linux/time.h>

static int __init early_qman_init(void)
{
	struct device_node *dn;
	u32 is_portal_available;

	qman_init();

	is_portal_available = 0;
	for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
		if (of_device_is_available(dn)) {
			is_portal_available = 1;
			break;
		}
	}

	if (!qman_have_ccsr() && is_portal_available) {
		struct qman_fq fq = {.fqid = 1};
		struct qm_mcr_queryfq_np np;
		int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
		struct timespec nowts, diffts, startts = current_kernel_time();

		/* Loop until querying the given fqid succeeds or we time out */
		while (1) {
			err = qman_query_fq_np(&fq, &np);
			if (!err) {
				/* success, control-plane has configured QMan */
				break;
			} else if (err != -ERANGE) {
				pr_err("I/O error, continuing anyway\n");
				break;
			}
			nowts = current_kernel_time();
			diffts = timespec_sub(nowts, startts);
			if (diffts.tv_sec > 0) {
				if (!retry--) {
					pr_err("Time out, control-plane dead?\n");
					break;
				}
				pr_warn("Polling for the control-plane (%d)\n",
					retry);
			}
		}
	}

	qman_resource_init();

	return 0;
}
subsys_initcall(early_qman_init);
@ -2,7 +2,7 @@

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -35,304 +35,40 @@
#include "qman_priv.h"
|
||||
#ifdef __rtems__
|
||||
#include <bsp/qoriq.h>
|
||||
#undef dev_crit
|
||||
#undef dev_info
|
||||
#define dev_crit(dev, fmt, ...) printf(fmt, ##__VA_ARGS__)
|
||||
#define dev_info dev_crit
|
||||
#endif /* __rtems__ */
|
||||
|
||||
#ifndef __rtems__
|
||||
struct qman_portal *qman_dma_portal;
|
||||
EXPORT_SYMBOL(qman_dma_portal);
|
||||
#endif /* __rtems__ */
|
||||
|
||||
/* Enable portal interrupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW 1
#define CONFIG_FSL_DPA_PIRQ_FAST 1

/* Global variable containing revision id (even on non-control plane systems
* where CCSR isn't available) */
u16 qman_ip_rev;
EXPORT_SYMBOL(qman_ip_rev);
u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
EXPORT_SYMBOL(qm_channel_pool1);
u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
EXPORT_SYMBOL(qm_channel_caam);
u16 qm_channel_pme = QMAN_CHANNEL_PME;
EXPORT_SYMBOL(qm_channel_pme);
u16 qm_channel_dce = QMAN_CHANNEL_DCE;
EXPORT_SYMBOL(qm_channel_dce);
u16 qman_portal_max;
EXPORT_SYMBOL(qman_portal_max);

#ifndef __rtems__
/* For these variables, and the portal-initialisation logic, the
* comments in bman_driver.c apply here so won't be repeated. */
static struct qman_portal *shared_portals[NR_CPUS];
static int num_shared_portals;
static int shared_portals_idx;
static LIST_HEAD(unused_pcfgs);
#endif /* __rtems__ */

/* A SDQCR mask comprising all the available/visible pool channels */
static u32 pools_sdqcr;

#define STR_ERR_NOPROP "No '%s' property in node %s\n"
#define STR_ERR_CELL "'%s' is not a %d-cell range in node %s\n"
#define STR_FQID_RANGE "fsl,fqid-range"
#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range"
#define STR_CGRID_RANGE "fsl,cgrid-range"

/* A "fsl,fqid-range" node; release the given range to the allocator */
static __init int fsl_fqid_range_init(struct device_node *node)
{
int ret;
const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);

if (!range) {
pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
return -EINVAL;
}
if (ret != 8) {
pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
return -EINVAL;
}
qman_seed_fqid_range(range[0], range[1]);
pr_info("FQID allocator includes range %d:%d\n",
range[0], range[1]);
return 0;
}

/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
{
int ret;
const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);

if (!chanid) {
pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
return -EINVAL;
}
if (ret != 8) {
pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
return -EINVAL;
}
for (ret = 0; ret < chanid[1]; ret++)
pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(chanid[0] + ret);
return 0;
}

/* A "fsl,pool-channel-range" node; release the given range to the allocator */
static __init int fsl_pool_channel_range_init(struct device_node *node)
{
int ret;
const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);

if (!chanid) {
pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
return -EINVAL;
}
if (ret != 8) {
pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
return -EINVAL;
}
qman_seed_pool_range(chanid[0], chanid[1]);
pr_info("Pool channel allocator includes range %d:%d\n",
chanid[0], chanid[1]);
return 0;
}

/* A "fsl,cgrid-range" node; release the given range to the allocator */
static __init int fsl_cgrid_range_init(struct device_node *node)
{
struct qman_cgr cgr;
int ret, errors = 0;
const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);

if (!range) {
pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name);
return -EINVAL;
}
if (ret != 8) {
pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name);
return -EINVAL;
}
qman_seed_cgrid_range(range[0], range[1]);
pr_info("CGRID allocator includes range %d:%d\n",
range[0], range[1]);
for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) {
ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
if (ret)
errors++;
}
if (errors)
pr_err("Warning: %d error%s while initialising CGRs %d:%d\n",
errors, (errors > 1) ? "s" : "", range[0], range[1]);
return 0;
}

static void qman_get_ip_revision(struct device_node *dn)
{
#ifdef __rtems__
struct device_node of_dns;
#endif /* __rtems__ */
u16 ip_rev = 0;

for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
if (!of_device_is_available(dn))
continue;
if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") ||
of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) {
pr_err("Rev1.0 on P4080 rev1 is not supported!\n");
BUG_ON(1);
} else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") ||
of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) {
ip_rev = QMAN_REV11;
qman_portal_max = 10;
} else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") ||
of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) {
ip_rev = QMAN_REV12;
qman_portal_max = 10;
} else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") ||
of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) {
ip_rev = QMAN_REV20;
qman_portal_max = 3;
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.0.0")) {
ip_rev = QMAN_REV30;
qman_portal_max = 50;
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.0.1")) {
ip_rev = QMAN_REV30;
qman_portal_max = 25;
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.1.0")) {
ip_rev = QMAN_REV31;
qman_portal_max = 50;
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.1.1")) {
ip_rev = QMAN_REV31;
qman_portal_max = 25;
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.1.2")) {
ip_rev = QMAN_REV31;
qman_portal_max = 18;
} else if (of_device_is_compatible(dn,
"fsl,qman-portal-3.1.3")) {
ip_rev = QMAN_REV31;
qman_portal_max = 10;
} else {
pr_warn("Unknown version in portal node, default to rev1.1\n");
ip_rev = QMAN_REV11;
qman_portal_max = 10;
}

if (!qman_ip_rev) {
if (ip_rev) {
qman_ip_rev = ip_rev;
} else {
pr_warn("Unknown version, default to rev1.1\n");
qman_ip_rev = QMAN_REV11;
}
} else if (ip_rev && (qman_ip_rev != ip_rev))
pr_warn("Revision = 0x%04x, but portal '%s' has 0x%04x\n",
qman_ip_rev, dn->full_name, ip_rev);
if (qman_ip_rev == ip_rev)
break;
}
}

#ifndef __rtems__
/* Parse a portal node, perform generic mapping duties and return the config. It
* is not known at this stage for what purpose (or even if) the portal will be
* used. */
static struct qm_portal_config * __init parse_pcfg(struct device_node *node)
{
struct qm_portal_config *pcfg;
const u32 *channel;
int irq, ret;
struct resource res;

pcfg = kzalloc(sizeof(*pcfg), GFP_KERNEL);
if (!pcfg)
return NULL;

/*
* This is a *horrible hack*, but the IOMMU/PAMU driver needs a
* 'struct device' in order to get the PAMU stashing setup and the QMan
* portal [driver] won't function at all without ring stashing
*
* Making the QMan portal driver nice and proper is part of the
* upstreaming effort
*/
pcfg->dev.bus = &platform_bus_type;
pcfg->dev.of_node = node;
#ifdef CONFIG_IOMMU_API
pcfg->dev.archdata.iommu_domain = NULL;
#endif

ret = of_address_to_resource(node, DPA_PORTAL_CE,
&pcfg->addr_phys[DPA_PORTAL_CE]);
if (ret) {
pr_err("Can't get %s property 'reg::CE'\n", node->full_name);
goto err;
}
ret = of_address_to_resource(node, DPA_PORTAL_CI,
&pcfg->addr_phys[DPA_PORTAL_CI]);
if (ret) {
pr_err("Can't get %s property 'reg::CI'\n", node->full_name);
goto err;
}

channel = of_get_property(node, "fsl,qman-channel-id", &ret);
if (!channel || (ret != 4)) {
pr_err("Can't get %s property 'fsl,qman-channel-id'\n",
node->full_name);
goto err;
}
pcfg->public_cfg.channel = *channel;
pcfg->public_cfg.cpu = -1;
irq = irq_of_parse_and_map(node, 0);
if (irq == NO_IRQ) {
pr_err("Can't get %s property 'interrupts'\n", node->full_name);
goto err;
}
pcfg->public_cfg.irq = irq;
#ifdef CONFIG_FSL_QMAN_CONFIG
/* We need the same LIODN offset for all portals */
qman_liodn_fixup(pcfg->public_cfg.channel);
#endif

pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
pcfg->addr_phys[DPA_PORTAL_CE].start,
resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]),
0);
pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
pcfg->addr_phys[DPA_PORTAL_CI].start,
resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
_PAGE_GUARDED | _PAGE_NO_CACHE);

return pcfg;
err:
kfree(pcfg);
return NULL;
}

static struct qm_portal_config *get_pcfg(struct list_head *list)
{
struct qm_portal_config *pcfg;

if (list_empty(list))
return NULL;
pcfg = list_entry(list->prev, struct qm_portal_config, list);
list_del(&pcfg->list);
return pcfg;
}
static struct cpumask portal_cpus;
/* protect qman global registers and global data shared among portals */
static DEFINE_SPINLOCK(qman_lock);
#endif /* __rtems__ */

static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
#ifdef CONFIG_FSL_PAMU
int ret;
struct device *dev = pcfg->dev;
int window_count = 1;
struct iommu_domain_geometry geom_attr;
struct pamu_stash_attribute stash_attr;
int ret;

pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
if (!pcfg->iommu_domain) {
pr_err("%s(): iommu_domain_alloc() failed", __func__);
goto _no_iommu;
dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
goto no_iommu;
}
geom_attr.aperture_start = 0;
geom_attr.aperture_end =
@ -341,14 +77,16 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
&geom_attr);
if (ret < 0) {
pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret);
goto _iommu_domain_free;
dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
ret);
goto out_domain_free;
}
ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
&window_count);
if (ret < 0) {
pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret);
goto _iommu_domain_free;
dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
ret);
goto out_domain_free;
}
stash_attr.cpu = cpu;
stash_attr.cache = PAMU_ATTR_CACHE_L1;
@ -356,45 +94,42 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
DOMAIN_ATTR_FSL_PAMU_STASH,
&stash_attr);
if (ret < 0) {
pr_err("%s(): iommu_domain_set_attr() = %d",
dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
__func__, ret);
goto _iommu_domain_free;
goto out_domain_free;
}
ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
IOMMU_READ | IOMMU_WRITE);
if (ret < 0) {
pr_err("%s(): iommu_domain_window_enable() = %d",
dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
__func__, ret);
goto _iommu_domain_free;
goto out_domain_free;
}
ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
ret = iommu_attach_device(pcfg->iommu_domain, dev);
if (ret < 0) {
pr_err("%s(): iommu_device_attach() = %d",
__func__, ret);
goto _iommu_domain_free;
dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
ret);
goto out_domain_free;
}
ret = iommu_domain_set_attr(pcfg->iommu_domain,
DOMAIN_ATTR_FSL_PAMU_ENABLE,
&window_count);
if (ret < 0) {
pr_err("%s(): iommu_domain_set_attr() = %d",
__func__, ret);
goto _iommu_detach_device;
dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
ret);
goto out_detach_device;
}

_no_iommu:
no_iommu:
#endif
#ifdef CONFIG_FSL_QMAN_CONFIG
if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
#endif
pr_warn("Failed to set the stash request queue\n");
qman_set_sdest(pcfg->channel, cpu);

return;

#ifdef CONFIG_FSL_PAMU
_iommu_detach_device:
out_detach_device:
iommu_detach_device(pcfg->iommu_domain, NULL);
_iommu_domain_free:
out_domain_free:
iommu_domain_free(pcfg->iommu_domain);
pcfg->iommu_domain = NULL;
#endif
@ -403,14 +138,22 @@ _iommu_domain_free:
static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
{
struct qman_portal *p;
u32 irq_sources = 0;

/* We need the same LIODN offset for all portals */
qman_liodn_fixup(pcfg->channel);

#ifndef __rtems__
pcfg->iommu_domain = NULL;
#endif /* __rtems__ */
portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
portal_set_cpu(pcfg, pcfg->cpu);
p = qman_create_affine_portal(pcfg, NULL);
if (p) {
u32 irq_sources = 0;
if (!p) {
dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
__func__, pcfg->cpu);
return NULL;
}

/* Determine what should be interrupt-vs-poll driven */
#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
@ -420,48 +163,29 @@ static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
irq_sources |= QM_PIRQ_DQRI;
#endif
qman_p_irqsource_add(p, irq_sources);
pr_info("Portal %sinitialised, cpu %d\n",

#ifndef __rtems__
pcfg->public_cfg.is_shared ? "(shared) " : "",
#else /* __rtems__ */
"",
spin_lock(&qman_lock);
if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
/* all assigned portals are initialized now */
qman_init_cgr_all();
}

if (!qman_dma_portal)
qman_dma_portal = p;

spin_unlock(&qman_lock);
#endif /* __rtems__ */
pcfg->public_cfg.cpu);
} else
pr_crit("Portal failure on cpu %d\n", pcfg->public_cfg.cpu);

dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);

return p;
}

#ifndef __rtems__
static void init_slave(int cpu)
{
struct qman_portal *p;
struct cpumask oldmask = *tsk_cpus_allowed(current);

set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
if (!p)
pr_err("Slave portal failure on cpu %d\n", cpu);
else
pr_info("Portal (slave) initialised, cpu %d\n", cpu);
set_cpus_allowed_ptr(current, &oldmask);
if (shared_portals_idx >= num_shared_portals)
shared_portals_idx = 0;
}

static struct cpumask want_unshared __initdata;
static struct cpumask want_shared __initdata;

static int __init parse_qportals(char *str)
{
return parse_portals_bootarg(str, &want_shared, &want_unshared,
"qportals");
}
__setup("qportals=", parse_qportals);

static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
unsigned int cpu)
{
#ifdef CONFIG_FSL_PAMU /* TODO */
struct pamu_stash_attribute stash_attr;
int ret;

@ -471,77 +195,196 @@ static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
ret = iommu_domain_set_attr(pcfg->iommu_domain,
DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
if (ret < 0) {
pr_err("Failed to update pamu stash setting\n");
dev_err(pcfg->dev,
"Failed to update pamu stash setting\n");
return;
}
}
#ifdef CONFIG_FSL_QMAN_CONFIG
if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
#endif
pr_warn("Failed to update portal's stash request queue\n");
qman_set_sdest(pcfg->channel, cpu);
}

static void qman_offline_cpu(unsigned int cpu)
#ifndef __rtems__
static int qman_offline_cpu(unsigned int cpu)
{
struct qman_portal *p;
const struct qm_portal_config *pcfg;

p = (struct qman_portal *)affine_portals[cpu];
p = affine_portals[cpu];
if (p) {
pcfg = qman_get_qm_portal_config(p);
if (pcfg) {
irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
irq_set_affinity(pcfg->irq, cpumask_of(0));
qman_portal_update_sdest(pcfg, 0);
}
}
return 0;
}
#endif /* __rtems__ */

#ifdef CONFIG_HOTPLUG_CPU
static void qman_online_cpu(unsigned int cpu)
static int qman_online_cpu(unsigned int cpu)
{
struct qman_portal *p;
const struct qm_portal_config *pcfg;

p = (struct qman_portal *)affine_portals[cpu];
p = affine_portals[cpu];
if (p) {
pcfg = qman_get_qm_portal_config(p);
if (pcfg) {
irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
irq_set_affinity(pcfg->irq, cpumask_of(cpu));
qman_portal_update_sdest(pcfg, cpu);
}
}
return 0;
}

static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
static int qman_portal_probe(struct platform_device *pdev)
{
unsigned int cpu = (unsigned long)hcpu;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct qm_portal_config *pcfg;
struct resource *addr_phys[2];
void __iomem *va;
int irq, cpu, err;
u32 val;

switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
qman_online_cpu(cpu);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
if (!pcfg)
return -ENOMEM;

pcfg->dev = dev;

addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
DPAA_PORTAL_CE);
if (!addr_phys[0]) {
dev_err(dev, "Can't get %s property 'reg::CE'\n",
node->full_name);
return -ENXIO;
}

addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
DPAA_PORTAL_CI);
if (!addr_phys[1]) {
dev_err(dev, "Can't get %s property 'reg::CI'\n",
node->full_name);
return -ENXIO;
}

err = of_property_read_u32(node, "cell-index", &val);
if (err) {
dev_err(dev, "Can't get %s property 'cell-index'\n",
node->full_name);
return err;
}
pcfg->channel = val;
pcfg->cpu = -1;
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(dev, "Can't get %s IRQ\n", node->full_name);
return -ENXIO;
}
pcfg->irq = irq;

va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
if (!va) {
dev_err(dev, "ioremap::CE failed\n");
goto err_ioremap1;
}

pcfg->addr_virt[DPAA_PORTAL_CE] = va;

va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
_PAGE_GUARDED | _PAGE_NO_CACHE);
if (!va) {
dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
}

pcfg->addr_virt[DPAA_PORTAL_CI] = va;

pcfg->pools = qm_get_pools_sdqcr();

spin_lock(&qman_lock);
cpu = cpumask_next_zero(-1, &portal_cpus);
if (cpu >= nr_cpu_ids) {
/* unassigned portal, skip init */
spin_unlock(&qman_lock);
return 0;
}

cpumask_set_cpu(cpu, &portal_cpus);
spin_unlock(&qman_lock);
pcfg->cpu = cpu;

if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
dev_err(dev, "dma_set_mask() failed\n");
goto err_portal_init;
}

if (!init_pcfg(pcfg)) {
dev_err(dev, "portal init failed\n");
goto err_portal_init;
}

/* clear irq affinity if assigned cpu is offline */
if (!cpu_online(cpu))
qman_offline_cpu(cpu);
default:
break;
}
return NOTIFY_OK;

return 0;

err_portal_init:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
err_ioremap2:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
err_ioremap1:
return -ENXIO;
}

static struct notifier_block qman_hotplug_cpu_notifier = {
.notifier_call = qman_hotplug_cpu_callback,
static const struct of_device_id qman_portal_ids[] = {
{
.compatible = "fsl,qman-portal",
},
{}
};
#endif /* CONFIG_HOTPLUG_CPU */
MODULE_DEVICE_TABLE(of, qman_portal_ids);

#ifdef __rtems__
static struct platform_driver qman_portal_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qman_portal_ids,
},
.probe = qman_portal_probe,
};

static int __init qman_portal_driver_register(struct platform_driver *drv)
{
int ret;

ret = platform_driver_register(drv);
if (ret < 0)
return ret;

ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"soc/qman_portal:online",
qman_online_cpu, qman_offline_cpu);
if (ret < 0) {
pr_err("qman: failed to register hotplug callbacks.\n");
platform_driver_unregister(drv);
return ret;
}
return 0;
}

module_driver(qman_portal_driver,
qman_portal_driver_register, platform_driver_unregister);
#else /* __rtems__ */
#include <bsp/fdt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

static struct qm_portal_config qman_configs[NR_CPUS];
static void
qman_init_portals(void)

void
qman_sysinit_portals(void)
{
const char *fdt = bsp_fdt_get();
struct device_node dn;
@ -575,7 +418,7 @@ qman_init_portals(void)
struct qm_portal_config *pcfg = &qman_configs[cpu];
struct qman_portal *portal;
struct resource res;
const u32 *channel;
u32 val;

if (node < 0)
panic("qman: missing portal in FDT");
@ -600,197 +443,29 @@ qman_init_portals(void)
BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] <
(uintptr_t)&qoriq_qman_portal[2][0]);

pcfg->public_cfg.irq = of_irq_to_resource(&dn, 0, NULL);
if (pcfg->public_cfg.irq == NO_IRQ)
ret = of_property_read_u32(&dn, "cell-index", &val);
if (ret != 0)
panic("qman: no cell-index");
pcfg->channel = val;

pcfg->irq = of_irq_to_resource(&dn, 0, NULL);
if (pcfg->irq == NO_IRQ)
panic("qman: no portal interrupt");

channel = of_get_property(&dn, "fsl,qman-channel-id", &ret);
if (channel == NULL || ret != 4)
panic("qman: no portal channel ID");
pcfg->public_cfg.channel = *channel;

pcfg->public_cfg.cpu = cpu;
pcfg->public_cfg.pools = pools_sdqcr;
pcfg->cpu = cpu;
pcfg->pools = qm_get_pools_sdqcr();

portal = init_pcfg(pcfg);
if (portal == NULL)
panic("qman: cannot create portal");

qman_portal_update_sdest(pcfg, cpu);

node = fdt_next_subnode(fdt, node);
dn.offset = node;
}

/* all assigned portals are initialized now */
qman_init_cgr_all();
}
#endif /* __rtems__ */
#ifndef __rtems__
__init int qman_init(void)
{
struct cpumask slave_cpus;
struct cpumask unshared_cpus = *cpu_none_mask;
struct cpumask shared_cpus = *cpu_none_mask;
LIST_HEAD(unshared_pcfgs);
LIST_HEAD(shared_pcfgs);
struct device_node *dn;
struct qm_portal_config *pcfg;
struct qman_portal *p;
int cpu, ret;
struct cpumask offline_cpus;

/* Initialise the QMan (CCSR) device */
for_each_compatible_node(dn, NULL, "fsl,qman") {
if (!qman_init_ccsr(dn))
pr_info("Err interrupt handler present\n");
else
pr_err("CCSR setup failed\n");
}
#else /* __rtems__ */
int
qman_init(struct device_node *dn)
{
struct device_node of_dns;
int ret;
#endif /* __rtems__ */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
/* Setup lookup table for FQ demux */
ret = qman_setup_fq_lookup_table(qman_fqd_size()/64);
if (ret)
return ret;
#endif

/* Get qman ip revision */
qman_get_ip_revision(dn);
if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
qm_channel_pme = QMAN_CHANNEL_PME_REV3;
}

/* Parse pool channels into the SDQCR mask. (Must happen before portals
* are initialised.) */
for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
ret = fsl_pool_channel_range_sdqcr(dn);
if (ret)
return ret;
}

#ifndef __rtems__
memset(affine_portals, 0, sizeof(void *) * num_possible_cpus());
/* Initialise portals. See bman_driver.c for comments */
for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
if (!of_device_is_available(dn))
continue;
pcfg = parse_pcfg(dn);
if (pcfg) {
pcfg->public_cfg.pools = pools_sdqcr;
list_add_tail(&pcfg->list, &unused_pcfgs);
}
}
for_each_possible_cpu(cpu) {
if (cpumask_test_cpu(cpu, &want_shared)) {
pcfg = get_pcfg(&unused_pcfgs);
if (!pcfg)
break;
pcfg->public_cfg.cpu = cpu;
list_add_tail(&pcfg->list, &shared_pcfgs);
cpumask_set_cpu(cpu, &shared_cpus);
}
if (cpumask_test_cpu(cpu, &want_unshared)) {
if (cpumask_test_cpu(cpu, &shared_cpus))
continue;
pcfg = get_pcfg(&unused_pcfgs);
if (!pcfg)
break;
pcfg->public_cfg.cpu = cpu;
list_add_tail(&pcfg->list, &unshared_pcfgs);
cpumask_set_cpu(cpu, &unshared_cpus);
}
}
if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
for_each_possible_cpu(cpu) {
pcfg = get_pcfg(&unused_pcfgs);
if (!pcfg)
break;
pcfg->public_cfg.cpu = cpu;
list_add_tail(&pcfg->list, &unshared_pcfgs);
cpumask_set_cpu(cpu, &unshared_cpus);
}
}
cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
if (cpumask_empty(&slave_cpus)) {
if (!list_empty(&shared_pcfgs)) {
cpumask_or(&unshared_cpus, &unshared_cpus,
&shared_cpus);
cpumask_clear(&shared_cpus);
list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
INIT_LIST_HEAD(&shared_pcfgs);
}
} else {
if (list_empty(&shared_pcfgs)) {
pcfg = get_pcfg(&unshared_pcfgs);
if (!pcfg) {
pr_crit("No portals available!\n");
return 0;
}
cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
list_add_tail(&pcfg->list, &shared_pcfgs);
}
}
list_for_each_entry(pcfg, &unshared_pcfgs, list) {
pcfg->public_cfg.is_shared = 0;
p = init_pcfg(pcfg);
}
list_for_each_entry(pcfg, &shared_pcfgs, list) {
pcfg->public_cfg.is_shared = 1;
p = init_pcfg(pcfg);
if (p)
shared_portals[num_shared_portals++] = p;
}
if (!cpumask_empty(&slave_cpus))
for_each_cpu(cpu, &slave_cpus)
init_slave(cpu);
#else /* __rtems__ */
qman_init_portals();
#endif /* __rtems__ */
pr_info("Portals initialised\n");
#ifndef __rtems__
cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
for_each_cpu(cpu, &offline_cpus)
qman_offline_cpu(cpu);
#endif /* __rtems__ */
#ifdef CONFIG_HOTPLUG_CPU
register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
#endif
return 0;
}

__init int qman_resource_init(void)
{
#ifdef __rtems__
struct device_node of_dns;
#endif /* __rtems__ */
struct device_node *dn;
int ret;

/* Initialise FQID allocation ranges */
for_each_compatible_node(dn, NULL, "fsl,fqid-range") {
ret = fsl_fqid_range_init(dn);
if (ret)
return ret;
}
/* Initialise CGRID allocation ranges */
for_each_compatible_node(dn, NULL, "fsl,cgrid-range") {
ret = fsl_cgrid_range_init(dn);
if (ret)
return ret;
}
/* Parse pool channels into the allocator. (Must happen after portals
* are initialised.) */
for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
ret = fsl_pool_channel_range_init(dn);
if (ret)
return ret;
}

return 0;
}

@ -1,4 +1,4 @@
/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -34,93 +34,213 @@

#include <soc/fsl/qman.h>
#include <linux/iommu.h>

#if defined(CONFIG_FSL_PAMU)
#include <asm/fsl_pamu_stash.h>
#endif

struct qm_mcr_querywq {
u8 verb;
u8 result;
u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
u8 __reserved[28];
u32 wq_len[8];
} __packed;

static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
{
return wq->channel_wq >> 3;
}
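/*
 * Editor's note: channel_wq packs the channel id in the upper 13 bits and
 * the work queue in the 3 least-significant bits, so the helper above
 * recovers the channel by shifting the work-queue bits away. For example,
 * channel_wq = 0x0043 (binary 1000 011) yields channel 8, work queue 3.
 */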

struct __qm_mcr_querycongestion {
u32 state[8];
};

/* "Query Congestion Group State" */
struct qm_mcr_querycongestion {
u8 verb;
u8 result;
u8 __reserved[30];
/* Access this struct using qman_cgrs_get() */
struct __qm_mcr_querycongestion state;
} __packed;

/* "Query CGR" */
struct qm_mcr_querycgr {
u8 verb;
u8 result;
u16 __reserved1;
struct __qm_mc_cgr cgr; /* CGR fields */
u8 __reserved2[6];
u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
__be32 i_bcnt_lo; /* low 32-bits of 40-bit */
u8 __reserved3[3];
u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
__be32 a_bcnt_lo; /* low 32-bits of 40-bit */
__be32 cscn_targ_swp[4];
} __packed;

static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
{
return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
}
static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
{
return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
}
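/*
 * Editor's note: the two helpers above reassemble the 40-bit byte counts
 * that the hardware reports as an 8-bit high part plus a big-endian 32-bit
 * low part. For instance, i_bcnt_hi = 0x01 with i_bcnt_lo = cpu_to_be32(2)
 * makes qm_mcr_querycgr_i_get64() return 0x100000002.
 */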

/* "Query FQ Non-Programmable Fields" */

struct qm_mcr_queryfq_np {
u8 verb;
u8 result;
u8 __reserved1;
u8 state; /* QM_MCR_NP_STATE_*** */
u32 fqd_link; /* 24-bit, _res2[24-31] */
u16 odp_seq; /* 14-bit, _res3[14-15] */
u16 orp_nesn; /* 14-bit, _res4[14-15] */
u16 orp_ea_hseq; /* 15-bit, _res5[15] */
u16 orp_ea_tseq; /* 15-bit, _res6[15] */
u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
u8 __reserved2[5];
u8 is; /* 1-bit, _res12[1-7] */
u16 ics_surp;
u32 byte_cnt;
u32 frm_cnt; /* 24-bit, _res13[24-31] */
u32 __reserved3;
u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
u16 __reserved4;
u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
} __packed;

#define QM_MCR_NP_STATE_FE 0x10
#define QM_MCR_NP_STATE_R 0x08
#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
#define QM_MCR_NP_STATE_OOS 0x00
#define QM_MCR_NP_STATE_RETIRED 0x01
#define QM_MCR_NP_STATE_TEN_SCHED 0x02
#define QM_MCR_NP_STATE_TRU_SCHED 0x03
#define QM_MCR_NP_STATE_PARKED 0x04
#define QM_MCR_NP_STATE_ACTIVE 0x05
#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */

enum qm_mcr_queryfq_np_masks {
qm_mcr_fqd_link_mask = BIT(24)-1,
qm_mcr_odp_seq_mask = BIT(14)-1,
qm_mcr_orp_nesn_mask = BIT(14)-1,
qm_mcr_orp_ea_hseq_mask = BIT(15)-1,
qm_mcr_orp_ea_tseq_mask = BIT(15)-1,
qm_mcr_orp_ea_hptr_mask = BIT(24)-1,
qm_mcr_orp_ea_tptr_mask = BIT(24)-1,
qm_mcr_pfdr_hptr_mask = BIT(24)-1,
qm_mcr_pfdr_tptr_mask = BIT(24)-1,
qm_mcr_is_mask = BIT(1)-1,
qm_mcr_frm_cnt_mask = BIT(24)-1,
};
#define qm_mcr_np_get(np, field) \
((np)->field & (qm_mcr_##field##_mask))
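/*
 * Usage sketch (editor's addition): qm_mcr_np_get() pairs a raw field from
 * struct qm_mcr_queryfq_np with the width mask of the same name from
 * qm_mcr_queryfq_np_masks, e.g. after a successful qman_query_fq_np():
 *
 *	u32 frames = qm_mcr_np_get(&np, frm_cnt);  // 24-bit frame count
 *	u32 link = qm_mcr_np_get(&np, fqd_link);   // 24-bit FQD link
 *
 * The token pasting requires the field and its *_mask enumerator to stay
 * in sync.
 */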

/* Congestion Groups */

/* This wrapper represents a bit-array for the state of the 256 QMan congestion
/*
* This wrapper represents a bit-array for the state of the 256 QMan congestion
* groups. Is also used as a *mask* for congestion groups, eg. so we ignore
* those that don't concern us. We harness the structure and accessor details
* already used in the management command to query congestion groups.
*/
#define CGR_BITS_PER_WORD 5
#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD)
#define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f))
#define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)

struct qman_cgrs {
struct __qm_mcr_querycongestion q;
};

static inline void qman_cgrs_init(struct qman_cgrs *c)
{
memset(c, 0, sizeof(*c));
}

static inline void qman_cgrs_fill(struct qman_cgrs *c)
{
memset(c, 0xff, sizeof(*c));
}
static inline int qman_cgrs_get(struct qman_cgrs *c, int num)

static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
{
return QM_MCR_QUERYCONGESTION(&c->q, num);
}
static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
{
c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
}
static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
{
c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
}
static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
{
while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num))
;
return num;
return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
}
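/*
 * Worked example (editor's note): with CGR_BITS_PER_WORD = 5, each 32-bit
 * state word covers 32 congestion groups, most-significant bit first. For
 * cgr = 40: CGR_WORD(40) = 1 and CGR_BIT(40) = BIT(31) >> 8, i.e. bit 23 of
 * state[1], which is exactly the bit qman_cgrs_get() tests above.
 */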

static inline void qman_cgrs_cp(struct qman_cgrs *dest,
const struct qman_cgrs *src)
{
*dest = *src;
}

static inline void qman_cgrs_and(struct qman_cgrs *dest,
const struct qman_cgrs *a, const struct qman_cgrs *b)
{
int ret;
u32 *_d = dest->q.__state;
const u32 *_a = a->q.__state;
const u32 *_b = b->q.__state;
u32 *_d = dest->q.state;
const u32 *_a = a->q.state;
const u32 *_b = b->q.state;

for (ret = 0; ret < 8; ret++)
*(_d++) = *(_a++) & *(_b++);
*_d++ = *_a++ & *_b++;
}

static inline void qman_cgrs_xor(struct qman_cgrs *dest,
const struct qman_cgrs *a, const struct qman_cgrs *b)
{
int ret;
u32 *_d = dest->q.__state;
const u32 *_a = a->q.__state;
const u32 *_b = b->q.__state;
u32 *_d = dest->q.state;
const u32 *_a = a->q.state;
const u32 *_b = b->q.state;

for (ret = 0; ret < 8; ret++)
*(_d++) = *(_a++) ^ *(_b++);
*_d++ = *_a++ ^ *_b++;
}

/* used by CCSR and portal interrupt code */
enum qm_isr_reg {
qm_isr_status = 0,
qm_isr_enable = 1,
qm_isr_disable = 2,
qm_isr_inhibit = 3
};
void qman_init_cgr_all(void);

struct qm_portal_config {
/* Corenet portal addresses;
* [0]==cache-enabled, [1]==cache-inhibited. */
__iomem void *addr_virt[2];
/*
* Corenet portal addresses;
* [0]==cache-enabled, [1]==cache-inhibited.
*/
void __iomem *addr_virt[2];
#ifndef __rtems__
struct resource addr_phys[2];
struct device dev;
struct device *dev;
struct iommu_domain *iommu_domain;
/* Allow these to be joined in lists */
struct list_head list;
#endif /* __rtems__ */
/* User-visible portal configuration settings */
struct qman_portal_config public_cfg;
/* portal is affined to this cpu */
int cpu;
/* portal interrupt line */
int irq;
/*
* the portal's dedicated channel id, used initialising
* frame queues to target this portal when scheduled
*/
u16 channel;
/*
* mask of pool channels this portal has dequeue access to
* (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
*/
u32 pools;
};

/* Revision info (for errata and feature handling) */
@ -131,57 +251,70 @@ struct qm_portal_config {
#define QMAN_REV31 0x0301
extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */

extern u16 qman_portal_max;
#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
extern struct gen_pool *qm_fqalloc; /* FQID allocator */
extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
u32 qm_get_pools_sdqcr(void);

#ifdef CONFIG_FSL_QMAN_CONFIG
/* Hooks from qman_driver.c to qman_config.c */
int qman_init_ccsr(struct device_node *node);
int qman_wq_alloc(void);
void qman_liodn_fixup(u16 channel);
int qman_set_sdest(u16 channel, unsigned int cpu_idx);
size_t qman_fqd_size(void);
#endif

int qm_set_wpm(int wpm);
int qm_get_wpm(int *wpm);

/* Hooks from qman_driver.c in to qman_high.c */
struct qman_portal *qman_create_portal(
struct qman_portal *portal,
const struct qm_portal_config *config,
const struct qman_cgrs *cgrs);
void qman_set_sdest(u16 channel, unsigned int cpu_idx);

struct qman_portal *qman_create_affine_portal(
const struct qm_portal_config *config,
const struct qman_cgrs *cgrs);
struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
int cpu);
const struct qm_portal_config *qman_destroy_affine_portal(void);
void qman_destroy_portal(struct qman_portal *qm);

/* This CGR feature is supported by h/w and required by unit-tests and the
* debugfs hooks, so is implemented in the driver. However it allows an explicit
* corruption of h/w fields by s/w that are usually incorruptible (because the
* counters are usually maintained entirely within h/w). As such, we declare
* this API internally. */
int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
struct qm_mcr_cgrtestwrite *result);
/*
* qman_query_fq - Queries FQD fields (via h/w query command)
* @fq: the frame queue object to be queried
* @fqd: storage for the queried FQD fields
*/
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);

#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
/* If the fq object pointer is greater than the size of context_b field,
* then a lookup table is required. */
int qman_setup_fq_lookup_table(size_t num_entries);
#endif
/*
* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
* NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
* FQID(n) to fill in the frame queue ID.
*/
#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
#define QM_VDQCR_EXACT 0x40000000
#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)

#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */

/*
* qman_volatile_dequeue - Issue a volatile dequeue command
* @fq: the frame queue object to dequeue from
* @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
* @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
*
* Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
* The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
* the VDQCR is already in use, otherwise returns non-zero for failure. If
* QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
* the VDQCR command has finished executing (ie. once the callback for the last
* DQRR entry resulting from the VDQCR command has been called). If not using
* the FINISH flag, completion can be determined either by detecting the
* presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
* in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
* for the QMAN_FQ_STATE_VDQCR bit to disappear.
*/
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
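/*
 * Editor's sketch (not part of the original commit): one plausible call
 * sequence for the declaration above, following the QM_VDQCR_* composition
 * rules documented earlier in this header. The function name is hypothetical
 * and the fragment is fenced with #if 0 since QM_VDQCR_FQID() is only
 * defined further below in this file.
 */
#if 0
static int example_drain_fq(struct qman_fq *fq, u32 fqid)
{
	/* One PRECEDENCE, frame count "till empty", and the target FQID. */
	u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_TILLEMPTY |
	    QM_VDQCR_FQID(fqid);

	/* Block until the VDQCR is free and the command has completed. */
	return qman_volatile_dequeue(fq,
	    QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH, vdqcr);
}
#endif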

int qman_alloc_fq_table(u32 num_fqids);

/*************************************************/
/* QMan s/w corenet portal, low-level i/face */
/*************************************************/

/* Note: most functions are only used by the high-level interface, so are
* inlined from qman.h. The stuff below is for use by other parts of the
* driver. */

/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
/*
* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
* dequeue TYPE. Choose TOKEN (8-bit).
* If SOURCE == CHANNELS,
* Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
@ -216,42 +349,8 @@ int qman_setup_fq_lookup_table(size_t num_entries);
#define QM_VDQCR_FQID_MASK 0x00ffffff
#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)

/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT.
* If MODE==SCHEDULED
* Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE.
* If CHANNELS,
* Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels.
* You can choose DEDICATED_PRECEDENCE if the portal channel should have
* priority.
* If SPECIFICWQ,
* Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
* channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
* work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
* same value.
* If MODE==UNSCHEDULED
* Choose FQID().
*/
#define QM_PDQCR_MODE_SCHEDULED 0x0
#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000
#define QM_PDQCR_SCHEDULED_CHANNELS 0x0
#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000
#define QM_PDQCR_COUNT_EXACT1 0x0
#define QM_PDQCR_COUNT_UPTO3 0x20000000
#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000
#define QM_PDQCR_TYPE_MASK 0x03000000
#define QM_PDQCR_TYPE_NULL 0x0
#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000
#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000
#define QM_PDQCR_TYPE_ACTIVE 0x03000000
#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000
#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7
#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000
#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
#define QM_PDQCR_SPECIFICWQ_WQ(n) (n)
#define QM_PDQCR_FQID(n) ((n) & 0xffffff)

/* Used by all portal interrupt registers except 'inhibit'
/*
* Used by all portal interrupt registers except 'inhibit'
* Channels with frame availability
*/
#define QM_PIRQ_DQAVAIL 0x0000ffff
@ -263,31 +362,10 @@ int qman_setup_fq_lookup_table(size_t num_entries);
/* This mask contains all the "irqsource" bits visible to API users */
#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)

/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
* the disable register" rather than "disable the ability to write". */
#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
/* TODO: unfortunate name-clash here, reword? */
#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)

#ifdef CONFIG_FSL_QMAN_CONFIG
int qman_have_ccsr(void);
#else
#define qman_have_ccsr 0
#endif

#ifndef __rtems__
__init int qman_init(void);
#else /* __rtems__ */
int qman_init(struct device_node *dn);
#endif /* __rtems__ */
__init int qman_resource_init(void);

extern void *affine_portals[NR_CPUS];
extern struct qman_portal *affine_portals[NR_CPUS];
extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config(
struct qman_portal *portal);
#ifdef __rtems__
void qman_sysinit_portals(void);
#endif /* __rtems__ */

@ -1,61 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "qman_test.h"

MODULE_AUTHOR("Geoff Thorpe");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("QMan testing");

static int test_init(void)
{
int loop = 1;

while (loop--) {
#ifdef CONFIG_FSL_QMAN_TEST_STASH
qman_test_stash();
#endif
#ifdef CONFIG_FSL_QMAN_TEST_API
qman_test_api();
#endif
}
return 0;
}

static void test_exit(void)
{
}

module_init(test_init);
module_exit(test_exit);
@ -1,4 +1,4 @@
/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -28,17 +28,9 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include "qman_priv.h"

#include <soc/fsl/qman.h>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

void qman_test_stash(void);
void qman_test_api(void);
int qman_test_stash(void);
int qman_test_api(void);
@ -2,7 +2,7 @@

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@ -34,10 +34,6 @@

#include "qman_test.h"

/*************/
/* constants */
/*************/

#define CGR_ID		27
#define POOL_ID		2
#define FQ_FLAGS	QMAN_FQ_FLAG_DYNAMIC_FQID
@ -51,21 +47,13 @@
#define PORTAL_OPAQUE	((void *)0xf00dbeef)
#define VDQCR_FLAGS	(QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)

/*************************************/
/* Predeclarations (eg. for fq_base) */
/*************************************/

static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
					struct qman_fq *,
					const struct qm_dqrr_entry *);
static void cb_ern(struct qman_portal *, struct qman_fq *,
		   const struct qm_mr_entry *);
		   const union qm_mr_entry *);
static void cb_fqs(struct qman_portal *, struct qman_fq *,
		   const struct qm_mr_entry *);

/***************/
/* global vars */
/***************/
		   const union qm_mr_entry *);

static struct qm_fd fd, fd_dq;
static struct qman_fq fq_base = {
@ -76,67 +64,68 @@ static struct qman_fq fq_base = {
static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
static int retire_complete, sdqcr_complete;

/**********************/
/* internal functions */
/**********************/

/* Helpers for initialising and "incrementing" a frame descriptor */
static void fd_init(struct qm_fd *__fd)
static void fd_init(struct qm_fd *fd)
{
	qm_fd_addr_set64(__fd, 0xabdeadbeefLLU);
	__fd->format = qm_fd_contig_big;
	__fd->length29 = 0x0000ffff;
	__fd->cmd = 0xfeedf00d;
	qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
	qm_fd_set_contig_big(fd, 0x0000ffff);
	fd->cmd = cpu_to_be32(0xfeedf00d);
}

static void fd_inc(struct qm_fd *__fd)
static void fd_inc(struct qm_fd *fd)
{
	u64 t = qm_fd_addr_get64(__fd);
	u64 t = qm_fd_addr_get64(fd);
	int z = t >> 40;
	unsigned int len, off;
	enum qm_fd_format fmt;

	t <<= 1;
	if (z)
		t |= 1;
	qm_fd_addr_set64(__fd, t);
	__fd->length29--;
	__fd->cmd++;
	qm_fd_addr_set64(fd, t);

	fmt = qm_fd_get_format(fd);
	off = qm_fd_get_offset(fd);
	len = qm_fd_get_length(fd);
	len--;
	qm_fd_set_param(fd, fmt, off, len);

	fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1);
}

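/*
 * [Editor's sketch - not part of the upstream diff. The hunk above swaps
 * direct pokes at qm_fd bitfields (format/length29/cmd) for the accessor
 * API, which also makes the big-endian layout of 'cmd' explicit. A
 * minimal frame-descriptor setup using only the helpers seen in this
 * diff; the function name is illustrative:
 */
static void example_fd_setup(struct qm_fd *fd, dma_addr_t addr, u32 len)
{
	memset(fd, 0, sizeof(*fd));
	qm_fd_addr_set64(fd, addr);	/* 48-bit buffer address */
	qm_fd_set_contig_big(fd, len);	/* contiguous "big" format + length */
	fd->cmd = cpu_to_be32(0);	/* 'cmd' is stored big-endian now */
}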
/* The only part of the 'fd' we can't memcmp() is the ppid */
static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b)
{
	int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
	bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b);

	if (!r)
		r = a->format - b->format;
	if (!r)
		r = a->opaque - b->opaque;
	if (!r)
		r = a->cmd - b->cmd;
	return r;
	neq |= qm_fd_get_format(a) != qm_fd_get_format(b);
	neq |= a->cfg != b->cfg;
	neq |= a->cmd != b->cmd;

	return neq;
}

/********/
/* test */
/********/

static void do_enqueues(struct qman_fq *fq)
static int do_enqueues(struct qman_fq *fq)
{
	unsigned int loop;
	int err = 0;

	for (loop = 0; loop < NUM_ENQUEUES; loop++) {
		if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT |
				(((loop + 1) == NUM_ENQUEUES) ?
				QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0)))
			panic("qman_enqueue() failed\n");
		if (qman_enqueue(fq, &fd)) {
			pr_crit("qman_enqueue() failed\n");
			err = -EIO;
		}
		fd_inc(&fd);
	}

	return err;
}

void qman_test_api(void)
int qman_test_api(void)
{
	u32 flags;
	int res;
	u32 flags, frmcnt;
	int err;
	struct qman_fq *fq = &fq_base;

	pr_info("%s(): Starting\n", __func__);
@ -144,57 +133,93 @@ void qman_test_api(void)
	fd_init(&fd_dq);

	/* Initialise (parked) FQ */
	if (qman_create_fq(0, FQ_FLAGS, fq))
		panic("qman_create_fq() failed\n");
	if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
		panic("qman_init_fq() failed\n");

	err = qman_create_fq(0, FQ_FLAGS, fq);
	if (err) {
		pr_crit("qman_create_fq() failed\n");
		goto failed;
	}
	err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
	if (err) {
		pr_crit("qman_init_fq() failed\n");
		goto failed;
	}
	/* Do enqueues + VDQCR, twice. (Parked FQ) */
	do_enqueues(fq);
	err = do_enqueues(fq);
	if (err)
		goto failed;
	pr_info("VDQCR (till-empty);\n");
	if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
			QM_VDQCR_NUMFRAMES_TILLEMPTY))
		panic("qman_volatile_dequeue() failed\n");
	do_enqueues(fq);
	frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
	if (err) {
		pr_crit("qman_volatile_dequeue() failed\n");
		goto failed;
	}
	err = do_enqueues(fq);
	if (err)
		goto failed;
	pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
	if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
			QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL)))
		panic("qman_volatile_dequeue() failed\n");
	frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
	if (err) {
		pr_crit("qman_volatile_dequeue() failed\n");
		goto failed;
	}
	pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
		NUM_ENQUEUES);
	if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
			QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL)))
		panic("qman_volatile_dequeue() failed\n");
	frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
	if (err) {
		pr_err("qman_volatile_dequeue() failed\n");
		goto failed;
	}

	do_enqueues(fq);
	err = do_enqueues(fq);
	if (err)
		goto failed;
	pr_info("scheduled dequeue (till-empty)\n");
	if (qman_schedule_fq(fq))
		panic("qman_schedule_fq() failed\n");
	err = qman_schedule_fq(fq);
	if (err) {
		pr_crit("qman_schedule_fq() failed\n");
		goto failed;
	}
	wait_event(waitqueue, sdqcr_complete);

	/* Retire and OOS the FQ */
	res = qman_retire_fq(fq, &flags);
	if (res < 0)
		panic("qman_retire_fq() failed\n");
	err = qman_retire_fq(fq, &flags);
	if (err < 0) {
		pr_crit("qman_retire_fq() failed\n");
		goto failed;
	}
	wait_event(waitqueue, retire_complete);
	if (flags & QMAN_FQ_STATE_BLOCKOOS)
		panic("leaking frames\n");
	if (qman_oos_fq(fq))
		panic("qman_oos_fq() failed\n");
	qman_destroy_fq(fq, 0);
	if (flags & QMAN_FQ_STATE_BLOCKOOS) {
		err = -EIO;
		pr_crit("leaking frames\n");
		goto failed;
	}
	err = qman_oos_fq(fq);
	if (err) {
		pr_crit("qman_oos_fq() failed\n");
		goto failed;
	}
	qman_destroy_fq(fq);
	pr_info("%s(): Finished\n", __func__);
	return 0;

failed:
	WARN_ON(1);
	return err;
}

static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
					struct qman_fq *fq,
					const struct qm_dqrr_entry *dq)
{
	if (fd_cmp(&fd_dq, &dq->fd)) {
	if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
		pr_err("BADNESS: dequeued frame doesn't match;\n");
		BUG();
		return qman_cb_dqrr_consume;
	}
	fd_inc(&fd_dq);
	if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
	if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) {
		sdqcr_complete = 1;
		wake_up(&waitqueue);
	}
@ -202,18 +227,22 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
}

static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
		   const union qm_mr_entry *msg)
{
	panic("cb_ern() unimplemented");
	pr_crit("cb_ern() unimplemented");
	WARN_ON(1);
}

static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
		   const union qm_mr_entry *msg)
{
	u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);

	if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI))
		panic("unexpected FQS message");
	if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) {
		pr_crit("unexpected FQS message");
		WARN_ON(1);
		return;
	}
#ifndef __rtems__
	pr_info("Retirement message received\n");
#endif /* __rtems__ */

@ -2,7 +2,7 @@

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@ -35,14 +35,15 @@
#include "qman_test.h"

#include <linux/dma-mapping.h>
#include <linux/delay.h>
#ifdef __rtems__
#include <rtems/malloc.h>
#undef msleep
#define msleep(x) usleep((x) * 1000)
#define L1_CACHE_BYTES 64
#endif /* __rtems__ */

/* Algorithm:
/*
 * Algorithm:
 *
 * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
 * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
@ -86,23 +87,28 @@
 * initialisation targets the correct cpu.
 */
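/*
 * [Editor's note - illustrative, not part of the upstream diff. As the
 * algorithm description above explains, init_phase2() below chains the
 * n'th handler of each cpu together into one ring; e.g. with 2 cpus and
 * HP_PER_CPU = 2 a frame travels
 *   cpu0.h0 -> cpu1.h0 -> cpu0.h1 -> cpu1.h1 -> cpu0.h0 -> ...
 * so every hop changes cpu and/or handler, exercising dequeue stashing
 * on each pass.]
 */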

/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes
 * the fn from irq context, which is too restrictive). */
/*
 * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
 * the fn from irq context, which is too restrictive).
 */
struct bstrap {
	void (*fn)(void);
	int (*fn)(void);
	atomic_t started;
};
static int bstrap_fn(void *__bstrap)
static int bstrap_fn(void *bs)
{
	struct bstrap *bstrap = __bstrap;
	struct bstrap *bstrap = bs;
	int err;

	atomic_inc(&bstrap->started);
	bstrap->fn();
	err = bstrap->fn();
	if (err)
		return err;
	while (!kthread_should_stop())
		msleep(1);
		msleep(20);
	return 0;
}
static int on_all_cpus(void (*fn)(void))
static int on_all_cpus(int (*fn)(void))
{
	int cpu;

@ -127,12 +133,14 @@ static int on_all_cpus(void (*fn)(void))
		return -ENOMEM;
	kthread_bind(k, cpu);
	wake_up_process(k);
	/* If we call kthread_stop() before the "wake up" has had an
	/*
	 * If we call kthread_stop() before the "wake up" has had an
	 * effect, then the thread may exit with -EINTR without ever
	 * running the function. So poll until it's started before
	 * requesting it to stop. */
	 * requesting it to stop.
	 */
	while (!atomic_read(&bstrap.started))
		msleep(10);
		msleep(20);
	ret = kthread_stop(k);
	if (ret)
		return ret;
@ -172,8 +180,10 @@ struct hp_cpu {
	struct list_head handlers;
	/* list node for linking us into 'hp_cpu_list' */
	struct list_head node;
	/* when repeatedly scanning 'hp_list', each time linking the n'th
	 * handlers together, this is used as per-cpu iterator state */
	/*
	 * when repeatedly scanning 'hp_list', each time linking the n'th
	 * handlers together, this is used as per-cpu iterator state
	 */
	struct hp_handler *iterator;
};

@ -182,7 +192,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);

/* links together the hp_cpu structs, in first-come first-serve order. */
static LIST_HEAD(hp_cpu_list);
static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
static DEFINE_SPINLOCK(hp_lock);

static unsigned int hp_cpu_list_length;

@ -202,6 +212,9 @@ static u32 *frame_ptr;
static dma_addr_t frame_dma;
#endif /* __rtems__ */

/* needed for dma_map*() */
static const struct qm_portal_config *pcfg;

/* the main function waits on this */
static DECLARE_WAIT_QUEUE_HEAD(queue);

@ -217,22 +230,28 @@ static inline u32 do_lfsr(u32 prev)
	return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
}
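/*
 * [Editor's note - illustrative, not part of the upstream diff. do_lfsr()
 * is a 32-bit Galois LFSR: each call shifts right one bit and, iff the
 * bit shifted out was set, XORs in the tap mask 0xd0000001. Starting from
 * a fixed seed (HP_FIRST_WORD) the sequence is fully deterministic, which
 * is what lets every handler regenerate and verify each word of a frame.
 * A hypothetical helper to compute the n'th state:
 */
static u32 example_lfsr_nth(u32 seed, unsigned int n)
{
	while (n--)
		seed = do_lfsr(seed);	/* example_lfsr_nth(s, 0) == s */
	return seed;
}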

static void allocate_frame_data(void)
static int allocate_frame_data(void)
{
	u32 lfsr = HP_FIRST_WORD;
	int loop;
#ifndef __rtems__
	struct platform_device *pdev = platform_device_alloc("foobar", -1);

	if (!pdev)
		panic("platform_device_alloc() failed");
	if (platform_device_add(pdev))
		panic("platform_device_add() failed");
#ifndef __rtems__
	if (!qman_dma_portal) {
		pr_crit("portal not available\n");
		return -EIO;
	}

	pcfg = qman_get_qm_portal_config(qman_dma_portal);
#else /* __rtems__ */
	pcfg = qman_get_qm_portal_config(qman_get_affine_portal(0));
#endif /* __rtems__ */

#ifndef __rtems__
	__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
	if (!__frame_ptr)
		panic("kmalloc() failed");
	frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) &
				~(unsigned long)63);
		return -ENOMEM;

	frame_ptr = PTR_ALIGN(__frame_ptr, 64);
#else /* __rtems__ */
	frame_ptr = rtems_heap_allocate_aligned_with_boundary(4 * HP_NUM_WORDS, 64, 0);
	if (frame_ptr == NULL)
@ -242,37 +261,50 @@ static void allocate_frame_data(void)
		frame_ptr[loop] = lfsr;
		lfsr = do_lfsr(lfsr);
	}

#ifndef __rtems__
	frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
	frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
				   DMA_BIDIRECTIONAL);
	platform_device_del(pdev);
	platform_device_put(pdev);
	if (dma_mapping_error(pcfg->dev, frame_dma)) {
		pr_crit("dma mapping failure\n");
		kfree(__frame_ptr);
		return -EIO;
	}

#endif /* __rtems__ */
	return 0;
}

static void deallocate_frame_data(void)
{
#ifndef __rtems__
	kfree(__frame_ptr);
	dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
			 DMA_BIDIRECTIONAL);
#endif /* __rtems__ */
}

static inline void process_frame_data(struct hp_handler *handler,
static inline int process_frame_data(struct hp_handler *handler,
				      const struct qm_fd *fd)
{
	u32 *p = handler->frame_ptr;
	u32 lfsr = HP_FIRST_WORD;
	int loop;

	if (qm_fd_addr_get64(fd) != handler->addr)
		panic("bad frame address");
	if (qm_fd_addr_get64(fd) != handler->addr) {
		pr_crit("bad frame address, [%llX != %llX]\n",
			qm_fd_addr_get64(fd), handler->addr);
		return -EIO;
	}
	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
		*p ^= handler->rx_mixer;
		if (*p != lfsr)
			panic("corrupt frame data");
		if (*p != lfsr) {
			pr_crit("corrupt frame data");
			return -EIO;
		}
		*p ^= handler->tx_mixer;
		lfsr = do_lfsr(lfsr);
	}
	return 0;
}

static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
@ -281,9 +313,15 @@ static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
{
	struct hp_handler *handler = (struct hp_handler *)fq;

	process_frame_data(handler, &dqrr->fd);
	if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
		panic("qman_enqueue() failed");
	if (process_frame_data(handler, &dqrr->fd)) {
		WARN_ON(1);
		goto skip;
	}
	if (qman_enqueue(&handler->tx, &dqrr->fd)) {
		pr_crit("qman_enqueue() failed");
		WARN_ON(1);
	}
skip:
	return qman_cb_dqrr_consume;
}

@ -295,20 +333,24 @@ static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,

	process_frame_data(handler, &dqrr->fd);
	if (++loop_counter < HP_LOOPS) {
		if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
			panic("qman_enqueue() failed");
		if (qman_enqueue(&handler->tx, &dqrr->fd)) {
			pr_crit("qman_enqueue() failed");
			WARN_ON(1);
			goto skip;
		}
	} else {
		pr_info("Received final (%dth) frame\n", loop_counter);
		wake_up(&queue);
	}
skip:
	return qman_cb_dqrr_consume;
}

static void create_per_cpu_handlers(void)
static int create_per_cpu_handlers(void)
{
	struct hp_handler *handler;
	int loop;
	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
	struct hp_cpu *hp_cpu = this_cpu_ptr(hp_cpus);

	hp_cpu->processor_id = smp_processor_id();
	spin_lock(&hp_lock);
@ -318,8 +360,11 @@ static void create_per_cpu_handlers(void)
	INIT_LIST_HEAD(&hp_cpu->handlers);
	for (loop = 0; loop < HP_PER_CPU; loop++) {
		handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
		if (!handler)
			panic("kmem_cache_alloc() failed");
		if (!handler) {
			pr_crit("kmem_cache_alloc() failed");
			WARN_ON(1);
			return -EIO;
		}
		handler->processor_id = hp_cpu->processor_id;
#ifndef __rtems__
		handler->addr = frame_dma;
@ -329,31 +374,39 @@ static void create_per_cpu_handlers(void)
		handler->frame_ptr = frame_ptr;
		list_add_tail(&handler->node, &hp_cpu->handlers);
	}
	return 0;
}

static void destroy_per_cpu_handlers(void)
static int destroy_per_cpu_handlers(void)
{
	struct list_head *loop, *tmp;
	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
	struct hp_cpu *hp_cpu = this_cpu_ptr(hp_cpus);

	spin_lock(&hp_lock);
	list_del(&hp_cpu->node);
	spin_unlock(&hp_lock);
	list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
		u32 flags;
		u32 flags = 0;
		struct hp_handler *handler = list_entry(loop, struct hp_handler,
							node);
		if (qman_retire_fq(&handler->rx, &flags))
			panic("qman_retire_fq(rx) failed");
		BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
		if (qman_oos_fq(&handler->rx))
			panic("qman_oos_fq(rx) failed");
		qman_destroy_fq(&handler->rx, 0);
		qman_destroy_fq(&handler->tx, 0);
		if (qman_retire_fq(&handler->rx, &flags) ||
		    (flags & QMAN_FQ_STATE_BLOCKOOS)) {
			pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
			WARN_ON(1);
			return -EIO;
		}
		if (qman_oos_fq(&handler->rx)) {
			pr_crit("qman_oos_fq(rx) failed");
			WARN_ON(1);
			return -EIO;
		}
		qman_destroy_fq(&handler->rx);
		qman_destroy_fq(&handler->tx);
		qman_release_fqid(handler->fqid_rx);
		list_del(&handler->node);
		kmem_cache_free(hp_handler_slab, handler);
	}
	return 0;
}

static inline u8 num_cachelines(u32 offset)
@ -369,36 +422,59 @@ static inline u8 num_cachelines(u32 offset)
#define STASH_CTX_CL \
	num_cachelines(offsetof(struct hp_handler, fqid_rx))

static void init_handler(void *__handler)
static int init_handler(void *h)
{
	struct qm_mcc_initfq opts;
	struct hp_handler *handler = __handler;
	struct hp_handler *handler = h;
	int err;

	BUG_ON(handler->processor_id != smp_processor_id());
	if (handler->processor_id != smp_processor_id()) {
		err = -EIO;
		goto failed;
	}
	/* Set up rx */
	memset(&handler->rx, 0, sizeof(handler->rx));
	if (handler == special_handler)
		handler->rx.cb.dqrr = special_dqrr;
	else
		handler->rx.cb.dqrr = normal_dqrr;
	if (qman_create_fq(handler->fqid_rx, 0, &handler->rx))
		panic("qman_create_fq(rx) failed");
	err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
	if (err) {
		pr_crit("qman_create_fq(rx) failed");
		goto failed;
	}
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
	opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
	opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL;
	opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL;
	if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
			 QMAN_INITFQ_FLAG_LOCAL, &opts))
		panic("qman_init_fq(rx) failed");
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
				   QM_INITFQ_WE_CONTEXTA);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
	qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
	err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
			   QMAN_INITFQ_FLAG_LOCAL, &opts);
	if (err) {
		pr_crit("qman_init_fq(rx) failed");
		goto failed;
	}
	/* Set up tx */
	memset(&handler->tx, 0, sizeof(handler->tx));
	if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
			   &handler->tx))
		panic("qman_create_fq(tx) failed");
	err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
			     &handler->tx);
	if (err) {
		pr_crit("qman_create_fq(tx) failed");
		goto failed;
	}

static void init_phase2(void)
	return 0;
failed:
	return err;
}

static void init_handler_cb(void *h)
{
	if (init_handler(h))
		WARN_ON(1);
}

static int init_phase2(void)
{
	int loop;
	u32 fqid = 0;
@ -408,7 +484,7 @@ static void init_phase2(void)

	for (loop = 0; loop < HP_PER_CPU; loop++) {
		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
			int ret;
			int err;

			if (!loop)
				hp_cpu->iterator = list_first_entry(
@ -421,9 +497,11 @@ static void init_phase2(void)
			/* Rx FQID is the previous handler's Tx FQID */
			hp_cpu->iterator->fqid_rx = fqid;
			/* Allocate new FQID for Tx */
			ret = qman_alloc_fqid(&fqid);
			if (ret)
				panic("qman_alloc_fqid() failed");
			err = qman_alloc_fqid(&fqid);
			if (err) {
				pr_crit("qman_alloc_fqid() failed");
				return err;
			}
			hp_cpu->iterator->fqid_tx = fqid;
			/* Rx mixer is the previous handler's Tx mixer */
			hp_cpu->iterator->rx_mixer = lfsr;
@ -435,16 +513,18 @@ static void init_phase2(void)
	/* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
	hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
	handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
	BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef));
	if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
		return 1;
	handler->fqid_rx = fqid;
	handler->rx_mixer = lfsr;
	/* and tag it as our "special" handler */
	special_handler = handler;
	return 0;
}

static void init_phase3(void)
static int init_phase3(void)
{
	int loop;
	int loop, err;
	struct hp_cpu *hp_cpu;

	for (loop = 0; loop < HP_PER_CPU; loop++) {
@ -458,45 +538,69 @@ static void init_phase3(void)
					hp_cpu->iterator->node.next,
					struct hp_handler, node);
		preempt_disable();
		if (hp_cpu->processor_id == smp_processor_id())
			init_handler(hp_cpu->iterator);
		else
		if (hp_cpu->processor_id == smp_processor_id()) {
			err = init_handler(hp_cpu->iterator);
			if (err)
				return err;
		} else {
			smp_call_function_single(hp_cpu->processor_id,
				init_handler, hp_cpu->iterator, 1);
				init_handler_cb, hp_cpu->iterator, 1);
		}
		preempt_enable();
	}
}
	return 0;
}

static void send_first_frame(void *ignore)
static int send_first_frame(void *ignore)
{
	u32 *p = special_handler->frame_ptr;
	u32 lfsr = HP_FIRST_WORD;
	int loop;
	int loop, err;
	struct qm_fd fd;

	BUG_ON(special_handler->processor_id != smp_processor_id());
	if (special_handler->processor_id != smp_processor_id()) {
		err = -EIO;
		goto failed;
	}
	memset(&fd, 0, sizeof(fd));
	qm_fd_addr_set64(&fd, special_handler->addr);
	fd.format = qm_fd_contig_big;
	fd.length29 = HP_NUM_WORDS * 4;
	qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
		if (*p != lfsr)
			panic("corrupt frame data");
		if (*p != lfsr) {
			err = -EIO;
			pr_crit("corrupt frame data");
			goto failed;
		}
		*p ^= special_handler->tx_mixer;
		lfsr = do_lfsr(lfsr);
	}
	pr_info("Sending first frame\n");
	if (qman_enqueue(&special_handler->tx, &fd, 0))
		panic("qman_enqueue() failed");
	err = qman_enqueue(&special_handler->tx, &fd);
	if (err) {
		pr_crit("qman_enqueue() failed");
		goto failed;
	}

void qman_test_stash(void)
	return 0;
failed:
	return err;
}

static void send_first_frame_cb(void *ignore)
{
	if (send_first_frame(NULL))
		WARN_ON(1);
}

int qman_test_stash(void)
{
	int err;

#ifndef __rtems__
	if (cpumask_weight(cpu_online_mask) < 2) {
		pr_info("%s(): skip - only 1 CPU\n", __func__);
		return;
		return 0;
	}
#endif /* __rtems__ */

@ -507,34 +611,57 @@ void qman_test_stash(void)
	hp_handler_slab = kmem_cache_create("hp_handler_slab",
			sizeof(struct hp_handler), L1_CACHE_BYTES,
			SLAB_HWCACHE_ALIGN, NULL);
	if (!hp_handler_slab)
		panic("kmem_cache_create() failed");
	if (!hp_handler_slab) {
		err = -EIO;
		pr_crit("kmem_cache_create() failed");
		goto failed;
	}

	allocate_frame_data();
	err = allocate_frame_data();
	if (err)
		goto failed;

	/* Init phase 1 */
	pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
	if (on_all_cpus(create_per_cpu_handlers))
		panic("on_each_cpu() failed");
	if (on_all_cpus(create_per_cpu_handlers)) {
		err = -EIO;
		pr_crit("on_each_cpu() failed");
		goto failed;
	}
	pr_info("Number of cpus: %d, total of %d handlers\n",
		hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);

	init_phase2();
	err = init_phase2();
	if (err)
		goto failed;

	init_phase3();
	err = init_phase3();
	if (err)
		goto failed;

	preempt_disable();
	if (special_handler->processor_id == smp_processor_id())
		send_first_frame(NULL);
	else
	if (special_handler->processor_id == smp_processor_id()) {
		err = send_first_frame(NULL);
		if (err)
			goto failed;
	} else {
		smp_call_function_single(special_handler->processor_id,
			send_first_frame, NULL, 1);
			send_first_frame_cb, NULL, 1);
	}
	preempt_enable();

	wait_event(queue, loop_counter == HP_LOOPS);
	deallocate_frame_data();
	if (on_all_cpus(destroy_per_cpu_handlers))
		panic("on_each_cpu() failed");
	if (on_all_cpus(destroy_per_cpu_handlers)) {
		err = -EIO;
		pr_crit("on_each_cpu() failed");
		goto failed;
	}
	kmem_cache_destroy(hp_handler_slab);
	pr_info("%s(): Finished\n", __func__);

	return 0;
failed:
	WARN_ON(1);
	return err;
}

@ -1,309 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * * Neither the name of Freescale Semiconductor nor the
 * names of its contributors may be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

/* --- FQID Pool --- */

struct qman_fqid_pool {
	/* Base and size of the FQID range */
	u32 fqid_base;
	u32 total;
	/* Number of FQIDs currently "allocated" */
	u32 used;
	/* Allocation optimisation. When 'used<total', it is the index of an
	 * available FQID. Otherwise there are no available FQIDs, and this
	 * will be set when the next deallocation occurs. */
	u32 next;
	/* A bit-field representation of the FQID range. */
	unsigned long *bits;
};

#define QLONG_BYTES	sizeof(unsigned long)
#define QLONG_BITS	(QLONG_BYTES * 8)
/* Number of 'longs' required for the given number of bits */
#define QNUM_LONGS(b)	(((b) + QLONG_BITS - 1) / QLONG_BITS)
/* Shorthand for the number of bytes of same (kmalloc, memset, etc) */
#define QNUM_BYTES(b)	(QNUM_LONGS(b) * QLONG_BYTES)
/* And in bits */
#define QNUM_BITS(b)	(QNUM_LONGS(b) * QLONG_BITS)
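/*
 * [Worked example - editor's illustration, not part of the deleted file.
 * On a 64-bit target QLONG_BITS is 64, so for a pool of num = 100 FQIDs:
 *   QNUM_LONGS(100) = (100 + 63) / 64 = 2 longs,
 *   QNUM_BYTES(100) = 2 * 8 = 16 bytes for the kzalloc() below,
 *   QNUM_BITS(100)  = 2 * 64 = 128 bits,
 * and qman_fqid_pool_create() marks the trailing bits past 'num' as
 * permanently allocated so the search helpers never hand them out.]
 */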

struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num)
{
	struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	unsigned int i;

	BUG_ON(!num);
	if (!pool)
		return NULL;
	pool->fqid_base = fqid_start;
	pool->total = num;
	pool->used = 0;
	pool->next = 0;
	pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL);
	if (!pool->bits) {
		kfree(pool);
		return NULL;
	}
	/* If num is not an even multiple of QLONG_BITS (or even 8, for
	 * byte-oriented searching) then we fill the trailing bits with 1, to
	 * make them look allocated (permanently). */
	for (i = num + 1; i < QNUM_BITS(num); i++)
		set_bit(i, pool->bits);
	return pool;
}
EXPORT_SYMBOL(qman_fqid_pool_create);

int qman_fqid_pool_destroy(struct qman_fqid_pool *pool)
{
	int ret = pool->used;

	kfree(pool->bits);
	kfree(pool);
	return ret;
}
EXPORT_SYMBOL(qman_fqid_pool_destroy);

int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid)
{
	int ret;

	if (pool->used == pool->total)
		return -ENOMEM;
	*fqid = pool->fqid_base + pool->next;
	ret = test_and_set_bit(pool->next, pool->bits);
	BUG_ON(ret);
	if (++pool->used == pool->total)
		return 0;
	pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next);
	if (pool->next >= pool->total)
		pool->next = find_first_zero_bit(pool->bits, pool->total);
	BUG_ON(pool->next >= pool->total);
	return 0;
}
EXPORT_SYMBOL(qman_fqid_pool_alloc);

void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid)
{
	int ret;

	fqid -= pool->fqid_base;
	ret = test_and_clear_bit(fqid, pool->bits);
	BUG_ON(!ret);
	if (pool->used-- == pool->total)
		pool->next = fqid;
}
EXPORT_SYMBOL(qman_fqid_pool_free);

u32 qman_fqid_pool_used(struct qman_fqid_pool *pool)
{
	return pool->used;
}
EXPORT_SYMBOL(qman_fqid_pool_used);

static DECLARE_DPAA_RESOURCE(fqalloc); /* FQID allocator */
static DECLARE_DPAA_RESOURCE(qpalloc); /* pool-channel allocator */
static DECLARE_DPAA_RESOURCE(cgralloc); /* CGR ID allocator */

/* FQID allocator front-end */

int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
{
	return dpaa_resource_new(&fqalloc, result, count, align, partial);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);

static int fq_cleanup(u32 fqid)
{
	return qman_shutdown_fq(fqid) == 0;
}

void qman_release_fqid_range(u32 fqid, u32 count)
{
	u32 total_invalid = dpaa_resource_release(&fqalloc,
						  fqid, count, fq_cleanup);

	if (total_invalid)
		pr_err("FQID range [%d..%d] (%d) had %d leaks\n",
			fqid, fqid + count - 1, count, total_invalid);
}
EXPORT_SYMBOL(qman_release_fqid_range);

int qman_reserve_fqid_range(u32 fqid, u32 count)
{
	return dpaa_resource_reserve(&fqalloc, fqid, count);
}
EXPORT_SYMBOL(qman_reserve_fqid_range);

void qman_seed_fqid_range(u32 fqid, u32 count)
{
	dpaa_resource_seed(&fqalloc, fqid, count);
}
EXPORT_SYMBOL(qman_seed_fqid_range);

/* Pool-channel allocator front-end */

int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
{
	return dpaa_resource_new(&qpalloc, result, count, align, partial);
}
EXPORT_SYMBOL(qman_alloc_pool_range);

static int qpool_cleanup(u32 qp)
{
	/* We query all FQDs starting from
	 * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
	 * whose destination channel is the pool-channel being released.
	 * When a non-OOS FQD is found we attempt to clean it up */
	struct qman_fq fq = {
		.fqid = 1
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err)
			/* FQID range exceeded, found no problems */
			return 1;
		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			BUG_ON(err);
			if (fqd.dest.channel == qp) {
				/* The channel is the FQ's target, clean it */
				if (qman_shutdown_fq(fq.fqid) != 0)
					/* Couldn't shut down the FQ
					   so the pool must be leaked */
					return 0;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

void qman_release_pool_range(u32 qp, u32 count)
{
	u32 total_invalid = dpaa_resource_release(&qpalloc,
						  qp, count, qpool_cleanup);

	if (total_invalid) {
		/* Pool channels are almost always used individually */
		if (count == 1)
			pr_err("Pool channel 0x%x had %d leaks\n",
				qp, total_invalid);
		else
			pr_err("Pool channels [%d..%d] (%d) had %d leaks\n",
				qp, qp + count - 1, count, total_invalid);
	}
}
EXPORT_SYMBOL(qman_release_pool_range);

void qman_seed_pool_range(u32 poolid, u32 count)
{
	dpaa_resource_seed(&qpalloc, poolid, count);

}
EXPORT_SYMBOL(qman_seed_pool_range);

int qman_reserve_pool_range(u32 poolid, u32 count)
{
	return dpaa_resource_reserve(&qpalloc, poolid, count);
}
EXPORT_SYMBOL(qman_reserve_pool_range);


/* CGR ID allocator front-end */

int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
{
	return dpaa_resource_new(&cgralloc, result, count, align, partial);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);

static int cqr_cleanup(u32 cgrid)
{
	/* We query all FQDs starting from
	 * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
	 * whose CGR is the CGR being released.
	 */
	struct qman_fq fq = {
		.fqid = 1
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err)
			/* FQID range exceeded, found no problems */
			return 1;
		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			BUG_ON(err);
			if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
			    (fqd.cgid == cgrid)) {
				pr_err("CRGID 0x%x is being used by FQID 0x%x,"
				       " CGR will be leaked\n",
				       cgrid, fq.fqid);
				return 1;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

void qman_release_cgrid_range(u32 cgrid, u32 count)
{
	u32 total_invalid = dpaa_resource_release(&cgralloc,
						  cgrid, count, cqr_cleanup);
	if (total_invalid)
		pr_err("CGRID range [%d..%d] (%d) had %d leaks\n",
			cgrid, cgrid + count - 1, count, total_invalid);
}
EXPORT_SYMBOL(qman_release_cgrid_range);

void qman_seed_cgrid_range(u32 cgrid, u32 count)
{
	dpaa_resource_seed(&cgralloc, cgrid, count);

}
EXPORT_SYMBOL(qman_seed_cgrid_range);
@ -1,4 +1,4 @@
/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@ -31,436 +31,87 @@
#ifndef __FSL_BMAN_H
#define __FSL_BMAN_H

#ifdef __cplusplus
extern "C" {
#endif

/* Enable blocking waits */
#define FSL_DPA_CAN_WAIT	1
#define FSL_DPA_CAN_WAIT_SYNC	1

/* Last updated for v00.79 of the BG */

/* Portal processing (interrupt) sources */
#define BM_PIRQ_RCRI	0x00000002	/* RCR Ring (below threshold) */
#define BM_PIRQ_BSCN	0x00000001	/* Buffer depletion State Change */

/* This wrapper represents a bit-array for the depletion state of the 64 BMan
 * buffer pools. */
struct bman_depletion {
	u32 __state[2];
};
#define BMAN_DEPLETION_EMPTY { { 0x00000000, 0x00000000 } }
#define BMAN_DEPLETION_FULL { { 0xffffffff, 0xffffffff } }
#define __bmdep_word(x) ((x) >> 5)
#define __bmdep_shift(x) ((x) & 0x1f)
#define __bmdep_bit(x) (0x80000000 >> __bmdep_shift(x))
static inline void bman_depletion_init(struct bman_depletion *c)
{
	c->__state[0] = c->__state[1] = 0;
}
static inline void bman_depletion_fill(struct bman_depletion *c)
{
	c->__state[0] = c->__state[1] = ~0;
}
static inline int bman_depletion_get(const struct bman_depletion *c, u8 bpid)
{
	return c->__state[__bmdep_word(bpid)] & __bmdep_bit(bpid);
}
static inline void bman_depletion_set(struct bman_depletion *c, u8 bpid)
{
	c->__state[__bmdep_word(bpid)] |= __bmdep_bit(bpid);
}
static inline void bman_depletion_unset(struct bman_depletion *c, u8 bpid)
{
	c->__state[__bmdep_word(bpid)] &= ~__bmdep_bit(bpid);
}

/* --- BMan data structures (and associated constants) --- */

/* Represents s/w corenet portal mapped data structures */
struct bm_rcr_entry;	/* RCR (Release Command Ring) entries */
struct bm_mc_command;	/* MC (Management Command) command */
struct bm_mc_result;	/* MC result */

/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer
 * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
 * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used. */
/* wrapper for 48-bit buffers */
struct bm_buffer {
	union {
		struct {
			u8 __reserved1;
			u8 bpid;
			u16 hi; /* High 16-bits of 48-bit address */
			u32 lo; /* Low 32-bits of 48-bit address */
		};
		struct {
			u64 __notaddress:16;
			u64 addr:48;
			__be16 bpid; /* hi 8-bits reserved */
			__be16 hi; /* High 16-bits of 48-bit address */
			__be32 lo; /* Low 32-bits of 48-bit address */
		};
		__be64 data;
	};
} __aligned(8);
static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
{
	return buf->addr;
}
/*
 * Restore the 48 bit address previously stored in BMan
 * hardware pools as a dma_addr_t
 */
static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
{
	return (dma_addr_t)buf->addr;
	return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
}
/* Macro, so we compile better if 'v' isn't always 64-bit */
#define bm_buffer_set64(buf, v) \
	do { \
		struct bm_buffer *__buf931 = (buf); \
		__buf931->hi = upper_32_bits(v); \
		__buf931->lo = lower_32_bits(v); \
	} while (0)

/* See 1.5.3.5.4: "Release Command" */
struct bm_rcr_entry {
	union {
		struct {
			u8 __dont_write_directly__verb;
			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
			u8 __reserved1[62];
		};
		struct bm_buffer bufs[8];
	};
} __packed;
#define BM_RCR_VERB_VBIT		0x80
#define BM_RCR_VERB_CMD_MASK		0x70	/* one of two values; */
#define BM_RCR_VERB_CMD_BPID_SINGLE	0x20
#define BM_RCR_VERB_CMD_BPID_MULTI	0x30
#define BM_RCR_VERB_BUFCOUNT_MASK	0x0f	/* values 1..8 */
static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
{
	return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
}

/* See 1.5.3.1: "Acquire Command" */
/* See 1.5.3.2: "Query Command" */
struct bm_mcc_acquire {
	u8 bpid;
	u8 __reserved1[62];
} __packed;
struct bm_mcc_query {
	u8 __reserved2[63];
} __packed;
struct bm_mc_command {
	u8 __dont_write_directly__verb;
	union {
		struct bm_mcc_acquire acquire;
		struct bm_mcc_query query;
	};
} __packed;
#define BM_MCC_VERB_VBIT		0x80
#define BM_MCC_VERB_CMD_MASK		0x70	/* where the verb contains; */
#define BM_MCC_VERB_CMD_ACQUIRE		0x10
#define BM_MCC_VERB_CMD_QUERY		0x40
#define BM_MCC_VERB_ACQUIRE_BUFCOUNT	0x0f	/* values 1..8 go here */
static inline void bm_buffer_set64(struct bm_buffer *buf, u64 addr)
{
	buf->hi = cpu_to_be16(upper_32_bits(addr));
	buf->lo = cpu_to_be32(lower_32_bits(addr));
}

/* See 1.5.3.3: "Acquire Response" */
/* See 1.5.3.4: "Query Response" */
struct bm_pool_state {
	u8 __reserved1[32];
	/* "availability state" and "depletion state" */
	struct {
		u8 __reserved1[8];
		/* Access using bman_depletion_***() */
		struct bman_depletion state;
	} as, ds;
};
struct bm_mc_result {
	union {
		struct {
			u8 verb;
			u8 __reserved1[63];
		};
		union {
			struct {
				u8 __reserved1;
				u8 bpid;
				u8 __reserved2[62];
			};
			struct bm_buffer bufs[8];
		} acquire;
		struct bm_pool_state query;
	};
} __packed;
#define BM_MCR_VERB_VBIT		0x80
#define BM_MCR_VERB_CMD_MASK		BM_MCC_VERB_CMD_MASK
#define BM_MCR_VERB_CMD_ACQUIRE		BM_MCC_VERB_CMD_ACQUIRE
#define BM_MCR_VERB_CMD_QUERY		BM_MCC_VERB_CMD_QUERY
#define BM_MCR_VERB_CMD_ERR_INVALID	0x60
#define BM_MCR_VERB_CMD_ERR_ECC		0x70
#define BM_MCR_VERB_ACQUIRE_BUFCOUNT	BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
/* Determine the "availability state" of pool 'p' from a query result 'r' */
#define BM_MCR_QUERY_AVAILABILITY(r, p) \
	bman_depletion_get(&r->query.as.state, p)
/* Determine the "depletion state" of pool 'p' from a query result 'r' */
#define BM_MCR_QUERY_DEPLETION(r, p) \
	bman_depletion_get(&r->query.ds.state, p)
static inline u8 bm_buffer_get_bpid(const struct bm_buffer *buf)
{
	return be16_to_cpu(buf->bpid) & 0xff;
}

/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
static inline void bm_buffer_set_bpid(struct bm_buffer *buf, int bpid)
{
	buf->bpid = cpu_to_be16(bpid & 0xff);
}
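/*
 * [Editor's sketch - not part of the upstream diff. The replacement
 * accessors above hide the big-endian on-wire layout of bm_buffer
 * (BE16 bpid/hi, BE32 lo). Seeding a buffer for release might look
 * like this; the function name is illustrative:
 */
static inline void example_bm_buffer_init(struct bm_buffer *buf,
					  int bpid, dma_addr_t addr)
{
	memset(buf, 0, sizeof(*buf));
	bm_buffer_set_bpid(buf, bpid);	/* low 8 bits of the BE16 field */
	bm_buffer_set64(buf, addr);	/* 48-bit address split into hi/lo */
}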

/* Managed portal, high-level i/face */

/* Portal and Buffer Pools */

/* Represents a managed portal */
struct bman_portal;

/* This object type represents BMan buffer pools. */
struct bman_pool;

struct bman_portal_config {
	/* This is used for any "core-affine" portals, ie. default portals
	 * associated to the corresponding cpu. -1 implies that there is no core
	 * affinity configured. */
	int cpu;
	/* portal interrupt line */
	int irq;
#ifndef __rtems__
	/* Is this portal shared? (If so, it has coarser locking and demuxes
	 * processing on behalf of other CPUs.) */
	int is_shared;
#endif /* __rtems__ */
	/* These are the buffer pool IDs that may be used via this portal. */
	struct bman_depletion mask;
};

/* This callback type is used when handling pool depletion entry/exit. The
 * 'cb_ctx' value is the opaque value associated with the pool object in
 * bman_new_pool(). 'depleted' is non-zero on depletion-entry, and zero on
 * depletion-exit. */
typedef void (*bman_cb_depletion)(struct bman_portal *bm,
			struct bman_pool *pool, void *cb_ctx, int depleted);

/* This struct specifies parameters for a bman_pool object. */
struct bman_pool_params {
	/* index of the buffer pool to encapsulate (0-63), ignored if
	 * BMAN_POOL_FLAG_DYNAMIC_BPID is set. */
	u32 bpid;
	/* bit-mask of BMAN_POOL_FLAG_*** options */
	u32 flags;
	/* depletion-entry/exit callback, if BMAN_POOL_FLAG_DEPLETION is set */
	bman_cb_depletion cb;
	/* opaque user value passed as a parameter to 'cb' */
	void *cb_ctx;
	/* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
	 * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
	 * when run in the control plane (which controls BMan CCSR). This array
	 * matches the definition of bm_pool_set(). */
	u32 thresholds[4];
};

/* Flags to bman_new_pool() */
#define BMAN_POOL_FLAG_NO_RELEASE    0x00000001 /* can't release to pool */
#define BMAN_POOL_FLAG_ONLY_RELEASE  0x00000002 /* can only release to pool */
#define BMAN_POOL_FLAG_DEPLETION     0x00000004 /* track depletion entry/exit */
#define BMAN_POOL_FLAG_DYNAMIC_BPID  0x00000008 /* (de)allocate bpid */
#define BMAN_POOL_FLAG_THRESH        0x00000010 /* set depletion thresholds */
#define BMAN_POOL_FLAG_STOCKPILE     0x00000020 /* stockpile to reduce hw ops */

/* Flags to bman_release() */
#ifdef FSL_DPA_CAN_WAIT
#define BMAN_RELEASE_FLAG_WAIT       0x00000001 /* wait if RCR is full */
#ifndef __rtems__
#define BMAN_RELEASE_FLAG_WAIT_INT   0x00000002 /* if we wait, interruptible? */
#endif /* __rtems__ */
#ifdef FSL_DPA_CAN_WAIT_SYNC
#define BMAN_RELEASE_FLAG_WAIT_SYNC  0x00000004 /* if wait, until consumed? */
#endif
#endif
#define BMAN_RELEASE_FLAG_NOW        0x00000008 /* issue immediate release */

/* Flags to bman_acquire() */
#define BMAN_ACQUIRE_FLAG_STOCKPILE  0x00000001 /* no hw op, stockpile only */

/* Portal Management */

/**
 * bman_get_portal_config - get portal configuration settings
 *
 * This returns a read-only view of the current cpu's affine portal settings.
 */
const struct bman_portal_config *bman_get_portal_config(void);

/**
 * bman_irqsource_get - return the portal work that is interrupt-driven
 *
 * Returns a bitmask of BM_PIRQ_**I processing sources that are currently
 * enabled for interrupt handling on the current cpu's affine portal. These
 * sources will trigger the portal interrupt and the interrupt handler (or a
 * tasklet/bottom-half it defers to) will perform the corresponding processing
 * work. The bman_poll_***() functions will only process sources that are not in
 * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
 * this always returns zero.
 */
u32 bman_irqsource_get(void);

/**
 * bman_irqsource_add - add processing sources to be interrupt-driven
 * @bits: bitmask of BM_PIRQ_**I processing sources
 *
 * Adds processing sources that should be interrupt-driven (rather than
 * processed via bman_poll_***() functions). Returns zero for success, or
 * -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
int bman_irqsource_add(u32 bits);

/**
 * bman_irqsource_remove - remove processing sources from being interrupt-driven
 * @bits: bitmask of BM_PIRQ_**I processing sources
 *
 * Removes processing sources from being interrupt-driven, so that they will
 * instead be processed via bman_poll_***() functions. Returns zero for success,
 * or -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
int bman_irqsource_remove(u32 bits);

#ifndef __rtems__
/**
 * bman_affine_cpus - return a mask of cpus that have affine portals
 */
const cpumask_t *bman_affine_cpus(void);
#endif /* __rtems__ */

/**
 * bman_poll_slow - process anything that isn't interrupt-driven.
 *
 * This function does any portal processing that isn't interrupt-driven. If the
 * current CPU is sharing a portal hosted on another CPU, this function will
 * return -EINVAL, otherwise the return value is a bitmask of BM_PIRQ_* sources
 * indicating what interrupt sources were actually processed by the call.
 *
 * NB, unlike the legacy wrapper bman_poll(), this function will
 * deterministically check for the presence of portal processing work and do it,
 * which implies some latency even if there's nothing to do. The bman_poll()
 * wrapper on the other hand (like the qman_poll() wrapper) attenuates this by
 * checking for (and doing) portal processing infrequently. Ie. such that
 * qman_poll() and bman_poll() can be called from core-processing loops. Use
 * bman_poll_slow() when you yourself are deciding when to incur the overhead of
 * processing.
 */
u32 bman_poll_slow(void);

/**
 * bman_poll - process anything that isn't interrupt-driven.
 *
 * Dispatcher logic on a cpu can use this to trigger any maintenance of the
 * affine portal. This function does whatever processing is not triggered by
 * interrupts. This is a legacy wrapper that can be used in core-processing
 * loops but mitigates the performance overhead of portal processing by
 * adaptively bypassing true portal processing most of the time. (Processing is
 * done once every 10 calls if the previous processing revealed that work needed
 * to be done, or once very 1000 calls if the previous processing revealed no
 * work needed doing.) If you wish to control this yourself, call
 * bman_poll_slow() instead, which always checks for portal processing work.
 */
void bman_poll(void);

/**
 * bman_rcr_is_empty - Determine if portal's RCR is empty
 *
 * For use in situations where a cpu-affine caller needs to determine when all
 * releases for the local portal have been processed by BMan but can't use the
 * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
 * The function forces tracking of RCR consumption (which normally doesn't
 * happen until release processing needs to find space to put new release
 * commands), and returns zero if the ring still has unprocessed entries,
 * non-zero if it is empty.
 */
int bman_rcr_is_empty(void);

/**
 * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
 * @result: is set by the API to the base BPID of the allocated range
 * @count: the number of BPIDs required
 * @align: required alignment of the allocated range
 * @partial: non-zero if the API can return fewer than @count BPIDs
 *
 * Returns the number of buffer pools allocated, or a negative error code. If
 * @partial is non zero, the allocation request may return a smaller range of
 * BPs than requested (though alignment will be as requested). If @partial is
 * zero, the return value will either be 'count' or negative.
 */
int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
static inline int bman_alloc_bpid(u32 *result)
{
	int ret = bman_alloc_bpid_range(result, 1, 0, 0);

	return (ret > 0) ? 0 : ret;
}
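/*
 * [Editor's sketch - not part of the upstream diff. Typical single-BPID
 * use of the range allocator documented above; the function name is
 * illustrative:
 */
static int example_bpid_cycle(void)
{
	u32 bpid;
	int err = bman_alloc_bpid(&bpid);

	if (err)
		return err;		/* no BPID available */
	/* ... create and use a pool with this BPID ... */
	bman_release_bpid(bpid);	/* declared just below */
	return 0;
}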
|
||||
/**
|
||||
* bman_release_bpid_range - Release the specified range of buffer pool IDs
|
||||
* @bpid: the base BPID of the range to deallocate
|
||||
* @count: the number of BPIDs in the range
|
||||
*
|
||||
* This function can also be used to seed the allocator with ranges of BPIDs
|
||||
* that it can subsequently allocate from.
|
||||
*/
|
||||
void bman_release_bpid_range(u32 bpid, u32 count);
|
||||
static inline void bman_release_bpid(u32 bpid)
|
||||
{
|
||||
bman_release_bpid_range(bpid, 1);
|
||||
}
|
||||
|
||||
int bman_reserve_bpid_range(u32 bpid, u32 count);
|
||||
static inline int bman_reserve_bpid(u32 bpid)
|
||||
{
|
||||
return bman_reserve_bpid_range(bpid, 1);
|
||||
}
|
||||
|
||||
void bman_seed_bpid_range(u32 bpid, u32 count);
|
||||
|
||||
|
||||
int bman_shutdown_pool(u32 bpid);

/* Pool management */
#define BM_POOL_MAX	64 /* max # of buffer pools */

/**
 * bman_new_pool - Allocates a Buffer Pool object
 * @params: parameters specifying the buffer pool ID and behaviour
 *
 * Creates a pool object for the given @params. A portal and the depletion
 * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
 * is set. NB, the fields from @params are copied into the new pool object, so
 * the structure provided by the caller can be released or reused after the
 * function returns.
 * Creates a pool object, and returns a reference to it or NULL on error.
 */
struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
struct bman_pool *bman_new_pool(void);

/**
 * bman_free_pool - Deallocates a Buffer Pool object
 * @pool: the pool object to release
 *
 */
void bman_free_pool(struct bman_pool *pool);

/**
 * bman_get_params - Returns a pool object's parameters.
 * bman_get_bpid - Returns a pool object's BPID.
 * @pool: the pool object
 *
 * The returned pointer refers to state within the pool object so must not be
 * modified and can no longer be read once the pool object is destroyed.
 * The returned value is the index of the encapsulated buffer pool,
 * in the range of [0, @BM_POOL_MAX-1].
 */
const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
int bman_get_bpid(const struct bman_pool *pool);

/**
 * bman_release - Release buffer(s) to the buffer pool
 * @pool: the buffer pool object to release to
 * @bufs: an array of buffers to release
 * @num: the number of buffers in @bufs (1-8)
 * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
 *
 * Adds the given buffers to RCR entries. If the portal @p was created with the
 * "COMPACT" flag, then it will be using a compaction algorithm to improve
 * utilisation of RCR. As such, these buffers may join an existing ring entry
 * and/or it may not be issued right away so as to allow future releases to join
 * the same ring entry. Use the BMAN_RELEASE_FLAG_NOW flag to override this
 * behaviour by committing the RCR entry (or entries) right away. If the RCR
 * ring is full, the function will return -EBUSY unless BMAN_RELEASE_FLAG_WAIT
 * is selected, in which case it will sleep waiting for space to become
 * available in RCR. If the function receives a signal before such time (and
 * BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns -EINTR. Otherwise,
 * it returns zero.
 * Adds the given buffers to RCR entries. If the RCR ring is unresponsive,
 * the function will return -ETIMEDOUT. Otherwise, it returns zero.
 */
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
    u32 flags);
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);

/**
 * bman_acquire - Acquire buffer(s) from a buffer pool
@ -473,52 +124,6 @@ int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
 * negative error code if a h/w error or pool starvation was encountered. In
 * the latter case, the content of @bufs is undefined.
 */
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
    u32 flags);

/**
 * bman_flush_stockpile - Flush stockpile buffer(s) to the buffer pool
 * @pool: the buffer pool object the stockpile belongs to
 * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
 *
 * Adds stockpile buffers to RCR entries until the stockpile is empty.
 * The return value will be a negative error code if a h/w error occurred.
 * If the BMAN_RELEASE_FLAG_NOW flag is passed and the RCR ring is full,
 * -EAGAIN will be returned.
 */
int bman_flush_stockpile(struct bman_pool *pool, u32 flags);

/**
 * bman_query_pools - Query all buffer pool states
 * @state: storage for the queried availability and depletion states
 */
int bman_query_pools(struct bm_pool_state *state);

#ifdef CONFIG_FSL_BMAN
/**
 * bman_query_free_buffers - Query how many free buffers are in buffer pool
 * @pool: the buffer pool object to query
 *
 * Return the number of the free buffers
 */
u32 bman_query_free_buffers(struct bman_pool *pool);

/**
 * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
 * @pool: the buffer pool object to which the thresholds will be set
 * @thresholds: the new thresholds
 */
int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
#endif

/**
 * The bman_p_***() variants below might be called in a situation where the
 * cpu to which the portal is affine is not online yet.
 * @bman_portal specifies which portal the API will use.
 */
int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits);
#ifdef __cplusplus
}
#endif
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);

#endif /* __FSL_BMAN_H */

File diff suppressed because it is too large
@ -34,4 +34,6 @@
#define ____cacheline_aligned __cacheline_aligned
#endif

#define L1_CACHE_BYTES PPC_DEFAULT_CACHE_LINE_SIZE

#endif /* __ASM_CACHE_H */
@ -45,7 +45,7 @@
#define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG)
#define BIT_MASK(nr) (1UL << ((nr) & (BITS_PER_LONG - 1)))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define GENMASK(lo, hi) (((2UL << ((hi) - (lo))) - 1UL) << (lo))
#define GENMASK(hi, lo) (((2UL << ((hi) - (lo))) - 1UL) << (lo))
#define BITS_PER_BYTE 8

static inline int
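
The hunk above swaps GENMASK's argument order so the high bit comes first,
matching Linux; a quick expansion (our own arithmetic, not from the commit)
checks the macro:

/* GENMASK(7, 4) == (((2UL << (7 - 4)) - 1UL) << 4)
 *               == ((16UL - 1UL) << 4)
 *               == 0xF0, i.e. bits 4..7 set. */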

@ -67,7 +67,6 @@
#define typeof(x) __typeof(x)

#define uninitialized_var(x) x = x
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
#define __maybe_unused __unused
#define __always_unused __unused
#define __must_check __result_use_check
0 rtemsbsd/powerpc/include/linux/cpu.h Normal file
64 rtemsbsd/powerpc/include/linux/cpumask.h Normal file
@ -0,0 +1,64 @@
/*
 * Copyright (c) 2017 embedded brains GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_CPUMASK_H
#define _LINUX_CPUMASK_H

#include <rtems.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* FIXME */
#define for_each_cpu(cpu, mask) \
	for ((cpu) = 0; (cpu) < rtems_get_processor_count(); ++(cpu))

/* FIXME */
#define for_each_cpu_not(cpu, mask) \
	for ((cpu) = 0; (cpu) < rtems_get_processor_count(); ++(cpu))

/* FIXME */
#define for_each_cpu_and(cpu, mask, and) \
	for ((cpu) = 0; (cpu) < rtems_get_processor_count(); ++(cpu))

/* FIXME */
#define for_each_possible_cpu(cpu) \
	for_each_cpu((cpu), 0)

/* FIXME */
#define for_each_online_cpu(cpu) \
	for_each_cpu((cpu), 0)

/* FIXME */
#define for_each_present_cpu(cpu) \
	for_each_cpu((cpu), 0)

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _LINUX_CPUMASK_H */
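
These stubs ignore the mask argument and simply walk processors 0..N-1; a
short sketch (the hypothetical counter function is ours, not part of the
commit) shows how dependent code ends up iterating:

#include <linux/cpumask.h>

static unsigned int example_count_cpus(void)
{
	unsigned int cpu;
	unsigned int n = 0;

	/* The RTEMS stub ignores the mask, so this visits every
	 * processor reported by rtems_get_processor_count(). */
	for_each_possible_cpu(cpu)
		++n;

	return (n);
}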

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016 embedded brains GmbH
 * Copyright (c) 2017 embedded brains GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -24,30 +24,25 @@
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _FDT_PHY
#define _FDT_PHY
#ifndef _LINUX_CRC32_H
#define _LINUX_CRC32_H

#include <linux/types.h>
#include <sys/libkern.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

struct fdt_mdio_device {
	int (*read)(struct fdt_mdio_device *dev, int phy, int reg);
	int (*write)(struct fdt_mdio_device *dev, int phy, int reg, int val);
};
static inline u32
crc32_le(u32 crc, unsigned char const *buf, size_t size)
{

struct fdt_phy_device {
	int phy;
	struct fdt_mdio_device *mdio_dev;
};

struct fdt_phy_device *fdt_phy_obtain(int device_node);

void fdt_phy_release(struct fdt_phy_device *phy_dev);
	return (crc32_raw(buf, size, crc));
}

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _FDT_PHY */

#endif /* _LINUX_CRC32_H */
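
The shim above routes Linux's little-endian CRC32 onto libkern's crc32_raw();
a one-line caller (the all-ones seed follows the common Linux convention and
is an assumption here, not something this commit requires):

static u32
example_crc32(const void *data, size_t len)
{
	/* Seed with all ones, as most Linux crc32_le() callers do. */
	return (crc32_le(~0U, data, len));
}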

@ -48,7 +48,7 @@
enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED, IRQ_WAKE_THREAD, };
typedef enum irqreturn irqreturn_t;

#include <stdio.h>
#include <sys/systm.h>

#include <linux/ioport.h>
#include <linux/of.h>
@ -85,14 +85,20 @@ devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size)
#define devm_alloc_percpu(dev, type) \
	devm_kzalloc(dev, sizeof(type) * rtems_get_processor_count(), GFP_KERNEL)

#define dev_crit(dev, fmt, ...) \
	do { (void)dev; printf(fmt, ##__VA_ARGS__); } while (0)

#define dev_err(dev, fmt, ...) \
	do { (void)dev; fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)
	do { (void)dev; printf(fmt, ##__VA_ARGS__); } while (0)

#define dev_dbg(dev, fmt, ...) \
	do { (void)dev; printf(fmt, ##__VA_ARGS__); } while (0)

#define dev_warn(dev, fmt, ...) \
	do { (void)dev; fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)
	do { (void)dev; printf(fmt, ##__VA_ARGS__); } while (0)

#define dev_info(dev, fmt, ...) \
	do { (void)dev; fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)
	do { (void)dev; printf(fmt, ##__VA_ARGS__); } while (0)

static inline struct device *
get_device(struct device *dev)
0 rtemsbsd/powerpc/include/linux/fsl/guts.h Normal file
130 rtemsbsd/powerpc/include/linux/genalloc.h Normal file
@ -0,0 +1,130 @@
/**************************************************************************

Copyright (c) 2007, 2008 Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.

2. Neither the name of the Chelsio Corporation nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/

#ifndef _LINUX_GENALLOC_H
#define _LINUX_GENALLOC_H

#include <sys/blist.h>
#include <sys/malloc.h>

struct gen_pool {
	blist_t gen_list;
	daddr_t gen_base;
	int gen_chunk_shift;
	struct mtx gen_lock;
};

static inline struct gen_pool *
gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *gp;

	gp = malloc(sizeof(*gp), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (gp == NULL)
		return (NULL);

	gp->gen_chunk_shift = min_alloc_order;
	mtx_init(&gp->gen_lock, "genpool", NULL, MTX_DEF);
	return (gp);
}

static inline int
gen_pool_add_virt(struct gen_pool *gp, daddr_t virt, daddr_t phys,
    size_t size, int nid)
{

	(void)phys;
	(void)nid;

	if (gp->gen_base != 0)
		return (-ENOMEM);

	gp->gen_list = blist_create(size >> gp->gen_chunk_shift, M_NOWAIT);
	if (gp->gen_list == NULL)
		return (-ENOMEM);

	gp->gen_base = virt;
	blist_free(gp->gen_list, 0, size >> gp->gen_chunk_shift);
	return (0);
}

static inline int
gen_pool_add(struct gen_pool *gp, daddr_t addr, size_t size, int nid)
{

	return (gen_pool_add_virt(gp, addr, -1, size, nid));
}

static inline daddr_t
gen_pool_alloc(struct gen_pool *gp, size_t size)
{
	int chunks;
	daddr_t blkno;

	chunks = (size + (1 << gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift;
	mtx_lock(&gp->gen_lock);
	blkno = blist_alloc(gp->gen_list, chunks);
	mtx_unlock(&gp->gen_lock);

	if (blkno == SWAPBLK_NONE)
		return (0);

	return (gp->gen_base + ((1 << gp->gen_chunk_shift) * blkno));
}

static inline void
gen_pool_free(struct gen_pool *gp, daddr_t address, size_t size)
{
	int chunks;
	daddr_t blkno;

	chunks = (size + (1 << gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift;
	blkno = (address - gp->gen_base) / (1 << gp->gen_chunk_shift);
	mtx_lock(&gp->gen_lock);
	blist_free(gp->gen_list, blkno, chunks);
	mtx_unlock(&gp->gen_lock);
}

static __inline void
gen_pool_destroy(struct gen_pool *gp)
{
	blist_destroy(gp->gen_list);
	free(gp, M_DEVBUF);
}

static inline struct gen_pool *
devm_gen_pool_create(struct device *dev, int min_alloc_order, int nid,
    const char *name)
{

	(void)dev;
	(void)name;
	return (gen_pool_create(min_alloc_order, nid));
}

#endif /* _LINUX_GENALLOC_H */
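
A usage sketch of this blist-backed shim (the base address, region size, and
allocation order are made up for illustration):

static void example_gen_pool(void)
{
	struct gen_pool *gp;
	daddr_t chunk;

	/* 64-byte minimum allocation granularity (order 6). */
	gp = gen_pool_create(6, -1);
	if (gp == NULL)
		return;

	/* Hand the pool a 4 KiB region starting at 0x1000; the shim
	 * accepts exactly one region per pool. */
	if (gen_pool_add(gp, 0x1000, 4096, -1) != 0) {
		gen_pool_destroy(gp);
		return;
	}

	chunk = gen_pool_alloc(gp, 128);	/* rounds up to 2 chunks */
	if (chunk != 0)
		gen_pool_free(gp, chunk, 128);

	gen_pool_destroy(gp);
}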

@ -41,6 +41,13 @@ struct resource {
#define IORESOURCE_IRQ 0x00000400
#define IORESOURCE_MEM 0x00000420

static inline resource_size_t
resource_size(const struct resource *res)
{

	return (res->end - res->start + 1);
}

#ifdef __cplusplus
}
#endif /* __cplusplus */
0 rtemsbsd/powerpc/include/linux/libfdt_env.h Normal file
@ -70,8 +70,6 @@
#include <vm/vm.h>
#include <vm/vm_object.h>

#define prefetch(x)

struct list_head {
	struct list_head *next;
	struct list_head *prev;
41 rtemsbsd/powerpc/include/linux/mii.h Normal file
@ -0,0 +1,41 @@
/*
 * Copyright (c) 2017 embedded brains GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_MII_H
#define _LINUX_MII_H

#include <dev/mii/mii.h>

#define BMCR_SPEED1000	BMCR_SPEED1
#define BMCR_SPEED100	BMCR_SPEED0
#define BMCR_SPEED10	0
#define BMCR_ANENABLE	BMCR_AUTOEN
#define BMCR_ANRESTART	BMCR_STARTNEG
#define BMCR_FULLDPLX	BMCR_FDX

#define MII_ADVERTISE	MII_ANAR

#endif /* _LINUX_MII_H */
@ -50,6 +50,8 @@ netdev_priv(struct net_device *net_dev)
#define netdev_err(...) do { } while (0)
#define netdev_dbg(...) do { } while (0)

#define netif_msg_drv(p) 1

#ifdef __cplusplus
}
#endif /* __cplusplus */
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015 embedded brains GmbH
 * Copyright (c) 2015, 2017 embedded brains GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -27,7 +27,7 @@
#ifndef _LINUX_OF_H
#define _LINUX_OF_H

#include <stdbool.h>
#include <linux/types.h>

#include <libfdt.h>

@ -59,6 +59,25 @@ of_node_put(struct device_node *dn)
const void *of_get_property(const struct device_node *dn, const char *name,
    int *len);

/* FIXME: If we need the property, then more work is needed */
#define of_find_property(dn, name, len) of_get_property(dn, name, len)

int of_property_read_u32_array(const struct device_node *dn, const char *name,
    u32 *vals, size_t nz);

static inline int
of_property_read_u32(const struct device_node *dn, const char *name, u32 *val)
{

	return (of_property_read_u32_array(dn, name, val, 1));
}

struct device_node *of_parse_phandle(struct device_node *dns,
    struct device_node *dn, const char *phandle_name, int index);

int of_count_phandle_with_args(struct device_node *dn, const char *list_name,
    const char *cells_name);

bool of_device_is_available(const struct device_node *dn);

int of_device_is_compatible(const struct device_node *dn, const char *name);
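
A caller-side sketch of the new property helper (the node handle and the
"fsl,num-pools" property name are illustrative, not from the commit):

static int example_read_cell_count(const struct device_node *dn)
{
	u32 count;
	int err;

	/* Reads a single 32-bit cell; per the implementation this yields
	 * -EINVAL if the property is absent and -EOVERFLOW on a size
	 * mismatch. */
	err = of_property_read_u32(dn, "fsl,num-pools", &count);
	if (err != 0)
		return (err);

	return ((int)count);
}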

@ -0,0 +1,43 @@
/*
 * Copyright (c) 2017 embedded brains GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_OF_MDIO_H
#define _LINUX_OF_MDIO_H

#include <linux/of.h>
#include <linux/phy.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

struct phy_device *of_phy_find_device(struct device_node *dn);

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _LINUX_OF_MDIO_H */

@ -33,6 +33,8 @@
extern "C" {
#endif /* __cplusplus */

int of_get_phy_mode(struct device_node *dn);

const void *of_get_mac_address(struct device_node *dn);

#ifdef __cplusplus
@ -27,6 +27,7 @@
#ifndef _LINUX_PERCPU_H
#define _LINUX_PERCPU_H

#include <linux/cpumask.h>
#include <linux/threads.h>

#include <rtems/score/threaddispatch.h>
@ -41,8 +42,8 @@ extern "C" {
#define per_cpu(_designator, _cpu) \
	(_designator[_cpu])

#define this_cpu_ptr(_ptr_designator) \
	(&(*_ptr_designator)[_CPU_SMP_Get_current_processor()])
#define this_cpu_ptr(_designator) \
	(&_designator[_CPU_SMP_Get_current_processor()])

#define get_cpu_var(_designator) \
	(*({ Per_CPU_Control *_cpu_self = _Thread_Dispatch_disable(); \
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015 embedded brains GmbH
 * Copyright (c) 2015, 2017 embedded brains GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -27,7 +27,10 @@
#ifndef _LINUX_PHY_H
#define _LINUX_PHY_H

#include <sys/queue.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/netdevice.h>

#ifdef __cplusplus
extern "C" {
@ -66,6 +69,50 @@ typedef enum {
#define SPEED_56000	56000
#define SPEED_100000	100000

#define SUPPORTED_10000baseT_Full	(1U << 0)
#define SUPPORTED_1000baseT_Full	(1U << 1)
#define SUPPORTED_100baseT_Full		(1U << 2)
#define SUPPORTED_100baseT_Half		(1U << 3)
#define SUPPORTED_10baseT_Full		(1U << 4)
#define SUPPORTED_10baseT_Half		(1U << 5)
#define SUPPORTED_Asym_Pause		(1U << 6)
#define SUPPORTED_Autoneg		(1U << 7)
#define SUPPORTED_MII			(1U << 8)
#define SUPPORTED_Pause			(1U << 9)

struct mdio_bus {
	int (*read)(struct mdio_bus *bus, int phy, int reg);
	int (*write)(struct mdio_bus *bus, int phy, int reg, int val);
	SLIST_ENTRY(mdio_bus) next;
	int node;
};

struct phy_device {
	struct {
		struct device dev;
		int addr;
		struct mdio_bus *bus;
	} mdio;
};

static inline int
phy_read(struct phy_device *phy_dev, int reg)
{
	struct mdio_bus *mdio_dev;

	mdio_dev = phy_dev->mdio.bus;
	return ((*mdio_dev->read)(mdio_dev, phy_dev->mdio.addr, (int)reg));
}

static inline int
phy_write(struct phy_device *phy_dev, int reg, int val)
{
	struct mdio_bus *mdio_dev;

	mdio_dev = phy_dev->mdio.bus;
	return ((*mdio_dev->write)(mdio_dev, phy_dev->mdio.addr, reg, val));
}

#ifdef __cplusplus
}
#endif /* __cplusplus */
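
With the mdio.bus indirection above, a PHY register access bottoms out in the
bus driver's read/write callbacks; a hypothetical autonegotiation restart
(ours, not part of the commit) reads:

#include <linux/mii.h>

static int example_restart_aneg(struct phy_device *phy_dev)
{
	int bmcr;

	bmcr = phy_read(phy_dev, MII_BMCR);
	if (bmcr < 0)
		return (bmcr);

	/* BMCR_ANENABLE/BMCR_ANRESTART map onto the FreeBSD
	 * BMCR_AUTOEN/BMCR_STARTNEG bits via <linux/mii.h>. */
	return (phy_write(phy_dev, MII_BMCR,
	    bmcr | BMCR_ANENABLE | BMCR_ANRESTART));
}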

@ -38,11 +38,10 @@ struct platform_device {
	void *platform_data;
};

struct resource *platform_get_resource_impl(struct platform_device *dev,
    unsigned int type, unsigned int num, struct resource *res);
struct resource *platform_get_resource(struct resource *res,
    struct platform_device *dev, unsigned int type, unsigned int num);

#define platform_get_resource(dev, type, num) \
    platform_get_resource_impl(dev, type, num, &platform_resource)
int platform_get_irq(struct platform_device *dev, unsigned int num);

#ifdef __cplusplus
}
0 rtemsbsd/powerpc/include/linux/prefetch.h Normal file
0 rtemsbsd/powerpc/include/linux/sched/signal.h Normal file
@ -33,6 +33,7 @@

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <vm/uma.h>

@ -58,7 +58,7 @@ typedef unsigned gfp_t;
typedef uint64_t loff_t;
typedef uint64_t resource_size_t;

typedef u64 phys_addr_t;
typedef uint64_t phys_addr_t;

#define DECLARE_BITMAP(n, bits) \
    unsigned long n[howmany(bits, sizeof(long) * 8)]
@ -93,6 +93,7 @@ do { \
	} \
} while (0)

#ifndef __rtems__
#define wait_event_interruptible(q, cond) \
({ \
	void *c = &(q).wchan; \
@ -114,6 +115,13 @@ do { \
	} \
	-_error; \
})
#else /* __rtems__ */
#define wait_event_interruptible(q, cond) \
({ \
	wait_event(q, cond); \
	0; \
})
#endif /* __rtems__ */

static inline int
waitqueue_active(wait_queue_head_t *q)
@ -100,6 +100,8 @@ static inline int queue_work(struct workqueue_struct *q, struct work_struct *wor
	return !taskqueue_enqueue((q)->taskqueue, &(work)->work_task);
}

#define queue_work_on(cpu, q, work) queue_work(q, work)

static inline void
_delayed_work_fn(void *arg)
{
@ -43,6 +43,32 @@ of_get_property(const struct device_node *dn, const char *name, int *len)
	return (fdt_getprop(fdt, dn->offset, name, len));
}

int
of_property_read_u32_array(const struct device_node *dn, const char *name,
    u32 *vals, size_t n)
{
	const u32 *prop_vals;
	int len;

	prop_vals = of_get_property(dn, name, &len);
	if (prop_vals == NULL) {
		return (-EINVAL);
	}

	if (len != n * sizeof(*vals)) {
		return (-EOVERFLOW);
	}

	while (n > 0) {
		*vals = fdt32_to_cpu(*prop_vals);
		++vals;
		++prop_vals;
		--n;
	}

	return (0);
}

bool
of_device_is_available(const struct device_node *dn)
{
@ -95,6 +121,48 @@ of_find_compatible_node(struct device_node *dns, const struct device_node *dn,
	}
}

struct device_node *
of_parse_phandle(struct device_node *dns, struct device_node *dn,
    const char *phandle_name, int index)
{
	const void *fdt = bsp_fdt_get();
	const fdt32_t *phandle;
	int node;
	int len;

	phandle = fdt_getprop(fdt, dn->offset, phandle_name, &len);
	if (phandle == NULL || (len / (int) sizeof(*phandle)) <= index) {
		return (NULL);
	}

	node = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(phandle[index]));
	if (node < 0) {
		return (NULL);
	}

	dns->offset = node;
	dns->full_name = NULL;
	return (dns);
}

int
of_count_phandle_with_args(struct device_node *dn, const char *list_name,
    const char *cells_name)
{
	const void *fdt = bsp_fdt_get();
	const fdt32_t *phandle;
	int len;

	BSD_ASSERT(cells_name == NULL);

	phandle = fdt_getprop(fdt, dn->offset, list_name, &len);
	if (phandle == NULL) {
		return (-ENOENT);
	}

	return (len / (int)sizeof(*phandle));
}
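
The phandle helpers above resolve FDT references into caller-provided
storage; a sketch chasing a hypothetical "fsl,bman-portals" link (the
property name is illustrative):

static int example_follow_phandle(struct device_node *dn)
{
	struct device_node target;
	int n;

	/* Number of phandle cells in the property, or -ENOENT. */
	n = of_count_phandle_with_args(dn, "fsl,bman-portals", NULL);
	if (n < 0)
		return (n);

	/* Resolve the first referenced node into &target. */
	if (of_parse_phandle(&target, dn, "fsl,bman-portals", 0) == NULL)
		return (-ENXIO);

	return (0);
}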

#include <linux/of_address.h>
#include <linux/of_irq.h>

@ -208,11 +276,63 @@ of_irq_to_resource(struct device_node *dn, int index,
	/* FIXME */
	irq -= 16;
#endif

	if (res != NULL) {
		res->start = irq;
		res->end = irq;
	}

	return (irq);
}

#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/phy.h>

static const char * const phy_modes[] = {
	[PHY_INTERFACE_MODE_MII] = "mii",
	[PHY_INTERFACE_MODE_GMII] = "gmii",
	[PHY_INTERFACE_MODE_SGMII] = "sgmii",
	[PHY_INTERFACE_MODE_TBI] = "tbi",
	[PHY_INTERFACE_MODE_REVMII] = "rev-mii",
	[PHY_INTERFACE_MODE_RMII] = "rmii",
	[PHY_INTERFACE_MODE_RGMII] = "rgmii",
	[PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
	[PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
	[PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
	[PHY_INTERFACE_MODE_RTBI] = "rtbi",
	[PHY_INTERFACE_MODE_SMII] = "smii",
	[PHY_INTERFACE_MODE_XGMII] = "xgmii",
	[PHY_INTERFACE_MODE_MOCA] = "moca",
	[PHY_INTERFACE_MODE_QSGMII] = "qsgmii"
};

int
of_get_phy_mode(struct device_node *dn)
{
	const void *fdt = bsp_fdt_get();
	int len;
	const char *p;
	int i;

	p = fdt_getprop(fdt, dn->offset, "phy-mode", &len);

	if (p == NULL) {
		p = fdt_getprop(fdt, dn->offset, "phy-connection-type", &len);
	}

	if (p == NULL) {
		return (-ENODEV);
	}

	for (i = 0; i < ARRAY_SIZE(phy_modes); i++) {
		if (phy_modes[i] != NULL && strcmp(p, phy_modes[i]) == 0) {
			return (i);
		}
	}

	return (-ENODEV);
}
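
of_get_phy_mode() returns the index into phy_modes[], which by construction
equals the PHY_INTERFACE_MODE_* value; a hypothetical check (ours, not from
the commit):

static int example_is_rgmii(struct device_node *dn)
{
	/* Matches "phy-mode" or the legacy "phy-connection-type". */
	return (of_get_phy_mode(dn) == PHY_INTERFACE_MODE_RGMII);
}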

static const void *
get_mac_address(struct device_node *dn, const char *name)
@ -289,9 +409,35 @@ const uint8_t bitrev_nibbles[16] = {
#include <linux/platform_device.h>

struct resource *
platform_get_resource_impl(struct platform_device *dev,
    unsigned int type, unsigned int num, struct resource *res)
platform_get_resource(struct resource *res, struct platform_device *pdev,
    unsigned int type, unsigned int num)
{
	struct device_node *dn;
	int ret;

	return (res);
	dn = pdev->dev.of_node;

	switch (type) {
	case IORESOURCE_MEM:
		ret = of_address_to_resource(dn, num, res);
		if (ret == 0)
			return res;
	case IORESOURCE_IRQ:
		ret = of_irq_to_resource(dn, num, res);
		if (ret >= 0)
			return res;
	default:
		break;
	}

	return (NULL);
}

int platform_get_irq(struct platform_device *pdev, unsigned int num)
{
	struct resource res_storage;
	struct resource *res;

	res = platform_get_resource(&res_storage, pdev, IORESOURCE_IRQ, num);
	return (res != NULL ? res->start : -ENXIO);
}
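
With the reworked signature the caller supplies the resource storage; a
hypothetical probe path (device and indices illustrative) then reads:

static int example_probe(struct platform_device *pdev)
{
	struct resource res_storage;
	struct resource *res;
	int irq;

	/* First memory window of the device node. */
	res = platform_get_resource(&res_storage, pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return (-ENXIO);

	/* First interrupt; platform_get_irq() wraps the same lookup. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return (irq);

	return (0);
}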

@ -45,7 +45,6 @@
#include <linux/phy.h>

#include "../../../../../../../../linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h"
#include "../../../../../../../../linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h"

#define FMAN_MAC_LOCK(sc) mtx_lock(&(sc)->mtx)
#define FMAN_MAC_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
@ -55,9 +54,9 @@
    CSUM_UDP_IPV6)

struct fman_mac_sgt {
	char priv[DPA_TX_PRIV_DATA_SIZE];
	char priv[DPAA_TX_PRIV_DATA_SIZE];
	struct fman_prs_result prs;
	struct qm_sg_entry sg[DPA_SGT_MAX_ENTRIES];
	struct qm_sg_entry sg[DPAA_SGT_MAX_ENTRIES];
	struct mbuf *m;
};

@ -109,11 +108,11 @@ fman_mac_txstart_locked(struct ifnet *ifp, struct fman_mac_softc *sc)
	struct mbuf *m;
	struct mbuf *n;
	struct qm_fd fd;
	struct dpa_priv_s *priv;
	struct dpaa_priv *priv;
	struct qman_fq *egress_fq;
	int queue = 0;
	size_t i;
	uintptr_t addr;
	int err;

	IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
	if (m == NULL) {
@ -122,20 +121,16 @@ fman_mac_txstart_locked(struct ifnet *ifp, struct fman_mac_softc *sc)

	sgt = uma_zalloc(sc->sgt_zone, M_NOWAIT);
	if (sgt == NULL) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
		m_freem(m);
		continue;
	}

	clear_fd(&fd);
	fd.bpid = 0xff;
	fd.offset = offsetof(struct fman_mac_sgt, sg);
	fd.format = qm_fd_sg;
	fd.length20 = m->m_pkthdr.len;
	fd.cmd |= FM_FD_CMD_FCO;
	addr = (uintptr_t)sgt;
	fd.addr_hi = (u8)upper_32_bits(addr);
	fd.addr_lo = lower_32_bits(addr);
	qm_fd_clear_fd(&fd);
	qm_fd_set_sg(&fd, offsetof(struct fman_mac_sgt, sg), m->m_pkthdr.len);
	fd.bpid = FSL_DPAA_BPID_INV;
	fd.cmd |= cpu_to_be32(FM_FD_CMD_FCO);
	qm_fd_addr_set64(&fd, (uintptr_t)sgt);
	fman_mac_enable_tx_csum(m, &fd, &sgt->prs);

repeat_with_collapsed_mbuf_chain:
@ -143,31 +138,27 @@ repeat_with_collapsed_mbuf_chain:
	i = 0;
	n = m;

	while (n != NULL && i < DPA_SGT_MAX_ENTRIES) {
	while (n != NULL && i < DPAA_SGT_MAX_ENTRIES) {
		int len = n->m_len;

		if (len > 0) {
			sgt->sg[i].bpid = 0xff;
			qm_sg_entry_set_len(&sgt->sg[i], len);
			sgt->sg[i].bpid = FSL_DPAA_BPID_INV;
			sgt->sg[i].offset = 0;
			sgt->sg[i].length = len;
			sgt->sg[i].extension = 0;
			sgt->sg[i].final = 0;
			addr = mtod(n, uintptr_t);
			sgt->sg[i].addr_hi = (u8)upper_32_bits(addr);
			sgt->sg[i].addr_lo =
			    cpu_to_be32(lower_32_bits(addr));
			qm_sg_entry_set64(&sgt->sg[i],
			    mtod(n, uintptr_t));
			++i;
		}

		n = n->m_next;
	}

	if (n != NULL && i == DPA_SGT_MAX_ENTRIES) {
	if (n != NULL && i == DPAA_SGT_MAX_ENTRIES) {
		struct mbuf *c;

		c = m_collapse(m, M_NOWAIT, DPA_SGT_MAX_ENTRIES);
		c = m_collapse(m, M_NOWAIT, DPAA_SGT_MAX_ENTRIES);
		if (c == NULL) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
			m_freem(m);
			uma_zfree(sc->sgt_zone, sgt);
			continue;
@ -177,12 +168,24 @@ repeat_with_collapsed_mbuf_chain:
		goto repeat_with_collapsed_mbuf_chain;
	}

	sgt->sg[i - 1].final = 1;
	sgt->sg[i - 1].cfg |= cpu_to_be32(QM_SG_FIN);
	sgt->m = m;
	priv = netdev_priv(&sc->mac_dev.net_dev);
	egress_fq = priv->egress_fqs[queue];
	fd.cmd |= qman_fq_fqid(priv->conf_fqs[queue]);
	qman_enqueue(egress_fq, &fd, QMAN_ENQUEUE_FLAG_WAIT);
	fd.cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));

	for (i = 0; i < DPAA_ENQUEUE_RETRIES; ++i) {
		err = qman_enqueue(egress_fq, &fd);
		if (err != -EBUSY) {
			break;
		}
	}

	if (unlikely(err < 0)) {
		if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
		m_freem(m);
		continue;
	}
}
}

@ -273,8 +276,10 @@ fman_mac_init_locked(struct fman_mac_softc *sc)
	error = dpa_eth_priv_start(&sc->mac_dev.net_dev);
	BSD_ASSERT(error == 0);

	if (sc->mii_softc != NULL) {
		mii_mediachg(sc->mii_softc);
		callout_reset(&sc->fman_mac_callout, hz, fman_mac_tick, sc);
	}

	fman_mac_set_multi(sc);
}
@ -351,7 +356,13 @@ fman_mac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = sc->mii_softc;

		if (mii != NULL) {
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		} else {
			error = EINVAL;
		}

		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
@ -393,8 +404,8 @@ int
fman_mac_dev_attach(device_t dev)
{
	struct fman_mac_softc *sc;
	struct fman_ivars *ivars;
	struct ifnet *ifp;
	struct phy_device *phy_dev;
	int error;

	sc = device_get_softc(dev);
@ -435,12 +446,11 @@ fman_mac_dev_attach(device_t dev)
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Attach the MII driver if necessary */
	ivars = device_get_ivars(dev);
	sc->phy_dev = fdt_phy_obtain(ivars->of_dev.dev.of_node->offset);
	if (sc->phy_dev != NULL) {
	phy_dev = sc->mac_dev.phy_dev;
	if (phy_dev != NULL) {
		error = mii_attach(dev, &sc->miibus, ifp,
		    fman_mac_media_change, fman_mac_media_status,
		    BMSR_DEFCAPMASK, sc->phy_dev->phy, MII_OFFSET_ANY, 0);
		    BMSR_DEFCAPMASK, phy_dev->mdio.addr, MII_OFFSET_ANY, 0);
		if (error != 0) {
			goto error_2;
		}
@ -487,28 +497,24 @@ int
fman_mac_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct fman_mac_softc *sc;
	struct fdt_phy_device *phy_dev;
	struct fdt_mdio_device *mdio_dev;
	struct phy_device *phy_dev;

	sc = device_get_softc(dev);
	phy_dev = sc->phy_dev;
	BSD_ASSERT(phy == phy_dev->phy);
	mdio_dev = phy_dev->mdio_dev;
	return ((*mdio_dev->read)(mdio_dev, phy, reg));
	phy_dev = sc->mac_dev.phy_dev;
	BSD_ASSERT(phy == phy_dev->mdio.addr);
	return (phy_read(phy_dev, reg));
}

int
fman_mac_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct fman_mac_softc *sc;
	struct fdt_phy_device *phy_dev;
	struct fdt_mdio_device *mdio_dev;
	struct phy_device *phy_dev;

	sc = device_get_softc(dev);
	phy_dev = sc->phy_dev;
	BSD_ASSERT(phy == phy_dev->phy);
	mdio_dev = phy_dev->mdio_dev;
	return ((*mdio_dev->write)(mdio_dev, phy, reg, val));
	phy_dev = sc->mac_dev.phy_dev;
	BSD_ASSERT(phy == phy_dev->mdio.addr);
	return (phy_write(phy_dev, reg, val));
}

void
@ -562,236 +568,12 @@ fman_mac_miibus_statchg(device_t dev)
	(*mac_dev->adjust_link)(mac_dev, speed);
}

static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
{
	struct bm_buffer bmb[8];
	u8 i;

	memset(bmb, 0, sizeof(bmb));

	for (i = 0; i < 8; ++i) {
		struct mbuf *m;

		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(m == NULL)) {
			goto cl_alloc_failed;
		}

		RTEMS_STATIC_ASSERT(DPA_BP_RAW_SIZE == MCLBYTES, DPA_BP_RAW_SIZE);
		*(struct mbuf **)(mtod(m, char *) + DPA_MBUF_POINTER_OFFSET) = m;

		bm_buffer_set64(&bmb[i], mtod(m, uintptr_t));
	}

release_bufs:
	/* Release the buffers. In case bman is busy, keep trying
	 * until successful. bman_release() is guaranteed to succeed
	 * in a reasonable amount of time
	 */
	while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
		cpu_relax();
	return i;

cl_alloc_failed:
	bm_buffer_set64(&bmb[i], 0);
	/* Avoid releasing a completely null buffer; bman_release() requires
	 * at least one buffer.
	 */
	if (likely(i))
		goto release_bufs;

	return 0;
}

/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
{
	int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
	*count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
}

int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
{
	int i;

	/* Give each CPU an allotment of "config_count" buffers */
#ifndef __rtems__
	for_each_possible_cpu(i) {
#else /* __rtems__ */
	for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
#endif /* __rtems__ */
		int j;

		/* Although we access another CPU's counters here
		 * we do it at boot time so it is safe
		 */
		for (j = 0; j < dpa_bp->config_count; j += 8)
			dpa_bp_add_8_bufs(dpa_bp, i);
	}
	return 0;
}

/* Add buffers/(pages) for Rx processing whenever bpool count falls below
 * REFILL_THRESHOLD.
 */
int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
{
	int count = *countptr;
	int new_bufs;

	if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
		do {
			new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
			if (unlikely(!new_bufs)) {
				/* Avoid looping forever if we've temporarily
				 * run out of memory. We'll try again at the
				 * next NAPI cycle.
				 */
				break;
			}
			count += new_bufs;
		} while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);

		*countptr = count;
		if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
			return -ENOMEM;
	}

	return 0;
}

static struct mbuf *
addr_to_mbuf(dma_addr_t addr)
{
	void *vaddr = phys_to_virt(addr);

	return (*(struct mbuf **)(vaddr + DPA_MBUF_POINTER_OFFSET));
}

static struct mbuf *
contig_fd_to_mbuf(const struct qm_fd *fd, struct ifnet *ifp)
{
	struct mbuf *m;
	ssize_t fd_off = dpa_fd_offset(fd);
	dma_addr_t addr = qm_fd_addr(fd);

	m = addr_to_mbuf(addr);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = dpa_fd_length(fd);
	m->m_data = mtod(m, char *) + fd_off;

	return (m);
}

static void
dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, dma_addr_t addr, int *count_ptr)
{
	struct bm_buffer bmb;

	bm_buffer_set64(&bmb, addr);

	while (bman_release(dpa_bp->pool, &bmb, 1, 0))
		cpu_relax();

	++(*count_ptr);
}

static struct mbuf *
sg_fd_to_mbuf(struct dpa_bp *dpa_bp, const struct qm_fd *fd,
    struct ifnet *ifp, int *count_ptr)
{
	ssize_t fd_off = dpa_fd_offset(fd);
	dma_addr_t addr = qm_fd_addr(fd);
	const struct qm_sg_entry *sgt;
	int i;
	int len;
	struct mbuf *m;
	struct mbuf *last;

	sgt = (const struct qm_sg_entry *)((char *)phys_to_virt(addr) + fd_off);
	len = 0;

	for (i = 0; i < DPA_SGT_MAX_ENTRIES; ++i) {
		dma_addr_t sg_addr;
		int sg_len;
		struct mbuf *n;

		BSD_ASSERT(sgt[i].extension == 0);
		BSD_ASSERT(dpa_bp == dpa_bpid2pool(sgt[i].bpid));

		sg_addr = qm_sg_addr(&sgt[i]);
		n = addr_to_mbuf(sg_addr);

		sg_len = sgt[i].length;
		len += sg_len;

		if (i == 0) {
			m = n;
		} else {
			last->m_next = n;
		}

		n->m_len = sg_len;
		m->m_data = mtod(m, char *) + sgt[i].offset;
		last = n;

		--(*count_ptr);

		if (sgt[i].final) {
			break;
		}
	}

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = len;

	dpa_bp_recycle_frag(dpa_bp, addr, count_ptr);

	return (m);
}

void
_dpa_rx(struct net_device *net_dev, struct qman_portal *portal,
    const struct dpa_priv_s *priv, struct dpa_percpu_priv_s *percpu_priv,
    const struct qm_fd *fd, u32 fqid, int *count_ptr)
{
	struct dpa_bp *dpa_bp;
	struct mbuf *m;
	struct ifnet *ifp;

	ifp = net_dev->ifp;

	if (unlikely(fd->status & FM_FD_STAT_RX_ERRORS) != 0) {
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		dpa_fd_release(net_dev, fd);
		return;
	}

	dpa_bp = priv->dpa_bp;
	BSD_ASSERT(dpa_bp == dpa_bpid2pool(fd->bpid));

	if (likely(fd->format == qm_fd_contig)) {
		m = contig_fd_to_mbuf(fd, ifp);
	} else {
		BSD_ASSERT(fd->format == qm_fd_sg);
		m = sg_fd_to_mbuf(dpa_bp, fd, ifp, count_ptr);
	}

	/* Account for either the contig buffer or the SGT buffer (depending on
	 * which case we were in) having been removed from the pool.
	 */
	(*count_ptr)--;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	(*ifp->if_input)(ifp, m);
}

void _dpa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd)
void dpaa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd)
{
	struct fman_mac_softc *sc;
	struct fman_mac_sgt *sgt;

	BSD_ASSERT(fd->format == qm_fd_sg);
	BSD_ASSERT(qm_fd_get_format(fd) == qm_fd_sg);

	sc = ifp->if_softc;
	sgt = (struct fman_mac_sgt *)qm_fd_addr(fd);
@ -43,8 +43,7 @@
#include <dev/mii/miivar.h>

#include <linux/netdevice.h>

#include <fdt_phy.h>
#include <linux/phy.h>

#include "mac.h"

@ -29,8 +29,6 @@

#include <machine/rtems-bsd-kernel-space.h>

#include <fdt_phy.h>

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/time.h>
@ -45,17 +43,13 @@

#include <bsp/fdt.h>

#include <linux/of_mdio.h>

#define MDIO_LOCK()	mtx_lock(&mdio.mutex)
#define MDIO_UNLOCK()	mtx_unlock(&mdio.mutex)

struct mdio_device {
	struct fdt_mdio_device base;
	SLIST_ENTRY(mdio_device) next;
	int node;
};

static struct {
	SLIST_HEAD(, mdio_device) instances;
	SLIST_HEAD(, mdio_bus) instances;
	struct mtx mutex;
} mdio = {
	.instances = SLIST_HEAD_INITIALIZER(mdio.instances)
@ -133,8 +127,8 @@ struct fman_mdio_regs {
#define MDIO_CTRL_REG_ADDR(x) ((x) & 0x1fU)
#define MDIO_CTRL_PHY_ADDR(x) (((x) & 0x1fU) << 5)

struct fman_mdio_device {
	struct mdio_device base;
struct fman_mdio_bus {
	struct mdio_bus base;
	volatile struct fman_mdio_regs *regs;
};

@ -163,14 +157,14 @@ fman_mdio_wait(volatile struct fman_mdio_regs *regs)
}

static int
fman_mdio_read(struct fdt_mdio_device *base, int phy, int reg)
fman_mdio_read(struct mdio_bus *base, int phy, int reg)
{
	struct fman_mdio_device *fm;
	struct fman_mdio_bus *fm;
	volatile struct fman_mdio_regs *regs;
	int val;
	int err;

	fm = (struct fman_mdio_device *)base;
	fm = (struct fman_mdio_bus *)base;
	regs = fm->regs;

	MDIO_LOCK();
@ -205,13 +199,13 @@ fman_mdio_read(struct fdt_mdio_device *base, int phy, int reg)
}

static int
fman_mdio_write(struct fdt_mdio_device *base, int phy, int reg, int val)
fman_mdio_write(struct mdio_bus *base, int phy, int reg, int val)
{
	struct fman_mdio_device *fm;
	struct fman_mdio_bus *fm;
	volatile struct fman_mdio_regs *regs;
	int err;

	fm = (struct fman_mdio_device *)base;
	fm = (struct fman_mdio_bus *)base;
	regs = fm->regs;

	MDIO_LOCK();
@ -238,26 +232,27 @@ fman_mdio_write(struct fdt_mdio_device *base, int phy, int reg, int val)
	return (0);
}

static struct mdio_device *
static struct mdio_bus *
create_fman_mdio(const void *fdt, int mdio_node)
{
	struct fman_mdio_device *fm = NULL;
	struct fman_mdio_bus *fm = NULL;

	fm = malloc(sizeof(*fm), M_TEMP, M_WAITOK | M_ZERO);
	if (fm == NULL) {
		return (NULL);
	}

	fm->base.read = fman_mdio_read;
	fm->base.write = fman_mdio_write;
	fm->base.node = mdio_node;
	fm->regs = (volatile struct fman_mdio_regs *)(uintptr_t)
	    fdt_get_address(fdt, mdio_node);
	fm->base.base.read = fman_mdio_read;
	fm->base.base.write = fman_mdio_write;

	return (&fm->base);
}

static struct mdio_device *
create_mdio_device(const void *fdt, int mdio_node)
static struct mdio_bus *
create_mdio_bus(const void *fdt, int mdio_node)
{

	if (fdt_node_check_compatible(fdt, mdio_node,
@ -271,33 +266,33 @@ create_mdio_device(const void *fdt, int mdio_node)
}

static int
find_mdio_device(const void *fdt, int mdio_node,
    struct fdt_phy_device *phy_dev)
find_mdio_bus(const void *fdt, int mdio_node,
    struct phy_device *phy_dev)
{
	struct mdio_device *mdio_dev = NULL;
	struct mdio_bus *mdio_bus = NULL;

	SLIST_FOREACH(mdio_dev, &mdio.instances, next) {
		if (mdio_dev->node == mdio_node) {
	SLIST_FOREACH(mdio_bus, &mdio.instances, next) {
		if (mdio_bus->node == mdio_node) {
			break;
		}
	}

	if (mdio_dev == NULL) {
		mdio_dev = create_mdio_device(fdt, mdio_node);
	if (mdio_bus == NULL) {
		mdio_bus = create_mdio_bus(fdt, mdio_node);
	}

	if (mdio_dev == NULL) {
	if (mdio_bus == NULL) {
		return (ENXIO);
	}

	phy_dev->mdio_dev = &mdio_dev->base;
	phy_dev->mdio.bus = mdio_bus;
	return (0);
}

static struct fdt_phy_device *
static struct phy_device *
phy_obtain(const void *fdt, int mdio_node, int phy)
{
	struct fdt_phy_device *phy_dev;
	struct phy_device *phy_dev;
	int err;

	phy_dev = malloc(sizeof(*phy_dev), M_TEMP, M_WAITOK | M_ZERO);
@ -305,9 +300,9 @@ phy_obtain(const void *fdt, int mdio_node, int phy)
		return (NULL);
	}

	phy_dev->phy = phy;
	phy_dev->mdio.addr = phy;
	MDIO_LOCK();
	err = find_mdio_device(fdt, mdio_node, phy_dev);
	err = find_mdio_bus(fdt, mdio_node, phy_dev);
	MDIO_UNLOCK();

	if (err != 0) {
@ -318,43 +313,25 @@ phy_obtain(const void *fdt, int mdio_node, int phy)
	return (phy_dev);
}

struct fdt_phy_device *
fdt_phy_obtain(int device_node)
struct phy_device *
of_phy_find_device(struct device_node *dn)
{
	const void *fdt;
	const fdt32_t *phandle;
	const fdt32_t *phy;
	int len;
	int node;
	int mdio_node;

	fdt = bsp_fdt_get();

	phandle = fdt_getprop(fdt, device_node, "phy-handle", &len);
	if (phandle == NULL || len != sizeof(*phandle)) {
		return (NULL);
	}

	node = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(*phandle));
	if (node < 0) {
		return (NULL);
	}

	phy = fdt_getprop(fdt, node, "reg", &len);
	phy = fdt_getprop(fdt, dn->offset, "reg", &len);
	if (phy == NULL || len != sizeof(*phy)) {
		return (NULL);
	}

	node = fdt_parent_offset(fdt, node);
	if (node < 0) {
	mdio_node = fdt_parent_offset(fdt, dn->offset);
	if (mdio_node < 0) {
		return (NULL);
	}

	return (phy_obtain(fdt, node, (int)fdt32_to_cpu(*phy)));
}

void
fdt_phy_release(struct fdt_phy_device *phy_dev)
{

	free(phy_dev, M_TEMP);
	return (phy_obtain(fdt, mdio_node, (int)fdt32_to_cpu(*phy)));
}
@ -88,7 +88,7 @@ fman_muram_offset_to_vbase(struct muram_info *muram, unsigned long offset)
	return (offset + muram->base);
}

int
unsigned long
fman_muram_alloc(struct muram_info *muram, size_t size)
{
	void *p;