Linux update to 4.12-rc1+
Linux baseline b23afd384801711ab6dbccd259cc14cb09a1dcaf.
Submodule linux-org updated: 2774c204cd...b23afd3848
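The functional core of this update is the DPAA traffic-class rework visible in the first hunks below: the driver now advertises DPAA_TC_NUM * DPAA_TC_TXQ_NUM Tx queues, and dpaa_assign_wq() derives a QMan work queue from the Tx queue index (WQ6 for best effort up to WQ0 for the highest priority). The standalone C sketch below only reproduces that index-to-WQ mapping for orientation; it is not part of the diff, NR_CPUS is given an illustrative value, and the helper name tx_idx_to_wq() is made up for the example. The authoritative logic is dpaa_assign_wq() and dpaa_setup_tc() in the hunks that follow.

/*
 * Minimal sketch of the Tx queue index -> QMan work queue mapping
 * introduced by this update. NR_CPUS is an illustrative value and
 * tx_idx_to_wq() is a hypothetical helper; the real code is
 * dpaa_assign_wq() in the diff below.
 */
#include <stdio.h>

#define NR_CPUS          4                /* illustrative value only */
#define DPAA_TC_NUM      4                /* prioritised traffic classes */
#define DPAA_TC_TXQ_NUM  NR_CPUS          /* Tx queues per traffic class */
#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)

/* Same priority ladder as the new dpaa_assign_wq(): traffic class 0 is
 * best effort (WQ6), traffic class 3 is the highest priority (WQ0). */
static int tx_idx_to_wq(int idx)
{
	static const int tc_to_wq[DPAA_TC_NUM] = { 6, 2, 1, 0 };

	return tc_to_wq[idx / DPAA_TC_TXQ_NUM];
}

int main(void)
{
	int idx;

	for (idx = 0; idx < DPAA_ETH_TXQ_NUM; idx++)
		printf("Tx FQ %2d -> traffic class %d -> WQ %d\n",
		       idx, idx / DPAA_TC_TXQ_NUM, tx_idx_to_wq(idx));
	return 0;
}

On the receive side the same series enables the FMan hardware parser and adds rx_csum_offload(), which reports CHECKSUM_UNNECESSARY only when NETIF_F_RXCSUM is set and the frame descriptor carries FM_FD_STAT_L4CV; everything else falls back to CHECKSUM_NONE.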
@@ -143,6 +143,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP 0x20

/* FD status field indicating whether the FM Parser has attempted to validate
* the L4 csum of the frame.
* Note that having this bit set doesn't necessarily imply that the checksum
* is valid. One would have to check the parse results to find that out.
*/
#define FM_FD_STAT_L4CV 0x00000004

#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */

@@ -267,6 +274,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
* For conformity, we'll still declare GSO explicitly.
*/
net_dev->features |= NETIF_F_GSO;
net_dev->features |= NETIF_F_RXCSUM;

net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
/* we do not want shared skbs on TX */
@@ -372,6 +380,45 @@ static void dpaa_get_stats64(struct net_device *net_dev,
}
}

static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
struct tc_to_netdev *tc)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
u8 num_tc;
int i;

if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;

tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
num_tc = tc->mqprio->num_tc;

if (num_tc == priv->num_tc)
return 0;

if (!num_tc) {
netdev_reset_tc(net_dev);
goto out;
}

if (num_tc > DPAA_TC_NUM) {
netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
DPAA_TC_NUM);
return -EINVAL;
}

netdev_set_num_tc(net_dev, num_tc);

for (i = 0; i < num_tc; i++)
netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
i * DPAA_TC_TXQ_NUM);

out:
priv->num_tc = num_tc ? : 1;
netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
return 0;
}

static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{
struct platform_device *of_dev;
@@ -596,16 +643,18 @@ static void dpaa_bps_free(struct dpaa_priv *priv)

/* Use multiple WQs for FQ assignment:
* - Tx Confirmation queues go to WQ1.
* - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
* to be scheduled, in case there are many more FQs in WQ3).
* - Rx Default and Tx queues go to WQ3 (no differentiation between
* Rx and Tx traffic).
* - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
* to be scheduled, in case there are many more FQs in WQ6).
* - Rx Default goes to WQ6.
* - Tx queues go to different WQs depending on their priority. Equal
* chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
* WQ0 (highest priority).
* This ensures that Tx-confirmed buffers are timely released. In particular,
* it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
* are greatly outnumbered by other FQs in the system, while
* dequeue scheduling is round-robin.
*/
static inline void dpaa_assign_wq(struct dpaa_fq *fq)
static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
{
switch (fq->fq_type) {
case FQ_TYPE_TX_CONFIRM:
@@ -614,11 +663,33 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq)
break;
case FQ_TYPE_RX_ERROR:
case FQ_TYPE_TX_ERROR:
fq->wq = 2;
fq->wq = 5;
break;
case FQ_TYPE_RX_DEFAULT:
fq->wq = 6;
break;
case FQ_TYPE_TX:
fq->wq = 3;
switch (idx / DPAA_TC_TXQ_NUM) {
case 0:
/* Low priority (best effort) */
fq->wq = 6;
break;
case 1:
/* Medium priority */
fq->wq = 2;
break;
case 2:
/* High priority */
fq->wq = 1;
break;
case 3:
/* Very high priority */
fq->wq = 0;
break;
default:
WARN(1, "Too many TX FQs: more than %d!\n",
DPAA_ETH_TXQ_NUM);
}
break;
default:
WARN(1, "Invalid FQ type %d for FQID %d!\n",
@@ -646,7 +717,7 @@ static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
}

for (i = 0; i < count; i++)
dpaa_assign_wq(dpaa_fq + i);
dpaa_assign_wq(dpaa_fq + i, i);

return dpaa_fq;
}
@@ -968,7 +1039,7 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
* Tx Confirmation FQs.
*/
if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);

/* FQ placement */
initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
@@ -1058,7 +1129,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
/* Initialization common to all ingress queues */
if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
QM_FQCTRL_CTXASTASHING);
initfq.fqd.context_a.stashing.exclusive =
QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
QM_STASHING_EXCL_ANNOTATION;
@@ -1138,9 +1210,9 @@ static int dpaa_fq_free(struct device *dev, struct list_head *list)
}
#endif /* __rtems__ */

static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
struct dpaa_fq *defq,
struct dpaa_buffer_layout *buf_layout)
static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
struct dpaa_fq *defq,
struct dpaa_buffer_layout *buf_layout)
{
struct fman_buffer_prefix_content buf_prefix_content;
struct fman_port_params params;
@@ -1159,23 +1231,29 @@ static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
params.specific_params.non_rx_params.dflt_fqid = defq->fqid;

err = fman_port_config(port, &params);
if (err)
if (err) {
pr_err("%s: fman_port_config failed\n", __func__);
return err;
}

err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
if (err)
if (err) {
pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
__func__);
return err;
}

err = fman_port_init(port);
if (err)
pr_err("%s: fm_port_init failed\n", __func__);

return err;
}

static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
size_t count, struct dpaa_fq *errq,
struct dpaa_fq *defq,
struct dpaa_buffer_layout *buf_layout)
static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
size_t count, struct dpaa_fq *errq,
struct dpaa_fq *defq,
struct dpaa_buffer_layout *buf_layout)
{
struct fman_buffer_prefix_content buf_prefix_content;
struct fman_port_rx_params *rx_p;
@@ -1203,32 +1281,44 @@ static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
}

err = fman_port_config(port, &params);
if (err)
if (err) {
pr_err("%s: fman_port_config failed\n", __func__);
return err;
}

err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
if (err)
if (err) {
pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
__func__);
return err;
}

err = fman_port_init(port);
if (err)
pr_err("%s: fm_port_init failed\n", __func__);

return err;
}

static void dpaa_eth_init_ports(struct mac_device *mac_dev,
struct dpaa_bp **bps, size_t count,
struct fm_port_fqs *port_fqs,
struct dpaa_buffer_layout *buf_layout,
struct device *dev)
static int dpaa_eth_init_ports(struct mac_device *mac_dev,
struct dpaa_bp **bps, size_t count,
struct fm_port_fqs *port_fqs,
struct dpaa_buffer_layout *buf_layout,
struct device *dev)
{
struct fman_port *rxport = mac_dev->port[RX];
struct fman_port *txport = mac_dev->port[TX];
int err;

dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
port_fqs->tx_defq, &buf_layout[TX]);
dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
port_fqs->rx_defq, &buf_layout[RX]);
err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
port_fqs->tx_defq, &buf_layout[TX]);
if (err)
return err;

err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
port_fqs->rx_defq, &buf_layout[RX]);

return err;
}

static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
@@ -1639,6 +1729,23 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
return skb;
}

static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
{
/* The parser has run and performed L4 checksum validation.
* We know there were no parser errors (and implicitly no
* L4 csum error), otherwise we wouldn't be here.
*/
if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
(be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
return CHECKSUM_UNNECESSARY;

/* We're here because either the parser didn't run or the L4 checksum
* was not verified. This may include the case of a UDP frame with
* checksum zero or an L4 proto other than TCP/UDP
*/
return CHECKSUM_NONE;
}

/* Build a linear skb around the received buffer.
* We are guaranteed there is enough room at the end of the data buffer to
* accommodate the shared info area of the skb.
@@ -1669,7 +1776,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
skb_reserve(skb, fd_off);
skb_put(skb, qm_fd_get_length(fd));

skb->ip_summed = CHECKSUM_NONE;
skb->ip_summed = rx_csum_offload(priv, fd);

return skb;
@@ -1729,7 +1836,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
if (WARN_ON(unlikely(!skb)))
goto free_buffers;

skb->ip_summed = CHECKSUM_NONE;
skb->ip_summed = rx_csum_offload(priv, fd);

/* Make sure forwarded skbs will have enough space
* on Tx, if extra headers are added.
@@ -2366,7 +2473,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
#endif /* __rtems__ */
struct net_device *net_dev;
#ifndef __rtems__
u32 fd_status = fd->status;
u32 fd_status;
#endif /* __rtems__ */
struct dpaa_bp *dpaa_bp;
struct dpaa_priv *priv;
@@ -2673,6 +2780,7 @@ static const struct net_device_ops dpaa_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
.ndo_do_ioctl = dpaa_ioctl,
.ndo_setup_tc = dpaa_setup_tc,
};

static int dpaa_napi_add(struct net_device *net_dev)
@@ -2975,8 +3083,10 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);

/* All real interfaces need their ports initialized */
dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
&priv->buf_layout[0], dev);
err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
&priv->buf_layout[0], dev);
if (err)
goto init_ports_failed;

priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
if (!priv->percpu_priv) {
@@ -2994,6 +3104,9 @@ dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
}

#ifndef __rtems__
priv->num_tc = 1;
netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);

/* Initialize NAPI */
err = dpaa_napi_add(net_dev);
if (err < 0)
@@ -3017,6 +3130,7 @@ napi_add_failed:
#endif /* __rtems__ */
dpaa_napi_del(net_dev);
alloc_percpu_failed:
init_ports_failed:
#ifndef __rtems__
dpaa_fq_free(dev, &priv->dpaa_fq_list);
#endif /* __rtems__ */

@@ -39,7 +39,12 @@
#include "mac.h"
#include "dpaa_eth_trace.h"

#define DPAA_ETH_TXQ_NUM NR_CPUS
/* Number of prioritised traffic classes */
#define DPAA_TC_NUM 4
/* Number of Tx queues per traffic class */
#define DPAA_TC_TXQ_NUM NR_CPUS
/* Total number of Tx queues */
#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)

#ifndef __rtems__
#define DPAA_BPS_NUM 3 /* number of bpools per interface */
@@ -161,6 +166,7 @@ struct dpaa_priv {
struct list_head dpaa_fq_list;

#ifndef __rtems__
u8 num_tc;
u32 msg_enable; /* net_device message level */
#endif /* __rtems__ */

@@ -67,6 +67,7 @@
#define DMA_OFFSET 0x000C2000
#define FPM_OFFSET 0x000C3000
#define IMEM_OFFSET 0x000C4000
#define HWP_OFFSET 0x000C7000
#define CGP_OFFSET 0x000DB000

/* Exceptions bit map */
@@ -226,6 +227,9 @@

#define QMI_GS_HALT_NOT_BUSY 0x00000002

/* HWP defines */
#define HWP_RPIMAC_PEN 0x00000001

/* IRAM defines */
#define IRAM_IADD_AIE 0x80000000
#define IRAM_READY 0x80000000
@@ -483,6 +487,12 @@ struct fman_dma_regs {
u32 res00e0[0x400 - 56];
};

struct fman_hwp_regs {
u32 res0000[0x844 / 4]; /* 0x000..0x843 */
u32 fmprrpimac; /* FM Parser Internal memory access control */
u32 res[(0x1000 - 0x848) / 4]; /* 0x848..0xFFF */
};

/* Structure that holds current FMan state.
* Used for saving run time information.
*/
@@ -616,6 +626,7 @@ struct fman {
struct fman_bmi_regs __iomem *bmi_regs;
struct fman_qmi_regs __iomem *qmi_regs;
struct fman_dma_regs __iomem *dma_regs;
struct fman_hwp_regs __iomem *hwp_regs;
fman_exceptions_cb *exception_cb;
fman_bus_error_cb *bus_error_cb;
/* Spinlock for FMan use */
@@ -1009,6 +1020,12 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
}

static void hwp_init(struct fman_hwp_regs __iomem *hwp_rg)
{
/* enable HW Parser */
iowrite32be(HWP_RPIMAC_PEN, &hwp_rg->fmprrpimac);
}

static int enable(struct fman *fman, struct fman_cfg *cfg)
{
u32 cfg_reg = 0;
@@ -1205,7 +1222,7 @@ static int fill_soc_specific_params(struct fman_state_struct *state)
state->max_num_of_open_dmas = 32;
state->fm_port_num_of_cg = 256;
state->num_of_rx_ports = 6;
state->total_fifo_size = 122 * 1024;
state->total_fifo_size = 136 * 1024;
break;

case 2:
@@ -1805,6 +1822,7 @@ static int fman_config(struct fman *fman)
fman->bmi_regs = base_addr + BMI_OFFSET;
fman->qmi_regs = base_addr + QMI_OFFSET;
fman->dma_regs = base_addr + DMA_OFFSET;
fman->hwp_regs = base_addr + HWP_OFFSET;
fman->base_addr = base_addr;

spin_lock_init(&fman->spinlock);
@@ -2076,6 +2094,9 @@ static int fman_init(struct fman *fman)
/* Init QMI Registers */
qmi_init(fman->qmi_regs, fman->cfg);

/* Init HW Parser */
hwp_init(fman->hwp_regs);

err = enable(fman, cfg);
if (err != 0)
return err;

@@ -143,14 +143,14 @@ enum fman_exceptions {
struct fman_prs_result {
u8 lpid; /* Logical port id */
u8 shimr; /* Shim header result */
u16 l2r; /* Layer 2 result */
u16 l3r; /* Layer 3 result */
__be16 l2r; /* Layer 2 result */
__be16 l3r; /* Layer 3 result */
u8 l4r; /* Layer 4 result */
u8 cplan; /* Classification plan id */
u16 nxthdr; /* Next Header */
u16 cksum; /* Running-sum */
__be16 nxthdr; /* Next Header */
__be16 cksum; /* Running-sum */
/* Flags&fragment-offset field of the last IP-header */
u16 flags_frag_off;
__be16 flags_frag_off;
/* Routing type field of a IPV6 routing extension header */
u8 route_type;
/* Routing Extension Header Present; last bit is IP valid */

@@ -385,6 +385,9 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,

/* check RGMII support */
if (iface == PHY_INTERFACE_MODE_RGMII ||
iface == PHY_INTERFACE_MODE_RGMII_ID ||
iface == PHY_INTERFACE_MODE_RGMII_RXID ||
iface == PHY_INTERFACE_MODE_RGMII_TXID ||
iface == PHY_INTERFACE_MODE_RMII)
if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
return -EINVAL;
@@ -394,7 +397,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
return -EINVAL;

is_rgmii = iface == PHY_INTERFACE_MODE_RGMII;
is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
iface == PHY_INTERFACE_MODE_RGMII_ID ||
iface == PHY_INTERFACE_MODE_RGMII_RXID ||
iface == PHY_INTERFACE_MODE_RGMII_TXID;
is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;

@@ -447,7 +447,10 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
break;
default:
tmp |= IF_MODE_GMII;
if (phy_if == PHY_INTERFACE_MODE_RGMII)
if (phy_if == PHY_INTERFACE_MODE_RGMII ||
phy_if == PHY_INTERFACE_MODE_RGMII_ID ||
phy_if == PHY_INTERFACE_MODE_RGMII_RXID ||
phy_if == PHY_INTERFACE_MODE_RGMII_TXID)
tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
}
iowrite32be(tmp, &regs->if_mode);

@@ -36,6 +36,7 @@
#include "fman_mac.h"

#include <linux/netdevice.h>
#include <linux/phy_fixed.h>

struct fman_mac *memac_config(struct fman_mac_params *params);
int memac_set_promiscuous(struct fman_mac *memac, bool new_val);

@@ -66,6 +66,7 @@

#define BMI_PORT_REGS_OFFSET 0
#define QMI_PORT_REGS_OFFSET 0x400
#define HWP_PORT_REGS_OFFSET 0x800

/* Default values */
#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \
@@ -186,7 +187,7 @@
#define NIA_ENG_BMI 0x00500000
#define NIA_ENG_QMI_ENQ 0x00540000
#define NIA_ENG_QMI_DEQ 0x00580000

#define NIA_ENG_HWP 0x00440000
#define NIA_BMI_AC_ENQ_FRAME 0x00000002
#define NIA_BMI_AC_TX_RELEASE 0x000002C0
#define NIA_BMI_AC_RELEASE 0x000000C0
@@ -321,6 +322,19 @@ struct fman_port_qmi_regs {
u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */
};

#define HWP_HXS_COUNT 16
#define HWP_HXS_PHE_REPORT 0x00000800
#define HWP_HXS_PCAC_PSTAT 0x00000100
#define HWP_HXS_PCAC_PSTOP 0x00000001
struct fman_port_hwp_regs {
struct {
u32 ssa; /* Soft Sequence Attachment */
u32 lcv; /* Line-up Enable Confirmation Mask */
} pmda[HWP_HXS_COUNT]; /* Parse Memory Direct Access Registers */
u32 reserved080[(0x3f8 - 0x080) / 4]; /* (0x080-0x3f7) */
u32 fmpr_pcac; /* Configuration Access Control */
};

/* QMI dequeue prefetch modes */
enum fman_port_deq_prefetch {
FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
@@ -440,6 +454,7 @@ struct fman_port {

union fman_port_bmi_regs __iomem *bmi_regs;
struct fman_port_qmi_regs __iomem *qmi_regs;
struct fman_port_hwp_regs __iomem *hwp_regs;

struct fman_sp_buffer_offsets buffer_offsets;

@@ -525,9 +540,12 @@ static int init_bmi_rx(struct fman_port *port)
/* NIA */
tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;

tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
tmp |= NIA_ENG_HWP;
iowrite32be(tmp, &regs->fmbm_rfne);

/* Parser Next Engine NIA */
iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME, &regs->fmbm_rfpne);

/* Enqueue NIA */
iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);

@@ -669,6 +687,50 @@ static int init_qmi(struct fman_port *port)
return 0;
}

static void stop_port_hwp(struct fman_port *port)
{
struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
int cnt = 100;

iowrite32be(HWP_HXS_PCAC_PSTOP, &regs->fmpr_pcac);

while (cnt-- > 0 &&
(ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
udelay(10);
if (!cnt)
pr_err("Timeout stopping HW Parser\n");
}

static void start_port_hwp(struct fman_port *port)
{
struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
int cnt = 100;

iowrite32be(0, &regs->fmpr_pcac);

while (cnt-- > 0 &&
!(ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
udelay(10);
if (!cnt)
pr_err("Timeout starting HW Parser\n");
}

static void init_hwp(struct fman_port *port)
{
struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
int i;

stop_port_hwp(port);

for (i = 0; i < HWP_HXS_COUNT; i++) {
/* enable HXS error reporting into FD[STATUS] PHE */
iowrite32be(0x00000000, &regs->pmda[i].ssa);
iowrite32be(0xffffffff, &regs->pmda[i].lcv);
}

start_port_hwp(port);
}

static int init(struct fman_port *port)
{
int err;
@@ -677,6 +739,8 @@ static int init(struct fman_port *port)
switch (port->port_type) {
case FMAN_PORT_TYPE_RX:
err = init_bmi_rx(port);
if (!err)
init_hwp(port);
break;
case FMAN_PORT_TYPE_TX:
err = init_bmi_tx(port);
@@ -690,7 +754,8 @@ static int init(struct fman_port *port)

/* Init QMI registers */
err = init_qmi(port);
return err;
if (err)
return err;

return 0;
}
@@ -1251,7 +1316,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
/* Allocate the FM driver's parameters structure */
port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
if (!port->cfg)
goto err_params;
return -EINVAL;

/* Initialize FM port parameters which will be kept by the driver */
port->port_type = port->dts_params.type;
@@ -1280,6 +1345,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
/* set memory map pointers */
port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
port->hwp_regs = base_addr + HWP_PORT_REGS_OFFSET;

port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
/* resource distribution. */
@@ -1331,8 +1397,6 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)

err_port_cfg:
kfree(port->cfg);
err_params:
kfree(port);
return -EINVAL;
}
EXPORT_SYMBOL(fman_port_config);

@@ -1370,6 +1370,7 @@ static void qm_congestion_task(struct work_struct *work)
if (!qm_mc_result_timeout(&p->p, &mcr)) {
spin_unlock(&p->cgr_lock);
dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
qman_p_irqsource_add(p, QM_PIRQ_CSCI);
return;
}
/* mask out the ones I'm not interested in */
@@ -1384,6 +1385,7 @@ static void qm_congestion_task(struct work_struct *work)
if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
spin_unlock(&p->cgr_lock);
qman_p_irqsource_add(p, QM_PIRQ_CSCI);
}

static void qm_mr_process_task(struct work_struct *work)
@@ -1443,12 +1445,14 @@ static void qm_mr_process_task(struct work_struct *work)
}

qm_mr_cci_consume(&p->p, num);
qman_p_irqsource_add(p, QM_PIRQ_MRI);
preempt_enable();
}

static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
if (is & QM_PIRQ_CSCI) {
qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
queue_work_on(smp_processor_id(), qm_portal_wq,
&p->congestion_work);
}
@@ -1460,6 +1464,7 @@ static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
}

if (is & QM_PIRQ_MRI) {
qman_p_irqsource_remove(p, QM_PIRQ_MRI);
queue_work_on(smp_processor_id(), qm_portal_wq,
&p->mr_work);
}
@@ -2051,8 +2056,7 @@ out:
return ret;
}

static int qman_query_fq_np(struct qman_fq *fq,
struct qm_mcr_queryfq_np *np)
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{
union qm_mc_command *mcc;
union qm_mc_result *mcr;
@@ -2078,6 +2082,7 @@ out:
put_affine_portal();
return ret;
}
EXPORT_SYMBOL(qman_query_fq_np);

static int qman_query_cgr(struct qman_cgr *cgr,
struct qm_mcr_querycgr *cgrd)

@@ -46,6 +46,8 @@ u16 qman_ip_rev;
EXPORT_SYMBOL(qman_ip_rev);
u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
EXPORT_SYMBOL(qm_channel_pool1);
u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
EXPORT_SYMBOL(qm_channel_caam);

/* Register offsets */
#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
@@ -754,8 +756,10 @@ static int fsl_qman_probe(struct platform_device *pdev)
return -ENODEV;
}

if ((qman_ip_rev & 0xff00) >= QMAN_REV30)
if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
}

ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
WARN_ON(ret);

@@ -33,6 +33,7 @@
#include "dpaa_sys.h"

#include <soc/fsl/qman.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>

#if defined(CONFIG_FSL_PAMU)
@@ -89,67 +90,6 @@ static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
}

/* "Query FQ Non-Programmable Fields" */

struct qm_mcr_queryfq_np {
u8 verb;
u8 result;
u8 __reserved1;
u8 state; /* QM_MCR_NP_STATE_*** */
u32 fqd_link; /* 24-bit, _res2[24-31] */
u16 odp_seq; /* 14-bit, _res3[14-15] */
u16 orp_nesn; /* 14-bit, _res4[14-15] */
u16 orp_ea_hseq; /* 15-bit, _res5[15] */
u16 orp_ea_tseq; /* 15-bit, _res6[15] */
u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
u8 __reserved2[5];
u8 is; /* 1-bit, _res12[1-7] */
u16 ics_surp;
u32 byte_cnt;
u32 frm_cnt; /* 24-bit, _res13[24-31] */
u32 __reserved3;
u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
u16 __reserved4;
u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
} __packed;

#define QM_MCR_NP_STATE_FE 0x10
#define QM_MCR_NP_STATE_R 0x08
#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
#define QM_MCR_NP_STATE_OOS 0x00
#define QM_MCR_NP_STATE_RETIRED 0x01
#define QM_MCR_NP_STATE_TEN_SCHED 0x02
#define QM_MCR_NP_STATE_TRU_SCHED 0x03
#define QM_MCR_NP_STATE_PARKED 0x04
#define QM_MCR_NP_STATE_ACTIVE 0x05
#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */

enum qm_mcr_queryfq_np_masks {
qm_mcr_fqd_link_mask = BIT(24)-1,
qm_mcr_odp_seq_mask = BIT(14)-1,
qm_mcr_orp_nesn_mask = BIT(14)-1,
qm_mcr_orp_ea_hseq_mask = BIT(15)-1,
qm_mcr_orp_ea_tseq_mask = BIT(15)-1,
qm_mcr_orp_ea_hptr_mask = BIT(24)-1,
qm_mcr_orp_ea_tptr_mask = BIT(24)-1,
qm_mcr_pfdr_hptr_mask = BIT(24)-1,
qm_mcr_pfdr_tptr_mask = BIT(24)-1,
qm_mcr_is_mask = BIT(1)-1,
qm_mcr_frm_cnt_mask = BIT(24)-1,
};
#define qm_mcr_np_get(np, field) \
((np)->field & (qm_mcr_##field##_mask))

/* Congestion Groups */

/*
@@ -273,42 +213,6 @@ const struct qm_portal_config *qman_destroy_affine_portal(void);
*/
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);

/*
* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
* NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
* FQID(n) to fill in the frame queue ID.
*/
#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
#define QM_VDQCR_EXACT 0x40000000
#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)

#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */

/*
* qman_volatile_dequeue - Issue a volatile dequeue command
* @fq: the frame queue object to dequeue from
* @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
* @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
*
* Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
* The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
* the VDQCR is already in use, otherwise returns non-zero for failure. If
* QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
* the VDQCR command has finished executing (ie. once the callback for the last
* DQRR entry resulting from the VDQCR command has been called). If not using
* the FINISH flag, completion can be determined either by detecting the
* presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
* in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
* for the QMAN_FQ_STATE_VDQCR bit to disappear.
*/
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);

int qman_alloc_fq_table(u32 num_fqids);

/* QMan s/w corenet portal, low-level i/face */

@@ -37,8 +37,11 @@
/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
#define QMAN_CHANNEL_CAAM 0x80
#define QMAN_CHANNEL_POOL1_REV3 0x401
#define QMAN_CHANNEL_CAAM_REV3 0x840
extern u16 qm_channel_pool1;
extern u16 qm_channel_caam;

/* Portal processing (interrupt) sources */
#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
@@ -166,6 +169,7 @@ static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
#define qm_fd_set_contig_big(fd, len) \
qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
#define qm_fd_set_compound(fd, len) qm_fd_set_param(fd, qm_fd_compound, 0, len)

static inline void qm_fd_clear_fd(struct qm_fd *fd)
{
@@ -640,6 +644,7 @@ struct qm_mcc_initcgr {
#define QM_CGR_WE_MODE 0x0001

#define QMAN_CGR_FLAG_USE_INIT 0x00000001
#define QMAN_CGR_MODE_FRAME 0x00000001

/* Portal and Frame Queues */
/* Represents a managed portal */
@@ -792,6 +797,84 @@ struct qman_cgr {
#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */

/*
* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
* NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
* FQID(n) to fill in the frame queue ID.
*/
#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
#define QM_VDQCR_EXACT 0x40000000
#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)

#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */

/* "Query FQ Non-Programmable Fields" */
struct qm_mcr_queryfq_np {
u8 verb;
u8 result;
u8 __reserved1;
u8 state; /* QM_MCR_NP_STATE_*** */
u32 fqd_link; /* 24-bit, _res2[24-31] */
u16 odp_seq; /* 14-bit, _res3[14-15] */
u16 orp_nesn; /* 14-bit, _res4[14-15] */
u16 orp_ea_hseq; /* 15-bit, _res5[15] */
u16 orp_ea_tseq; /* 15-bit, _res6[15] */
u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
u8 __reserved2[5];
u8 is; /* 1-bit, _res12[1-7] */
u16 ics_surp;
u32 byte_cnt;
u32 frm_cnt; /* 24-bit, _res13[24-31] */
u32 __reserved3;
u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
u16 __reserved4;
u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
} __packed;

#define QM_MCR_NP_STATE_FE 0x10
#define QM_MCR_NP_STATE_R 0x08
#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
#define QM_MCR_NP_STATE_OOS 0x00
#define QM_MCR_NP_STATE_RETIRED 0x01
#define QM_MCR_NP_STATE_TEN_SCHED 0x02
#define QM_MCR_NP_STATE_TRU_SCHED 0x03
#define QM_MCR_NP_STATE_PARKED 0x04
#define QM_MCR_NP_STATE_ACTIVE 0x05
#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */

enum qm_mcr_queryfq_np_masks {
qm_mcr_fqd_link_mask = BIT(24) - 1,
qm_mcr_odp_seq_mask = BIT(14) - 1,
qm_mcr_orp_nesn_mask = BIT(14) - 1,
qm_mcr_orp_ea_hseq_mask = BIT(15) - 1,
qm_mcr_orp_ea_tseq_mask = BIT(15) - 1,
qm_mcr_orp_ea_hptr_mask = BIT(24) - 1,
qm_mcr_orp_ea_tptr_mask = BIT(24) - 1,
qm_mcr_pfdr_hptr_mask = BIT(24) - 1,
qm_mcr_pfdr_tptr_mask = BIT(24) - 1,
qm_mcr_is_mask = BIT(1) - 1,
qm_mcr_frm_cnt_mask = BIT(24) - 1,
};

#define qm_mcr_np_get(np, field) \
((np)->field & (qm_mcr_##field##_mask))

/* Portal Management */
/**
* qman_p_irqsource_add - add processing sources to be interrupt-driven
@@ -966,6 +1049,25 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags);
*/
int qman_oos_fq(struct qman_fq *fq);

/*
* qman_volatile_dequeue - Issue a volatile dequeue command
* @fq: the frame queue object to dequeue from
* @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
* @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
*
* Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
* The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
* the VDQCR is already in use, otherwise returns non-zero for failure. If
* QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
* the VDQCR command has finished executing (ie. once the callback for the last
* DQRR entry resulting from the VDQCR command has been called). If not using
* the FINISH flag, completion can be determined either by detecting the
* presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
* in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
* for the QMAN_FQ_STATE_VDQCR bit to disappear.
*/
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);

/**
* qman_enqueue - Enqueue a frame to a frame queue
* @fq: the frame queue object to enqueue to
@@ -997,6 +1099,13 @@ int qman_alloc_fqid_range(u32 *result, u32 count);
*/
int qman_release_fqid(u32 fqid);

/**
* qman_query_fq_np - Queries non-programmable FQD fields
* @fq: the frame queue object to be queried
* @np: storage for the queried FQD fields
*/
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);

/* Pool-channel management */
/**
* qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs