xdp: Rename convert_to_xdp_frame in xdp_convert_buff_to_frame
In order to use the standard 'xdp' prefix, rename the convert_to_xdp_frame() utility routine to xdp_convert_buff_to_frame() and replace all occurrences.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Link: https://lore.kernel.org/bpf/6344f739be0d1a08ab2b9607584c4d5478c8c083.1590698295.git.lorenzo@kernel.org
commit 1b698fa5d8
parent fc37987265
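For context, a minimal sketch of the calling pattern this rename touches: a driver's XDP_TX path converts the received xdp_buff into an xdp_frame before queueing it for transmit, bailing out when the conversion fails. mydrv_xmit_back(), mydrv_queue_one(), struct mydrv_priv and MYDRV_XDP_DROPPED are hypothetical stand-ins for the per-driver helpers seen in the hunks below, not kernel symbols.

#include <linux/netdevice.h>
#include <net/xdp.h>

#define MYDRV_XDP_DROPPED	1	/* hypothetical per-driver verdict */

struct mydrv_priv;			/* hypothetical driver private state */
u32 mydrv_queue_one(struct mydrv_priv *priv, struct xdp_frame *xdpf);

static u32 mydrv_xmit_back(struct mydrv_priv *priv, struct xdp_buff *xdp)
{
	/* Renamed helper: this call was convert_to_xdp_frame() before
	 * this patch. NULL means the frame metadata did not fit.
	 */
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return MYDRV_XDP_DROPPED;

	return mydrv_queue_one(priv, xdpf);
}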
@@ -263,7 +263,7 @@ static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
 	dma_addr_t dma = 0;
 	u32 size;
 
-	tx_info->xdpf = convert_to_xdp_frame(xdp);
+	tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
 	size = tx_info->xdpf->len;
 	ena_buf = tx_info->bufs;
 
@@ -2167,7 +2167,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
 
 int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
 {
-	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
 
 	if (unlikely(!xdpf))
 		return I40E_XDP_CONSUMED;
@@ -254,7 +254,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
  */
 int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
 {
-	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
 
 	if (unlikely(!xdpf))
 		return ICE_XDP_CONSUMED;
@@ -2215,7 +2215,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 	case XDP_PASS:
 		break;
 	case XDP_TX:
-		xdpf = convert_to_xdp_frame(xdp);
+		xdpf = xdp_convert_buff_to_frame(xdp);
 		if (unlikely(!xdpf)) {
 			result = IXGBE_XDP_CONSUMED;
 			break;
@@ -107,7 +107,7 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
 	case XDP_PASS:
 		break;
 	case XDP_TX:
-		xdpf = convert_to_xdp_frame(xdp);
+		xdpf = xdp_convert_buff_to_frame(xdp);
 		if (unlikely(!xdpf)) {
 			result = IXGBE_XDP_CONSUMED;
 			break;
@@ -2073,7 +2073,7 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
 	int cpu;
 	u32 ret;
 
-	xdpf = convert_to_xdp_frame(xdp);
+	xdpf = xdp_convert_buff_to_frame(xdp);
 	if (unlikely(!xdpf))
 		return MVNETA_XDP_DROPPED;
 
@@ -64,7 +64,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 	struct xdp_frame *xdpf;
 	dma_addr_t dma_addr;
 
-	xdpf = convert_to_xdp_frame(xdp);
+	xdpf = xdp_convert_buff_to_frame(xdp);
 	if (unlikely(!xdpf))
 		return false;
 
@@ -97,10 +97,10 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 		xdpi.frame.xdpf = xdpf;
 		xdpi.frame.dma_addr = dma_addr;
 	} else {
-		/* Driver assumes that convert_to_xdp_frame returns an xdp_frame
-		 * that points to the same memory region as the original
-		 * xdp_buff. It allows to map the memory only once and to use
-		 * the DMA_BIDIRECTIONAL mode.
+		/* Driver assumes that xdp_convert_buff_to_frame returns
+		 * an xdp_frame that points to the same memory region as
+		 * the original xdp_buff. It allows to map the memory only
+		 * once and to use the DMA_BIDIRECTIONAL mode.
 		 */
 
 		xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
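The mlx5 comment above is worth unpacking: xdp_convert_buff_to_frame() does not copy the packet. It carves the frame metadata out of the buffer's own headroom and returns a pointer into the same page, which is why a single DMA_BIDIRECTIONAL mapping can serve both RX and TX. A simplified sketch of that layout logic follows; sketch_buff_to_frame() is an illustrative name, and the sketch assumes the v5.7-era xdp_buff/xdp_frame fields while omitting the metadata and mem-info handling the real helper performs.

#include <net/xdp.h>

static struct xdp_frame *sketch_buff_to_frame(struct xdp_buff *xdp)
{
	struct xdp_frame *xdp_frame;
	int headroom = xdp->data - xdp->data_hard_start;

	/* The frame metadata must fit in front of the packet data. */
	if (unlikely(headroom < (int)sizeof(*xdp_frame)))
		return NULL;

	/* Reuse the start of the headroom: no allocation, no copy, so
	 * the returned frame aliases the xdp_buff's memory region.
	 */
	xdp_frame = xdp->data_hard_start;
	xdp_frame->data = xdp->data;
	xdp_frame->len = xdp->data_end - xdp->data;
	xdp_frame->headroom = headroom - sizeof(*xdp_frame);

	return xdp_frame;
}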
@@ -329,7 +329,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
 
 	case XDP_TX:
 		/* Buffer ownership passes to tx on success. */
-		xdpf = convert_to_xdp_frame(&xdp);
+		xdpf = xdp_convert_buff_to_frame(&xdp);
 		err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
 		if (unlikely(err != 1)) {
 			efx_free_rx_buffers(rx_queue, rx_buf, 1);
@@ -867,7 +867,7 @@ static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
 static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
 {
 	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
-	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
 	u32 ret;
 
 	if (unlikely(!xdpf))
@@ -1355,7 +1355,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
 		ret = CPSW_XDP_PASS;
 		break;
 	case XDP_TX:
-		xdpf = convert_to_xdp_frame(xdp);
+		xdpf = xdp_convert_buff_to_frame(xdp);
 		if (unlikely(!xdpf))
 			goto drop;
 
@@ -1295,7 +1295,7 @@ resample:
 
 static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
 {
-	struct xdp_frame *frame = convert_to_xdp_frame(xdp);
+	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
 
 	if (unlikely(!frame))
 		return -EOVERFLOW;
@@ -541,7 +541,7 @@ out:
 static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
 		       struct veth_xdp_tx_bq *bq)
 {
-	struct xdp_frame *frame = convert_to_xdp_frame(xdp);
+	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
 
 	if (unlikely(!frame))
 		return -EOVERFLOW;
@@ -703,7 +703,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		break;
 	case XDP_TX:
 		stats->xdp_tx++;
-		xdpf = convert_to_xdp_frame(&xdp);
+		xdpf = xdp_convert_buff_to_frame(&xdp);
 		if (unlikely(!xdpf))
 			goto err_xdp;
 		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
@@ -892,7 +892,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		break;
 	case XDP_TX:
 		stats->xdp_tx++;
-		xdpf = convert_to_xdp_frame(&xdp);
+		xdpf = xdp_convert_buff_to_frame(&xdp);
 		if (unlikely(!xdpf))
 			goto err_xdp;
 		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
@@ -123,7 +123,7 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
 
 /* Convert xdp_buff to xdp_frame */
 static inline
-struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
+struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
 {
 	struct xdp_frame *xdp_frame;
 	int metasize;
@@ -621,7 +621,7 @@ int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
 {
 	struct xdp_frame *xdpf;
 
-	xdpf = convert_to_xdp_frame(xdp);
+	xdpf = xdp_convert_buff_to_frame(xdp);
 	if (unlikely(!xdpf))
 		return -EOVERFLOW;
 
@@ -465,7 +465,7 @@ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
 	if (unlikely(err))
 		return err;
 
-	xdpf = convert_to_xdp_frame(xdp);
+	xdpf = xdp_convert_buff_to_frame(xdp);
 	if (unlikely(!xdpf))
 		return -EOVERFLOW;
 