Skip to content

Commit

Permalink
Merge pull request #265 from yishaih/mlx5_tunnel
Browse files Browse the repository at this point in the history
Add tunnel support

This series from Maor is the supplementary part of the kernel series that was
merged into 4.15.

It introduces the following support for tunneling packets:
1) RSS on the tunneled packet.
2) Tunneling offloads support for mlx5 devices via the DV API.

Signed-off-by: Leon Romanovsky <leon@kernel.org>
  • Loading branch information
rleon authored Dec 4, 2017
2 parents aefc971 + 8961755 commit bf2450e
Show file tree
Hide file tree
Showing 10 changed files with 121 additions and 10 deletions.
1 change: 1 addition & 0 deletions debian/ibverbs-providers.symbols
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,5 @@ libmlx5.so.1 ibverbs-providers #MINVER#
mlx5dv_query_device@MLX5_1.0 13
mlx5dv_create_cq@MLX5_1.1 14
mlx5dv_set_context_attr@MLX5_1.2 15
mlx5dv_create_qp@MLX5_1.3 16
mlx5dv_create_wq@MLX5_1.3 16
20 changes: 20 additions & 0 deletions libibverbs/man/ibv_create_qp_ex.3
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,26 @@ uint64_t rx_hash_fields_mask; /* RX fields that should particip
.in -8
};
.fi

.nf
enum ibv_rx_hash_fields {
.in +8
IBV_RX_HASH_SRC_IPV4 = 1 << 0,
IBV_RX_HASH_DST_IPV4 = 1 << 1,
IBV_RX_HASH_SRC_IPV6 = 1 << 2,
IBV_RX_HASH_DST_IPV6 = 1 << 3,
IBV_RX_HASH_SRC_PORT_TCP = 1 << 4,
IBV_RX_HASH_DST_PORT_TCP = 1 << 5,
IBV_RX_HASH_SRC_PORT_UDP = 1 << 6,
IBV_RX_HASH_DST_PORT_UDP = 1 << 7,
/* When a tunneling protocol, e.g. VXLAN, is used, the packet has an inner
 * (encapsulated) part and an outer part. To apply RSS on the inner packet,
 * this field should be set together with one of the L3/L4 fields above.
 */
IBV_RX_HASH_INNER = (1UL << 31),
.in -8
};
.fi

.PP
The function
.B ibv_create_qp_ex()
Expand Down
3 changes: 2 additions & 1 deletion libibverbs/verbs.h
Original file line number Diff line number Diff line change
Expand Up @@ -241,7 +241,8 @@ enum ibv_rx_hash_fields {
IBV_RX_HASH_SRC_PORT_TCP = 1 << 4,
IBV_RX_HASH_DST_PORT_TCP = 1 << 5,
IBV_RX_HASH_SRC_PORT_UDP = 1 << 6,
IBV_RX_HASH_DST_PORT_UDP = 1 << 7
IBV_RX_HASH_DST_PORT_UDP = 1 << 7,
IBV_RX_HASH_INNER = (1UL << 31),
};

struct ibv_rss_caps {
Expand Down
1 change: 1 addition & 0 deletions providers/mlx5/libmlx5.map
Original file line number Diff line number Diff line change
Expand Up @@ -20,5 +20,6 @@ MLX5_1.2 {

MLX5_1.3 {
global:
mlx5dv_create_qp;
mlx5dv_create_wq;
} MLX5_1.2;
15 changes: 14 additions & 1 deletion providers/mlx5/man/mlx5dv_query_device.3
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ uint64_t flags;
uint64_t comp_mask; /* Use enum mlx5dv_context_comp_mask */
struct mlx5dv_cqe_comp_caps cqe_comp_caps;
struct mlx5dv_sw_parsing_caps sw_parsing_caps;
uint32_t tunnel_offloads_caps;
.in -8
};

Expand All @@ -71,7 +72,8 @@ enum mlx5dv_context_comp_mask {
MLX5DV_CONTEXT_MASK_CQE_COMPRESION = 1 << 0,
MLX5DV_CONTEXT_MASK_SWP = 1 << 1,
MLX5DV_CONTEXT_MASK_STRIDING_RQ = 1 << 2,
MLX5DV_CONTEXT_MASK_RESERVED = 1 << 3,
MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS = 1 << 3,
MLX5DV_CONTEXT_MASK_RESERVED = 1 << 4,
.in -8
};

Expand All @@ -84,6 +86,17 @@ MLX5DV_SW_PARSING_CSUM = 1 << 1,
MLX5DV_SW_PARSING_LSO = 1 << 2,
.in -8
};

.PP
.nf
enum mlx5dv_tunnel_offloads {
.in +8
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN = 1 << 0,
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE = 1 << 1,
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE = 1 << 2,
.in -8
};

.fi
.SH "RETURN VALUE"
0 on success or the value of errno on failure (which indicates the failure reason).
Expand Down
5 changes: 4 additions & 1 deletion providers/mlx5/mlx5-abi.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
/* QP creation flags passed from userspace to the kernel driver
 * (carried in the create-QP command's flags/create_flags field;
 * part of the user<->kernel ABI — values must not change).
 */
enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2, /* request tunnel (e.g. VXLAN) offloads on this QP */
};

enum {
Expand Down Expand Up @@ -182,7 +183,7 @@ struct mlx5_create_qp_ex_rss {
__u8 reserved[6];
__u8 rx_hash_key[128];
__u32 comp_mask;
__u32 reserved1;
__u32 create_flags;
};

struct mlx5_create_qp_resp_ex {
Expand Down Expand Up @@ -311,6 +312,8 @@ struct mlx5_query_device_ex_resp {
__u32 flags; /* Use enum mlx5_query_dev_resp_flags */
struct mlx5dv_sw_parsing_caps sw_parsing_caps;
struct mlx5_striding_rq_caps striding_rq_caps;
__u32 tunnel_offloads_caps;
__u32 reserved;
};

#endif /* MLX5_ABI_H */
5 changes: 5 additions & 0 deletions providers/mlx5/mlx5.c
Original file line number Diff line number Diff line change
Expand Up @@ -646,6 +646,11 @@ int mlx5dv_query_device(struct ibv_context *ctx_in,
comp_mask_out |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
}

if (attrs_out->comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
attrs_out->tunnel_offloads_caps = mctx->tunnel_offloads_caps;
comp_mask_out |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
}

attrs_out->comp_mask = comp_mask_out;

return 0;
Expand Down
1 change: 1 addition & 0 deletions providers/mlx5/mlx5.h
Original file line number Diff line number Diff line change
Expand Up @@ -293,6 +293,7 @@ struct mlx5_context {
struct mlx5dv_ctx_allocators extern_alloc;
struct mlx5dv_sw_parsing_caps sw_parsing_caps;
struct mlx5dv_striding_rq_caps striding_rq_caps;
uint32_t tunnel_offloads_caps;
};

struct mlx5_bitmap {
Expand Down
27 changes: 26 additions & 1 deletion providers/mlx5/mlx5dv.h
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,8 @@ enum mlx5dv_context_comp_mask {
MLX5DV_CONTEXT_MASK_CQE_COMPRESION = 1 << 0,
MLX5DV_CONTEXT_MASK_SWP = 1 << 1,
MLX5DV_CONTEXT_MASK_STRIDING_RQ = 1 << 2,
MLX5DV_CONTEXT_MASK_RESERVED = 1 << 3,
MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS = 1 << 3,
MLX5DV_CONTEXT_MASK_RESERVED = 1 << 4,
};

struct mlx5dv_cqe_comp_caps {
Expand All @@ -82,6 +83,12 @@ struct mlx5dv_striding_rq_caps {
uint32_t supported_qpts;
};

/* Capability bits reported in struct mlx5dv_context.tunnel_offloads_caps;
 * valid only when MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS is set in comp_mask.
 */
enum mlx5dv_tunnel_offloads {
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN = 1 << 0,
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE = 1 << 1,
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE = 1 << 2,
};

/*
* Direct verbs device-specific attributes
*/
Expand All @@ -92,6 +99,7 @@ struct mlx5dv_context {
struct mlx5dv_cqe_comp_caps cqe_comp_caps;
struct mlx5dv_sw_parsing_caps sw_parsing_caps;
struct mlx5dv_striding_rq_caps striding_rq_caps;
uint32_t tunnel_offloads_caps;
};

enum mlx5dv_context_flags {
Expand Down Expand Up @@ -126,6 +134,23 @@ struct mlx5dv_cq_init_attr {
struct ibv_cq_ex *mlx5dv_create_cq(struct ibv_context *context,
struct ibv_cq_init_attr_ex *cq_attr,
struct mlx5dv_cq_init_attr *mlx5_cq_attr);

/* Vendor-specific QP creation flags for mlx5dv_create_qp(). */
enum mlx5dv_qp_create_flags {
MLX5DV_QP_CREATE_TUNNEL_OFFLOADS = 1 << 0, /* enable tunnel offloads on the QP */
};

/* Bits for struct mlx5dv_qp_init_attr.comp_mask, indicating which
 * optional fields below are valid.
 */
enum mlx5dv_qp_init_attr_mask {
MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS = 1 << 0,
};

/* Direct-verbs extension of the QP init attributes. */
struct mlx5dv_qp_init_attr {
uint64_t comp_mask; /* Use enum mlx5dv_qp_init_attr_mask */
uint32_t create_flags; /* Use enum mlx5dv_qp_create_flags */
};

/* Create a QP with mlx5-specific attributes in addition to the standard
 * extended init attributes; mlx5_qp_attr may carry vendor create flags.
 */
struct ibv_qp *mlx5dv_create_qp(struct ibv_context *context,
struct ibv_qp_init_attr_ex *qp_attr,
struct mlx5dv_qp_init_attr *mlx5_qp_attr);
/*
* Most device capabilities are exported by ibv_query_device(...),
* but there is HW device-specific information which is important
Expand Down
53 changes: 47 additions & 6 deletions providers/mlx5/verbs.c
Original file line number Diff line number Diff line change
Expand Up @@ -1210,7 +1210,8 @@ static void mlx5_free_qp_buf(struct mlx5_qp *qp)

static int mlx5_cmd_create_rss_qp(struct ibv_context *context,
struct ibv_qp_init_attr_ex *attr,
struct mlx5_qp *qp)
struct mlx5_qp *qp,
uint32_t mlx5_create_flags)
{
struct mlx5_create_qp_ex_rss cmd_ex_rss = {};
struct mlx5_create_qp_resp_ex resp = {};
Expand All @@ -1224,6 +1225,7 @@ static int mlx5_cmd_create_rss_qp(struct ibv_context *context,
cmd_ex_rss.rx_hash_fields_mask = attr->rx_hash_conf.rx_hash_fields_mask;
cmd_ex_rss.rx_hash_function = attr->rx_hash_conf.rx_hash_function;
cmd_ex_rss.rx_key_len = attr->rx_hash_conf.rx_hash_key_len;
cmd_ex_rss.create_flags = mlx5_create_flags;
memcpy(cmd_ex_rss.rx_hash_key, attr->rx_hash_conf.rx_hash_key,
attr->rx_hash_conf.rx_hash_key_len);

Expand Down Expand Up @@ -1276,6 +1278,10 @@ enum {
IBV_QP_INIT_ATTR_RX_HASH),
};

/* comp_mask bits of struct mlx5dv_qp_init_attr that create_qp() supports;
 * anything outside this set is rejected with EINVAL.
 */
enum {
MLX5_DV_CREATE_QP_SUP_COMP_MASK = MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS
};

enum {
MLX5_CREATE_QP_EX2_COMP_MASK = (IBV_QP_INIT_ATTR_CREATE_FLAGS |
IBV_QP_INIT_ATTR_MAX_TSO_HEADER |
Expand All @@ -1284,7 +1290,8 @@ enum {
};

static struct ibv_qp *create_qp(struct ibv_context *context,
struct ibv_qp_init_attr_ex *attr)
struct ibv_qp_init_attr_ex *attr,
struct mlx5dv_qp_init_attr *mlx5_qp_attr)
{
struct mlx5_create_qp cmd;
struct mlx5_create_qp_resp resp;
Expand All @@ -1295,6 +1302,7 @@ static struct ibv_qp *create_qp(struct ibv_context *context,
struct ibv_qp *ibqp;
int32_t usr_idx = 0;
uint32_t uuar_index;
uint32_t mlx5_create_flags = 0;
FILE *fp = ctx->dbg_fp;

if (attr->comp_mask & ~MLX5_CREATE_QP_SUP_COMP_MASK)
Expand Down Expand Up @@ -1327,14 +1335,39 @@ static struct ibv_qp *create_qp(struct ibv_context *context,
memset(&resp, 0, sizeof(resp));
memset(&resp_ex, 0, sizeof(resp_ex));

if (mlx5_qp_attr) {
if (!check_comp_mask(mlx5_qp_attr->comp_mask,
MLX5_DV_CREATE_QP_SUP_COMP_MASK)) {
mlx5_dbg(fp, MLX5_DBG_QP,
"Unsupported vendor comp_mask for create_qp\n");
errno = EINVAL;
goto err;
}

if (mlx5_qp_attr->comp_mask &
MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS) {
if (mlx5_qp_attr->create_flags &
MLX5DV_QP_CREATE_TUNNEL_OFFLOADS) {
mlx5_create_flags = MLX5_QP_FLAG_TUNNEL_OFFLOADS;
} else {
mlx5_dbg(fp, MLX5_DBG_QP,
"Unsupported creation flags requested for create_qp\n");
errno = EINVAL;
goto err;
}
}
}

if (attr->comp_mask & IBV_QP_INIT_ATTR_RX_HASH) {
ret = mlx5_cmd_create_rss_qp(context, attr, qp);
ret = mlx5_cmd_create_rss_qp(context, attr, qp,
mlx5_create_flags);
if (ret)
goto err;

return ibqp;
}

cmd.flags = mlx5_create_flags;
qp->wq_sig = qp_sig_enabled();
if (qp->wq_sig)
cmd.flags |= MLX5_QP_FLAG_SIGNATURE;
Expand Down Expand Up @@ -1488,7 +1521,7 @@ struct ibv_qp *mlx5_create_qp(struct ibv_pd *pd,
memcpy(&attrx, attr, sizeof(*attr));
attrx.comp_mask = IBV_QP_INIT_ATTR_PD;
attrx.pd = pd;
qp = create_qp(pd->context, &attrx);
qp = create_qp(pd->context, &attrx, NULL);
if (qp)
memcpy(attr, &attrx, sizeof(*attr));

Expand Down Expand Up @@ -1825,7 +1858,14 @@ int mlx5_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid)
/* Standard extended create-QP entry point: delegate to the common
 * create_qp() helper with no mlx5 direct-verbs attributes.
 */
struct ibv_qp *mlx5_create_qp_ex(struct ibv_context *context,
				 struct ibv_qp_init_attr_ex *attr)
{
	struct ibv_qp *qp;

	qp = create_qp(context, attr, NULL);

	return qp;
}
}

/* Direct-verbs create-QP entry point: same as mlx5_create_qp_ex() but
 * forwards the caller's vendor-specific attributes to create_qp().
 */
struct ibv_qp *mlx5dv_create_qp(struct ibv_context *context,
				struct ibv_qp_init_attr_ex *qp_attr,
				struct mlx5dv_qp_init_attr *mlx5_qp_attr)
{
	struct ibv_qp *qp;

	qp = create_qp(context, qp_attr, mlx5_qp_attr);

	return qp;
}

int mlx5_get_srq_num(struct ibv_srq *srq, uint32_t *srq_num)
Expand Down Expand Up @@ -1910,7 +1950,7 @@ create_cmd_qp(struct ibv_context *context,
init_attr.send_cq = srq_attr->cq;
init_attr.recv_cq = srq_attr->cq;

qp = create_qp(context, &init_attr);
qp = create_qp(context, &init_attr, NULL);
if (!qp)
return NULL;

Expand Down Expand Up @@ -2174,6 +2214,7 @@ int mlx5_query_device_ex(struct ibv_context *context,
mctx->cqe_comp_caps = resp.cqe_comp_caps;
mctx->sw_parsing_caps = resp.sw_parsing_caps;
mctx->striding_rq_caps = resp.striding_rq_caps.caps;
mctx->tunnel_offloads_caps = resp.tunnel_offloads_caps;

if (resp.flags & MLX5_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP)
mctx->vendor_cap_flags |= MLX5_VENDOR_CAP_FLAGS_CQE_128B_COMP;
Expand Down

0 comments on commit bf2450e

Please sign in to comment.