From 24e42754f676d34e5c26d6b7b30f36df8004ec08 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 14 Jan 2014 17:45:11 +0200 Subject: mlx5_core: Remove dead code Remove leftover of debug code. Signed-off-by: Dan Carpenter Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 37b6ad1f9a1b..c35c4320225a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -192,10 +192,8 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr) struct fw_page *fp; unsigned n; - if (list_empty(&dev->priv.free_list)) { + if (list_empty(&dev->priv.free_list)) return -ENOMEM; - mlx5_core_warn(dev, "\n"); - } fp = list_entry(dev->priv.free_list.next, struct fw_page, list); n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask)); -- cgit v1.2.3 From 4de6580360867d44adecb2d05febed1c8d186c82 Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Thu, 7 Nov 2013 15:25:14 +0200 Subject: mlx4_core: Add support for steerable IB UD QPs This patch adds support for allocating IB UD QPs that we can steer traffic from. We introduce a new firmware command FLOW_STEERING_IB_UC_QP_RANGE and a capability bit. This command isn't supported for VFs. Signed-off-by: Matan Barak Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 9 +++++++++ drivers/net/ethernet/mellanox/mlx4/fw.c | 10 ++++++++++ drivers/net/ethernet/mellanox/mlx4/mcg.c | 17 +++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 5 +++++ drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 10 ++++++++++ include/linux/mlx4/cmd.h | 1 + include/linux/mlx4/device.h | 6 +++++- 7 files changed, 57 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 1e9970d2f0f3..0d02fba94536 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1371,6 +1371,15 @@ static struct mlx4_cmd_info cmd_info[] = { .verify = NULL, .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper }, + { + .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper + }, }; static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 194928214606..4bd2d80d065e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -513,6 +513,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 #define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70 +#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 @@ -603,6 +604,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) if (field & 0x80) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN; dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f; + MLX4_GET(field, outbox, 
QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB; MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET); dev_cap->fs_max_num_qp_per_entry = field; MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); @@ -860,6 +864,12 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); } + + /* turn off ipoib managed steering for guests */ + MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + field &= ~0x80; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index acf9d5f1f922..34dffcf61bff 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -895,6 +895,23 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) } EXPORT_SYMBOL_GPL(mlx4_flow_detach); +int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, + u32 max_range_qpn) +{ + int err; + u64 in_param; + + in_param = ((u64) min_range_qpn) << 32; + in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF; + + err = mlx4_cmd(dev, in_param, 0, 0, + MLX4_FLOW_STEERING_IB_UC_QP_RANGE, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE); + int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot, enum mlx4_steer_type steer) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index e582a41a802b..59f67f9086dc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1236,6 +1236,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); +int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); int mlx4_get_mgm_entry_size(struct mlx4_dev *dev); int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 2f3f2bc7f283..663510325c22 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -3844,6 +3844,16 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, return err; } +int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + return -EPERM; +} + + static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) { struct res_gid *rgid; diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 8df61bc5da00..ff36620f88a7 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -157,6 +157,7 @@ enum { /* register/delete flow steering network rules */ MLX4_QP_FLOW_STEERING_ATTACH = 0x65, MLX4_QP_FLOW_STEERING_DETACH = 0x66, + MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64, }; enum { diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 7d3a523160ba..7de9fde3a9dd 100644 --- 
a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -160,7 +160,8 @@ enum {
 	MLX4_DEV_CAP_FLAG2_TS		= 1LL <<  5,
 	MLX4_DEV_CAP_FLAG2_VLAN_CONTROL	= 1LL <<  6,
 	MLX4_DEV_CAP_FLAG2_FSM		= 1LL <<  7,
-	MLX4_DEV_CAP_FLAG2_UPDATE_QP	= 1LL <<  8
+	MLX4_DEV_CAP_FLAG2_UPDATE_QP	= 1LL <<  8,
+	MLX4_DEV_CAP_FLAG2_DMFS_IPOIB	= 1LL <<  9
 };
 
 enum {
@@ -1144,6 +1145,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int
 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid);
 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave);
 
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
+				      u32 max_range_qpn);
+
 cycle_t mlx4_read_clock(struct mlx4_dev *dev);
 
 #endif /* MLX4_DEVICE_H */
--
cgit v1.2.3


From e08a8761d89b7625144c3fbf0ff9643159135c96 Mon Sep 17 00:00:00 2001
From: Haggai Eran
Date: Tue, 14 Jan 2014 17:45:13 +0200
Subject: mlx5_core: Fix out arg size in access_register command

The output size should be the sum of the core access reg output struct
plus the size of the specific register data provided by the caller.

Signed-off-by: Haggai Eran
Signed-off-by: Eli Cohen
Signed-off-by: Roland Dreier
---
 drivers/net/ethernet/mellanox/mlx5/core/port.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index f6afe7b5a675..8c9ac870ecb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -57,7 +57,7 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
 	in->arg = cpu_to_be32(arg);
 	in->register_id = cpu_to_be16(reg_num);
 	err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
-			    sizeof(out) + size_out);
+			    sizeof(*out) + size_out);
 	if (err)
 		goto ex2;
 
--
cgit v1.2.3


From 0b6e81b91070bdbe0defb9101384ebb26835e401 Mon Sep 17 00:00:00 2001
From: Eli Cohen
Date: Tue, 14 Jan 2014 17:45:14 +0200
Subject: IB/mlx5: Clear out struct before create QP command

Output structs are expected by firmware to be cleared when a command is
called.  Clear the "out" struct instead of "dout" which is used only later.

Signed-off-by: Eli Cohen
Signed-off-by: Roland Dreier
---
 drivers/net/ethernet/mellanox/mlx5/core/qp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 54faf8bfcaf4..e7941843a72d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -74,7 +74,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 	struct mlx5_destroy_qp_mbox_out dout;
 	int err;
 
-	memset(&dout, 0, sizeof(dout));
+	memset(&out, 0, sizeof(out));
 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
 
 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
--
cgit v1.2.3


From 042b9adae899e1b497282d92205d3fef42d5ca8d Mon Sep 17 00:00:00 2001
From: Eli Cohen
Date: Tue, 14 Jan 2014 17:45:15 +0200
Subject: mlx5_core: Use mlx5 core style warning

Use mlx5_core_warn(), which is the standard warning emitter function,
instead of pr_warn().
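For context, the value of a core-style helper such as mlx5_core_warn() is that
every message is tied to the PCI device and call site that emitted it, which a
bare pr_warn() cannot provide. The sketch below shows the general shape of such
a wrapper; it is illustrative only, uses made-up demo_* names, and is not the
actual mlx5_core_warn() definition:

    #include <linux/device.h>
    #include <linux/pci.h>

    struct demo_core_dev {
        struct pci_dev *pdev;
    };

    /*
     * Driver-scoped warning helper (sketch, assumed layout): prefix every
     * message with the device name plus the function and line that emitted it.
     */
    #define demo_core_warn(cdev, fmt, ...)                          \
        dev_warn(&(cdev)->pdev->dev, "%s:%d: " fmt,                 \
                 __func__, __LINE__, ##__VA_ARGS__)

With a wrapper of this form, the message changed in the hunk that follows
automatically identifies which adapter the failed CREATE_QP command belonged to.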
Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index e7941843a72d..510576213dd0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -84,7 +84,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, } if (out.hdr.status) { - pr_warn("current num of QPs 0x%x\n", atomic_read(&dev->num_qps)); + mlx5_core_warn(dev, "current num of QPs 0x%x\n", + atomic_read(&dev->num_qps)); return mlx5_cmd_status_to_err(&out.hdr); } -- cgit v1.2.3 From 3bdb31f688276505ede23280885948e934304674 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 14 Jan 2014 17:45:17 +0200 Subject: IB/mlx5: Implement modify CQ Modify CQ is used by ULPs like IPoIB to change moderation parameters. This patch adds support in mlx5. Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mlx5/cq.c | 26 +++++++++++++++++++++++++- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 17 +++++++++++++++-- include/linux/mlx5/cq.h | 8 ++++---- include/linux/mlx5/device.h | 15 +++++++++++++++ 4 files changed, 59 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index b72627429745..b4c122eab484 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -818,7 +818,31 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) { - return -ENOSYS; + struct mlx5_modify_cq_mbox_in *in; + struct mlx5_ib_dev *dev = to_mdev(cq->device); + struct mlx5_ib_cq *mcq = to_mcq(cq); + int err; + u32 fsel; + + if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER)) + return -ENOSYS; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->cqn = cpu_to_be32(mcq->mcq.cqn); + fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT); + in->ctx.cq_period = cpu_to_be16(cq_period); + in->ctx.cq_max_count = cpu_to_be16(cq_count); + in->field_select = cpu_to_be32(fsel); + err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in); + kfree(in); + + if (err) + mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn); + + return err; } int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index c2d660be6f76..e6fedcf94182 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -201,10 +201,23 @@ EXPORT_SYMBOL(mlx5_core_query_cq); int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - int type, struct mlx5_cq_modify_params *params) + struct mlx5_modify_cq_mbox_in *in) { - return -ENOSYS; + struct mlx5_modify_cq_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ); + err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return 0; } +EXPORT_SYMBOL(mlx5_core_modify_cq); int mlx5_init_cq_table(struct mlx5_core_dev *dev) { diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 3db67f73d96d..c3cf5a46abce 100644 --- a/include/linux/mlx5/cq.h +++ 
b/include/linux/mlx5/cq.h @@ -85,9 +85,9 @@ enum { }; enum { - MLX5_CQ_MODIFY_RESEIZE = 0, - MLX5_CQ_MODIFY_MODER = 1, - MLX5_CQ_MODIFY_MAPPING = 2, + MLX5_CQ_MODIFY_PERIOD = 1 << 0, + MLX5_CQ_MODIFY_COUNT = 1 << 1, + MLX5_CQ_MODIFY_OVERRUN = 1 << 2, }; struct mlx5_cq_modify_params { @@ -158,7 +158,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, struct mlx5_query_cq_mbox_out *out); int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - int type, struct mlx5_cq_modify_params *params); + struct mlx5_modify_cq_mbox_in *in); int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 2c1c62e339ca..dbb03caa8aed 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -177,6 +177,7 @@ enum { MLX5_DEV_CAP_FLAG_APM = 1LL << 17, MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, + MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, @@ -698,6 +699,19 @@ struct mlx5_query_cq_mbox_out { __be64 pas[0]; }; +struct mlx5_modify_cq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 cqn; + __be32 field_select; + struct mlx5_cq_context ctx; + u8 rsvd[192]; + __be64 pas[0]; +}; + +struct mlx5_modify_cq_mbox_out { + struct mlx5_outbox_hdr hdr; +}; + struct mlx5_enable_hca_mbox_in { struct mlx5_inbox_hdr hdr; u8 rsvd[8]; @@ -872,6 +886,7 @@ struct mlx5_modify_mkey_mbox_in { struct mlx5_modify_mkey_mbox_out { struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; }; struct mlx5_dump_mkey_mbox_in { -- cgit v1.2.3 From bde51583f49bd87e452e9504d489926638046b11 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 14 Jan 2014 17:45:18 +0200 Subject: IB/mlx5: Add support for resize CQ Implement resize CQ which is a mandatory verb in mlx5. Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mlx5/cq.c | 282 +++++++++++++++++++++++++-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 3 +- drivers/infiniband/hw/mlx5/user.h | 3 + drivers/net/ethernet/mellanox/mlx5/core/cq.c | 4 +- include/linux/mlx5/cq.h | 12 +- include/linux/mlx5/device.h | 2 + 6 files changed, 284 insertions(+), 22 deletions(-) (limited to 'drivers/net') diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index b4c122eab484..50b03a8067e5 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -73,14 +73,24 @@ static void *get_cqe(struct mlx5_ib_cq *cq, int n) return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz); } +static u8 sw_ownership_bit(int n, int nent) +{ + return (n & nent) ? 1 : 0; +} + static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) { void *cqe = get_cqe(cq, n & cq->ibcq.cqe); struct mlx5_cqe64 *cqe64; cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; - return ((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ - !!(n & (cq->ibcq.cqe + 1))) ? 
NULL : cqe; + + if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) && + !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { + return cqe; + } else { + return NULL; + } } static void *next_cqe_sw(struct mlx5_ib_cq *cq) @@ -351,6 +361,11 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, qp->sq.last_poll = tail; } +static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) +{ + mlx5_buf_free(&dev->mdev, &buf->buf); +} + static int mlx5_poll_one(struct mlx5_ib_cq *cq, struct mlx5_ib_qp **cur_qp, struct ib_wc *wc) @@ -366,6 +381,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq, void *cqe; int idx; +repoll: cqe = next_cqe_sw(cq); if (!cqe) return -EAGAIN; @@ -379,7 +395,18 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq, */ rmb(); - /* TBD: resize CQ */ + opcode = cqe64->op_own >> 4; + if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) { + if (likely(cq->resize_buf)) { + free_cq_buf(dev, &cq->buf); + cq->buf = *cq->resize_buf; + kfree(cq->resize_buf); + cq->resize_buf = NULL; + goto repoll; + } else { + mlx5_ib_warn(dev, "unexpected resize cqe\n"); + } + } qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { @@ -398,7 +425,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq, } wc->qp = &(*cur_qp)->ibqp; - opcode = cqe64->op_own >> 4; switch (opcode) { case MLX5_CQE_REQ: wq = &(*cur_qp)->sq; @@ -503,15 +529,11 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, return err; buf->cqe_size = cqe_size; + buf->nent = nent; return 0; } -static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) -{ - mlx5_buf_free(&dev->mdev, &buf->buf); -} - static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, struct ib_ucontext *context, struct mlx5_ib_cq *cq, int entries, struct mlx5_create_cq_mbox_in **cqb, @@ -576,16 +598,16 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context) ib_umem_release(cq->buf.umem); } -static void init_cq_buf(struct mlx5_ib_cq *cq, int nent) +static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf) { int i; void *cqe; struct mlx5_cqe64 *cqe64; - for (i = 0; i < nent; i++) { - cqe = get_cqe(cq, i); - cqe64 = (cq->buf.cqe_size == 64) ? cqe : cqe + 64; - cqe64->op_own = 0xf1; + for (i = 0; i < buf->nent; i++) { + cqe = get_cqe_from_buf(buf, i, buf->cqe_size); + cqe64 = buf->cqe_size == 64 ? 
cqe : cqe + 64; + cqe64->op_own = MLX5_CQE_INVALID << 4; } } @@ -610,7 +632,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, if (err) goto err_db; - init_cq_buf(cq, entries); + init_cq_buf(cq, &cq->buf); *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages; *cqb = mlx5_vzalloc(*inlen); @@ -836,7 +858,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) in->ctx.cq_period = cpu_to_be16(cq_period); in->ctx.cq_max_count = cpu_to_be16(cq_count); in->field_select = cpu_to_be32(fsel); - err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in); + err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in)); kfree(in); if (err) @@ -845,9 +867,235 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) return err; } +static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, + int entries, struct ib_udata *udata, int *npas, + int *page_shift, int *cqe_size) +{ + struct mlx5_ib_resize_cq ucmd; + struct ib_umem *umem; + int err; + int npages; + struct ib_ucontext *context = cq->buf.umem->context; + + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) + return -EFAULT; + + umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, + IB_ACCESS_LOCAL_WRITE, 1); + if (IS_ERR(umem)) { + err = PTR_ERR(umem); + return err; + } + + mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift, + npas, NULL); + + cq->resize_umem = umem; + *cqe_size = ucmd.cqe_size; + + return 0; +} + +static void un_resize_user(struct mlx5_ib_cq *cq) +{ + ib_umem_release(cq->resize_umem); +} + +static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, + int entries, int cqe_size) +{ + int err; + + cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL); + if (!cq->resize_buf) + return -ENOMEM; + + err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size); + if (err) + goto ex; + + init_cq_buf(cq, cq->resize_buf); + + return 0; + +ex: + kfree(cq->resize_buf); + return err; +} + +static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) +{ + free_cq_buf(dev, cq->resize_buf); + cq->resize_buf = NULL; +} + +static int copy_resize_cqes(struct mlx5_ib_cq *cq) +{ + struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); + struct mlx5_cqe64 *scqe64; + struct mlx5_cqe64 *dcqe64; + void *start_cqe; + void *scqe; + void *dcqe; + int ssize; + int dsize; + int i; + u8 sw_own; + + ssize = cq->buf.cqe_size; + dsize = cq->resize_buf->cqe_size; + if (ssize != dsize) { + mlx5_ib_warn(dev, "resize from different cqe size is not supported\n"); + return -EINVAL; + } + + i = cq->mcq.cons_index; + scqe = get_sw_cqe(cq, i); + scqe64 = ssize == 64 ? scqe : scqe + 64; + start_cqe = scqe; + if (!scqe) { + mlx5_ib_warn(dev, "expected cqe in sw ownership\n"); + return -EINVAL; + } + + while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) { + dcqe = get_cqe_from_buf(cq->resize_buf, + (i + 1) & (cq->resize_buf->nent), + dsize); + dcqe64 = dsize == 64 ? dcqe : dcqe + 64; + sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent); + memcpy(dcqe, scqe, dsize); + dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own; + + ++i; + scqe = get_sw_cqe(cq, i); + scqe64 = ssize == 64 ? 
scqe : scqe + 64; + if (!scqe) { + mlx5_ib_warn(dev, "expected cqe in sw ownership\n"); + return -EINVAL; + } + + if (scqe == start_cqe) { + pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n", + cq->mcq.cqn); + return -ENOMEM; + } + } + ++cq->mcq.cons_index; + return 0; +} + int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) { - return -ENOSYS; + struct mlx5_ib_dev *dev = to_mdev(ibcq->device); + struct mlx5_ib_cq *cq = to_mcq(ibcq); + struct mlx5_modify_cq_mbox_in *in; + int err; + int npas; + int page_shift; + int inlen; + int uninitialized_var(cqe_size); + unsigned long flags; + + if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) { + pr_info("Firmware does not support resize CQ\n"); + return -ENOSYS; + } + + if (entries < 1) + return -EINVAL; + + entries = roundup_pow_of_two(entries + 1); + if (entries > dev->mdev.caps.max_cqes + 1) + return -EINVAL; + + if (entries == ibcq->cqe + 1) + return 0; + + mutex_lock(&cq->resize_mutex); + if (udata) { + err = resize_user(dev, cq, entries, udata, &npas, &page_shift, + &cqe_size); + } else { + cqe_size = 64; + err = resize_kernel(dev, cq, entries, cqe_size); + if (!err) { + npas = cq->resize_buf->buf.npages; + page_shift = cq->resize_buf->buf.page_shift; + } + } + + if (err) + goto ex; + + inlen = sizeof(*in) + npas * sizeof(in->pas[0]); + in = mlx5_vzalloc(inlen); + if (!in) { + err = -ENOMEM; + goto ex_resize; + } + + if (udata) + mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift, + in->pas, 0); + else + mlx5_fill_page_array(&cq->resize_buf->buf, in->pas); + + in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE | + MLX5_MODIFY_CQ_MASK_PG_OFFSET | + MLX5_MODIFY_CQ_MASK_PG_SIZE); + in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; + in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5; + in->ctx.page_offset = 0; + in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24); + in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE); + in->cqn = cpu_to_be32(cq->mcq.cqn); + + err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen); + if (err) + goto ex_alloc; + + if (udata) { + cq->ibcq.cqe = entries - 1; + ib_umem_release(cq->buf.umem); + cq->buf.umem = cq->resize_umem; + cq->resize_umem = NULL; + } else { + struct mlx5_ib_cq_buf tbuf; + int resized = 0; + + spin_lock_irqsave(&cq->lock, flags); + if (cq->resize_buf) { + err = copy_resize_cqes(cq); + if (!err) { + tbuf = cq->buf; + cq->buf = *cq->resize_buf; + kfree(cq->resize_buf); + cq->resize_buf = NULL; + resized = 1; + } + } + cq->ibcq.cqe = entries - 1; + spin_unlock_irqrestore(&cq->lock, flags); + if (resized) + free_cq_buf(dev, &tbuf); + } + mutex_unlock(&cq->resize_mutex); + + mlx5_vfree(in); + return 0; + +ex_alloc: + mlx5_vfree(in); + +ex_resize: + if (udata) + un_resize_user(cq); + else + un_resize_kernel(dev, cq); +ex: + mutex_unlock(&cq->resize_mutex); + return err; } int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 5acef30a4998..389e31965773 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -195,6 +195,7 @@ struct mlx5_ib_cq_buf { struct mlx5_buf buf; struct ib_umem *umem; int cqe_size; + int nent; }; enum mlx5_ib_qp_flags { @@ -220,7 +221,7 @@ struct mlx5_ib_cq { /* protect resize cq */ struct mutex resize_mutex; - struct mlx5_ib_cq_resize *resize_buf; + struct mlx5_ib_cq_buf *resize_buf; struct ib_umem *resize_umem; int cqe_size; }; diff --git 
a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index a886de3e593c..32a2a5dfc523 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -93,6 +93,9 @@ struct mlx5_ib_create_cq_resp {
 
 struct mlx5_ib_resize_cq {
 	__u64	buf_addr;
+	__u16	cqe_size;
+	__u16	reserved0;
+	__u32	reserved1;
 };
 
 struct mlx5_ib_create_srq {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index e6fedcf94182..43c5f4809526 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -201,14 +201,14 @@ EXPORT_SYMBOL(mlx5_core_query_cq);
 
 int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			struct mlx5_modify_cq_mbox_in *in)
+			struct mlx5_modify_cq_mbox_in *in, int in_sz)
 {
 	struct mlx5_modify_cq_mbox_out out;
 	int err;
 
 	memset(&out, 0, sizeof(out));
 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
-	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
 	if (err)
 		return err;
 
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index c3cf5a46abce..2202c7f72b75 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -79,9 +79,10 @@ enum {
 	MLX5_CQE_RESP_SEND	= 2,
 	MLX5_CQE_RESP_SEND_IMM	= 3,
 	MLX5_CQE_RESP_SEND_INV	= 4,
-	MLX5_CQE_RESIZE_CQ	= 0xff, /* TBD */
+	MLX5_CQE_RESIZE_CQ	= 5,
 	MLX5_CQE_REQ_ERR	= 13,
 	MLX5_CQE_RESP_ERR	= 14,
+	MLX5_CQE_INVALID	= 15,
 };
 
 enum {
@@ -90,6 +91,13 @@ enum {
 	MLX5_CQ_MODIFY_OVERRUN = 1 << 2,
 };
 
+enum {
+	MLX5_CQ_OPMOD_RESIZE		= 1,
+	MLX5_MODIFY_CQ_MASK_LOG_SIZE	= 1 << 0,
+	MLX5_MODIFY_CQ_MASK_PG_OFFSET	= 1 << 1,
+	MLX5_MODIFY_CQ_MASK_PG_SIZE	= 1 << 2,
+};
+
 struct mlx5_cq_modify_params {
 	int	type;
 	union {
@@ -158,7 +166,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 		       struct mlx5_query_cq_mbox_out *out);
 int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			struct mlx5_modify_cq_mbox_in *in);
+			struct mlx5_modify_cq_mbox_in *in, int in_sz);
 int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index dbb03caa8aed..87e23717df70 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -178,6 +178,7 @@ enum {
 	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
 	MLX5_DEV_CAP_FLAG_ON_DMND_PG	= 1LL << 24,
 	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
+	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
 	MLX5_DEV_CAP_FLAG_RESIZE_SRQ	= 1LL << 32,
 	MLX5_DEV_CAP_FLAG_REMOTE_FENCE	= 1LL << 38,
 	MLX5_DEV_CAP_FLAG_TLP_HINTS	= 1LL << 39,
@@ -710,6 +711,7 @@ struct mlx5_modify_cq_mbox_in {
 
 struct mlx5_modify_cq_mbox_out {
 	struct mlx5_outbox_hdr	hdr;
+	u8			rsvd[8];
 };
 
 struct mlx5_enable_hca_mbox_in {
--
cgit v1.2.3


From db81a5c374b5bd650c5e6ae85d026709751db103 Mon Sep 17 00:00:00 2001
From: Eli Cohen
Date: Tue, 14 Jan 2014 17:45:19 +0200
Subject: mlx5_core: Improve debugfs readability

Use strings to display transport service or state of QPs.  Use numeric
value for MTU of a QP.
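As background for the hunk below: the QP context stores the MTU as an IB enum
value (IB_MTU_256 == 1 up to IB_MTU_4096 == 5) in the upper bits of mtu_msgmax,
and the debugfs file now reports it in bytes via a switch. An equivalent
table-driven form is sketched here for illustration only; the demo_* names are
hypothetical and this is not part of the patch:

    #include <linux/types.h>
    #include <rdma/ib_verbs.h>

    /* Byte sizes indexed by enum ib_mtu (IB_MTU_256 == 1 ... IB_MTU_4096 == 5). */
    static const u16 demo_ib_mtu_to_bytes[] = {
        [IB_MTU_256]  = 256,
        [IB_MTU_512]  = 512,
        [IB_MTU_1024] = 1024,
        [IB_MTU_2048] = 2048,
        [IB_MTU_4096] = 4096,
    };

    /* The QP context keeps the enum in the top bits of mtu_msgmax. */
    static u64 demo_qp_mtu_bytes(u8 mtu_msgmax)
    {
        u8 mtu = mtu_msgmax >> 5;

        if (mtu < IB_MTU_256 || mtu > IB_MTU_4096)
            return 0;   /* unknown or unset; report 0, as the patch does */

        return demo_ib_mtu_to_bytes[mtu];
    }

Either form yields the same debugfs output; the switch used in the patch simply
avoids relying on the numeric values behind the enum.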
Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 39 +++++++++++++++++--- include/linux/mlx5/qp.h | 45 +++++++++++++++++++++++ 2 files changed, 78 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 80f6d127257a..10e1f1a18255 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -275,7 +275,7 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) } static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, - int index) + int index, int *is_str) { struct mlx5_query_qp_mbox_out *out; struct mlx5_qp_context *ctx; @@ -293,19 +293,40 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, goto out; } + *is_str = 0; ctx = &out->ctx; switch (index) { case QP_PID: param = qp->pid; break; case QP_STATE: - param = be32_to_cpu(ctx->flags) >> 28; + param = (u64)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28); + *is_str = 1; break; case QP_XPORT: - param = (be32_to_cpu(ctx->flags) >> 16) & 0xff; + param = (u64)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff); + *is_str = 1; break; case QP_MTU: - param = ctx->mtu_msgmax >> 5; + switch (ctx->mtu_msgmax >> 5) { + case IB_MTU_256: + param = 256; + break; + case IB_MTU_512: + param = 512; + break; + case IB_MTU_1024: + param = 1024; + break; + case IB_MTU_2048: + param = 2048; + break; + case IB_MTU_4096: + param = 4096; + break; + default: + param = 0; + } break; case QP_N_RECV: param = 1 << ((ctx->rq_size_stride >> 3) & 0xf); @@ -414,6 +435,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, struct mlx5_field_desc *desc; struct mlx5_rsc_debug *d; char tbuf[18]; + int is_str = 0; u64 field; int ret; @@ -424,7 +446,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, d = (void *)(desc - desc->i) - sizeof(*d); switch (d->type) { case MLX5_DBG_RSC_QP: - field = qp_read_field(d->dev, d->object, desc->i); + field = qp_read_field(d->dev, d->object, desc->i, &is_str); break; case MLX5_DBG_RSC_EQ: @@ -440,7 +462,12 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, return -EINVAL; } - ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); + + if (is_str) + ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)field); + else + ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); + if (ret > 0) { if (copy_to_user(buf, tbuf, ret)) return -EFAULT; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index d9e3eacb3a7f..d51eff713549 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -464,4 +464,49 @@ void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); +static inline const char *mlx5_qp_type_str(int type) +{ + switch (type) { + case MLX5_QP_ST_RC: return "RC"; + case MLX5_QP_ST_UC: return "C"; + case MLX5_QP_ST_UD: return "UD"; + case MLX5_QP_ST_XRC: return "XRC"; + case MLX5_QP_ST_MLX: return "MLX"; + case MLX5_QP_ST_QP0: return "QP0"; + case MLX5_QP_ST_QP1: return "QP1"; + case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE"; + case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6"; + case MLX5_QP_ST_SNIFFER: return "SNIFFER"; + case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR"; + case MLX5_QP_ST_PTP_1588: return 
"PTP_1588"; + case MLX5_QP_ST_REG_UMR: return "REG_UMR"; + default: return "Invalid transport type"; + } +} + +static inline const char *mlx5_qp_state_str(int state) +{ + switch (state) { + case MLX5_QP_STATE_RST: + return "RST"; + case MLX5_QP_STATE_INIT: + return "INIT"; + case MLX5_QP_STATE_RTR: + return "RTR"; + case MLX5_QP_STATE_RTS: + return "RTS"; + case MLX5_QP_STATE_SQER: + return "SQER"; + case MLX5_QP_STATE_SQD: + return "SQD"; + case MLX5_QP_STATE_ERR: + return "ERR"; + case MLX5_QP_STATE_SQ_DRAINING: + return "SQ_DRAINING"; + case MLX5_QP_STATE_SUSPENDED: + return "SUSPENDED"; + default: return "Invalid QP state"; + } +} + #endif /* MLX5_QP_H */ -- cgit v1.2.3 From 05bdb2ab6b09f2306f0afe0f60f4b9abffa7aba4 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 14 Jan 2014 17:45:20 +0200 Subject: mlx5_core: Fix PowerPC support 1. Fix derivation of sub-page index from the dma address in free_4k. 2. Fix the DMA address passed to dma_unmap_page by masking it properly. Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 9 +++++---- include/linux/mlx5/device.h | 3 ++- 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index c35c4320225a..d59790a82bc3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -99,7 +99,7 @@ enum { enum { MLX5_MAX_RECLAIM_TIME_MILI = 5000, - MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / 4096, + MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, }; static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) @@ -206,7 +206,7 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr) if (!fp->free_count) list_del(&fp->list); - *addr = fp->addr + n * 4096; + *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE; return 0; } @@ -222,14 +222,15 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr) return; } - n = (addr & ~PAGE_MASK) % 4096; + n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT; fwp->free_count++; set_bit(n, &fwp->bitmask); if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) { rb_erase(&fwp->rb_node, &dev->priv.page_root); if (fwp->free_count != 1) list_del(&fwp->list); - dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE, + DMA_BIDIRECTIONAL); __free_page(fwp->page); kfree(fwp); } else if (fwp->free_count == 1) { diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 87e23717df70..1d059099226c 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -234,7 +234,8 @@ enum { }; enum { - MLX5_ADAPTER_PAGE_SHIFT = 12 + MLX5_ADAPTER_PAGE_SHIFT = 12, + MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT, }; enum { -- cgit v1.2.3 From 1bde6e301cf6217da9238086c958f532b16e504d Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 14 Jan 2014 17:45:22 +0200 Subject: IB/mlx5: Abort driver cleanup if teardown hca fails Do not continue with cleanup flow. If this ever happens we can check which resources remained open. 
Signed-off-by: Eli Cohen
Signed-off-by: Roland Dreier
---
 drivers/net/ethernet/mellanox/mlx5/core/main.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 40a9f5ed814d..a064f06e0cb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -460,7 +460,10 @@ disable_msix:
 
 err_stop_poll:
 	mlx5_stop_health_poll(dev);
-	mlx5_cmd_teardown_hca(dev);
+	if (mlx5_cmd_teardown_hca(dev)) {
+		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
+		return err;
+	}
 
 err_pagealloc_stop:
 	mlx5_pagealloc_stop(dev);
@@ -503,7 +506,10 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
 	mlx5_eq_cleanup(dev);
 	mlx5_disable_msix(dev);
 	mlx5_stop_health_poll(dev);
-	mlx5_cmd_teardown_hca(dev);
+	if (mlx5_cmd_teardown_hca(dev)) {
+		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
+		return;
+	}
 	mlx5_pagealloc_stop(dev);
 	mlx5_reclaim_startup_pages(dev);
 	mlx5_core_disable_hca(dev);
--
cgit v1.2.3
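A closing note on the "mlx5_core: Fix PowerPC support" patch earlier in this
series: the old index computation only misbehaves when PAGE_SIZE is larger than
the 4K unit the firmware works in (for example 64K pages on PowerPC), because
(addr & ~PAGE_MASK) % 4096 is then always zero while the new shift recovers the
real sub-page index, and dma_unmap_page() must be given the page-aligned DMA
address rather than the 4K sub-address. A standalone arithmetic demo, not
driver code, with the DEMO_* values assumed purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_PAGE_SHIFT          16    /* 64K system pages, as on many PowerPC configs */
    #define DEMO_PAGE_SIZE           (1ULL << DEMO_PAGE_SHIFT)
    #define DEMO_PAGE_MASK           (~(DEMO_PAGE_SIZE - 1))
    #define DEMO_ADAPTER_PAGE_SHIFT  12    /* firmware pages are always 4K */

    int main(void)
    {
        uint64_t base = 0x100000000ULL;    /* DMA address of one 64K host page */
        uint64_t addr = base + 3 * 4096;   /* the 4th 4K chunk handed to firmware */

        /* old derivation: always 0, so the wrong bitmask bit gets freed */
        unsigned old_n = (addr & ~DEMO_PAGE_MASK) % 4096;
        /* fixed derivation: 3, the actual sub-page index */
        unsigned new_n = (addr & ~DEMO_PAGE_MASK) >> DEMO_ADAPTER_PAGE_SHIFT;

        printf("old index %u, fixed index %u, unmap address %#llx\n",
               old_n, new_n, (unsigned long long)(addr & DEMO_PAGE_MASK));
        return 0;
    }

On a host with 4K pages both expressions evaluate to zero, which is presumably
why the problem only surfaced on PowerPC.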