author     Jakub Kicinski <kuba@kernel.org>    2022-04-15 11:41:56 -0700
committer  Jakub Kicinski <kuba@kernel.org>    2022-04-15 11:41:57 -0700
commit     7b05c54226015807993a7d1bab0ba59d00e592ad (patch)
tree       00081ea02390cf29fc0e12f9ed3ee1fc9e444caa
parent     0a03f3c511f57da4d7159a150f1ab4b87f62c040 (diff)
parent     1f702c1643f2f9657f9dd03085b309ab9a1de1d7 (diff)
Merge branch 'net-ethool-add-support-to-get-set-tx-push-by-ethtool-g-g'
Jie Wang says:
====================
net: ethtool: add support to get/set tx push by ethtool -G/g

These three patches add a tx push parameter to the ring params and adapt the
get and set APIs of the ring params accordingly.
====================
Link: https://lore.kernel.org/r/20220412020121.14140-1-huangguangbin2@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
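
As a usage note (not part of this series), the sketch below shows how the new attribute could be exercised from userspace over the ethtool generic netlink family. It is a minimal, hedged example: it only builds an ETHTOOL_MSG_RINGS_SET request carrying ETHTOOL_A_RINGS_TX_PUSH with libmnl, assumes the "ethtool" genl family id has already been resolved (the CTRL_CMD_GETFAMILY exchange and the socket send/ACK handling are omitted), and build_tx_push_req plus any interface name passed to it are placeholders.

/* Hedged sketch: build an ETHTOOL_MSG_RINGS_SET request that toggles TX push.
 * Assumes libmnl; ethnl_family_id must already have been resolved via a
 * CTRL_CMD_GETFAMILY query on the NETLINK_GENERIC socket (not shown).
 */
#include <libmnl/libmnl.h>
#include <linux/genetlink.h>
#include <linux/ethtool_netlink.h>
#include <stdint.h>
#include <time.h>

static struct nlmsghdr *build_tx_push_req(char *buf, uint16_t ethnl_family_id,
                                          const char *ifname, uint8_t on)
{
        struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
        struct genlmsghdr *genl;
        struct nlattr *hdr;

        nlh->nlmsg_type = ethnl_family_id;      /* resolved "ethtool" genl family */
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
        nlh->nlmsg_seq = time(NULL);

        genl = mnl_nlmsg_put_extra_header(nlh, sizeof(*genl));
        genl->cmd = ETHTOOL_MSG_RINGS_SET;
        genl->version = ETHTOOL_GENL_VERSION;

        /* Nested request header selecting the target device by name. */
        hdr = mnl_attr_nest_start(nlh, ETHTOOL_A_RINGS_HEADER);
        mnl_attr_put_strz(nlh, ETHTOOL_A_HEADER_DEV_NAME, ifname);
        mnl_attr_nest_end(nlh, hdr);

        /* The u8 attribute added by this series: 1 = enable TX push. */
        mnl_attr_put_u8(nlh, ETHTOOL_A_RINGS_TX_PUSH, on);

        return nlh;
}

The message would then be sent through a NETLINK_GENERIC socket, and the same attribute can be read back from an ETHTOOL_MSG_RINGS_GET reply. Versions of the ethtool utility that understand this attribute expose it as a tx-push on|off argument to ethtool -G, with ethtool -g reporting the current state, which is what the cover letter above refers to.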
-rw-r--r--  Documentation/networking/ethtool-netlink.rst       |  8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 33
-rw-r--r--  include/linux/ethtool.h                            |  4
-rw-r--r--  include/uapi/linux/ethtool_netlink.h               |  1
-rw-r--r--  net/ethtool/netlink.h                               |  2
-rw-r--r--  net/ethtool/rings.c                                 | 54
6 files changed, 80 insertions(+), 22 deletions(-)
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 24d9be69065d..dbca3e9ec782 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -862,6 +862,7 @@ Kernel response contents:
   ``ETHTOOL_A_RINGS_RX_BUF_LEN``        u32     size of buffers on the ring
   ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT``    u8      TCP header / data split
   ``ETHTOOL_A_RINGS_CQE_SIZE``          u32     Size of TX/RX CQE
+  ``ETHTOOL_A_RINGS_TX_PUSH``           u8      flag of TX Push mode
   ====================================  ======  ===========================
 
 ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with
@@ -871,6 +872,12 @@ separate buffers. The device configuration must make it possible to receive
 full memory pages of data, for example because MTU is high enough or through
 HW-GRO.
 
+``ETHTOOL_A_RINGS_TX_PUSH`` flag is used to enable descriptor fast
+path to send packets. In ordinary path, driver fills descriptors in DRAM and
+notifies NIC hardware. In fast path, driver pushes descriptors to the device
+through MMIO writes, thus reducing the latency. However, enabling this feature
+may increase the CPU cost. Drivers may enforce additional per-packet
+eligibility checks (e.g. on packet size).
 
 RINGS_SET
 =========
@@ -887,6 +894,7 @@ Request contents:
   ``ETHTOOL_A_RINGS_TX``                u32     size of TX ring
   ``ETHTOOL_A_RINGS_RX_BUF_LEN``        u32     size of buffers on the ring
   ``ETHTOOL_A_RINGS_CQE_SIZE``          u32     Size of TX/RX CQE
+  ``ETHTOOL_A_RINGS_TX_PUSH``           u8      flag of TX Push mode
   ====================================  ======  ===========================
 
 Kernel checks that requested ring sizes do not exceed limits reported by
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index f4da77452126..9f4111fd2986 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -664,6 +664,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
 	param->tx_pending = priv->ring[0].desc_num;
 	param->rx_pending = priv->ring[rx_queue_index].desc_num;
 	kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size;
+	kernel_param->tx_push = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE,
+					 &priv->state);
 }
 
 static void hns3_get_pauseparam(struct net_device *netdev,
@@ -1120,6 +1122,30 @@ static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
 	return 0;
 }
 
+static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+	u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+	if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
+		return -EOPNOTSUPP;
+
+	if (tx_push == old_state)
+		return 0;
+
+	netdev_dbg(netdev, "Changing tx push from %s to %s\n",
+		   old_state ? "on" : "off", tx_push ? "on" : "off");
+
+	if (tx_push)
+		set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+	else
+		clear_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+	return 0;
+}
+
 static int hns3_set_ringparam(struct net_device *ndev,
 			      struct ethtool_ringparam *param,
 			      struct kernel_ethtool_ringparam *kernel_param,
@@ -1139,6 +1165,10 @@ static int hns3_set_ringparam(struct net_device *ndev,
 	if (ret)
 		return ret;
 
+	ret = hns3_set_tx_push(ndev, kernel_param->tx_push);
+	if (ret)
+		return ret;
+
 	/* Hardware requires that its descriptors must be multiple of eight */
 	new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
 	new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
@@ -1858,7 +1888,8 @@ static int hns3_set_tunable(struct net_device *netdev,
 				 ETHTOOL_COALESCE_MAX_FRAMES |	\
 				 ETHTOOL_COALESCE_USE_CQE)
 
-#define HNS3_ETHTOOL_RING	ETHTOOL_RING_USE_RX_BUF_LEN
+#define HNS3_ETHTOOL_RING	(ETHTOOL_RING_USE_RX_BUF_LEN |	\
+				 ETHTOOL_RING_USE_TX_PUSH)
 
 static int hns3_get_ts_info(struct net_device *netdev,
 			    struct ethtool_ts_info *info)
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 4af58459a1e7..99dc7bfbcd3c 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -71,11 +71,13 @@ enum {
  * struct kernel_ethtool_ringparam - RX/TX ring configuration
  * @rx_buf_len: Current length of buffers on the rx ring.
  * @tcp_data_split: Scatter packet headers and data to separate buffers
+ * @tx_push: The flag of tx push mode
  * @cqe_size: Size of TX/RX completion queue event
  */
 struct kernel_ethtool_ringparam {
 	u32 rx_buf_len;
 	u8 tcp_data_split;
+	u8 tx_push;
 	u32 cqe_size;
 };
 
@@ -83,10 +85,12 @@ struct kernel_ethtool_ringparam {
  * enum ethtool_supported_ring_param - indicator caps for setting ring params
  * @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len
  * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size
+ * @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
  */
 enum ethtool_supported_ring_param {
 	ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
 	ETHTOOL_RING_USE_CQE_SIZE   = BIT(1),
+	ETHTOOL_RING_USE_TX_PUSH    = BIT(2),
 };
 
 #define __ETH_RSS_HASH_BIT(bit)	((u32)1 << (bit))
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 979850221b8d..d2fb4f7be61b 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -338,6 +338,7 @@ enum {
 	ETHTOOL_A_RINGS_RX_BUF_LEN,		/* u32 */
 	ETHTOOL_A_RINGS_TCP_DATA_SPLIT,		/* u8 */
 	ETHTOOL_A_RINGS_CQE_SIZE,		/* u32 */
+	ETHTOOL_A_RINGS_TX_PUSH,		/* u8 */
 
 	/* add new constants above here */
 	__ETHTOOL_A_RINGS_CNT,
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 29d01662a48b..7919ddb2371c 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -363,7 +363,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT
 extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1];
 extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1];
 extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1];
-extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_CQE_SIZE + 1];
+extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX_PUSH + 1];
 extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1];
 extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1];
 extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1];
diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c
index 9f33c9689b56..fa3ec8d438f7 100644
--- a/net/ethtool/rings.c
+++ b/net/ethtool/rings.c
@@ -55,7 +55,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base,
 	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX */
 	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_BUF_LEN */
 	       nla_total_size(sizeof(u8)) +	/* _RINGS_TCP_DATA_SPLIT */
-	       nla_total_size(sizeof(u32));	/* _RINGS_CQE_SIZE */
+	       nla_total_size(sizeof(u32)) +	/* _RINGS_CQE_SIZE */
+	       nla_total_size(sizeof(u8));	/* _RINGS_TX_PUSH */
 }
 
 static int rings_fill_reply(struct sk_buff *skb,
@@ -94,7 +95,8 @@ static int rings_fill_reply(struct sk_buff *skb,
 	     (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
 			 kr->tcp_data_split))) ||
 	    (kr->cqe_size &&
-	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))))
+	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
+	    nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push))
 		return -EMSGSIZE;
 
 	return 0;
@@ -123,6 +125,7 @@ const struct nla_policy ethnl_rings_set_policy[] = {
 	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
 	[ETHTOOL_A_RINGS_RX_BUF_LEN]		= NLA_POLICY_MIN(NLA_U32, 1),
 	[ETHTOOL_A_RINGS_CQE_SIZE]		= NLA_POLICY_MIN(NLA_U32, 1),
+	[ETHTOOL_A_RINGS_TX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
 };
 
 int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
@@ -149,6 +152,33 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
 	if (!ops->get_ringparam || !ops->set_ringparam)
 		goto out_dev;
 
+	if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&
+	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
+		ret = -EOPNOTSUPP;
+		NL_SET_ERR_MSG_ATTR(info->extack,
+				    tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
+				    "setting rx buf len not supported");
+		goto out_dev;
+	}
+
+	if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
+	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
+		ret = -EOPNOTSUPP;
+		NL_SET_ERR_MSG_ATTR(info->extack,
+				    tb[ETHTOOL_A_RINGS_CQE_SIZE],
+				    "setting cqe size not supported");
+		goto out_dev;
+	}
+
+	if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
+	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
+		ret = -EOPNOTSUPP;
+		NL_SET_ERR_MSG_ATTR(info->extack,
+				    tb[ETHTOOL_A_RINGS_TX_PUSH],
+				    "setting tx push not supported");
+		goto out_dev;
+	}
+
 	rtnl_lock();
 	ret = ethnl_ops_begin(dev);
 	if (ret < 0)
@@ -165,6 +195,8 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
 			  tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
 	ethnl_update_u32(&kernel_ringparam.cqe_size,
 			 tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
+	ethnl_update_u8(&kernel_ringparam.tx_push,
+			tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
 	ret = 0;
 	if (!mod)
 		goto out_ops;
@@ -187,24 +219,6 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
 		goto out_ops;
 	}
 
-	if (kernel_ringparam.rx_buf_len != 0 &&
-	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
-		ret = -EOPNOTSUPP;
-		NL_SET_ERR_MSG_ATTR(info->extack,
-				    tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
-				    "setting rx buf len not supported");
-		goto out_ops;
-	}
-
-	if (kernel_ringparam.cqe_size &&
-	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
-		ret = -EOPNOTSUPP;
-		NL_SET_ERR_MSG_ATTR(info->extack,
-				    tb[ETHTOOL_A_RINGS_CQE_SIZE],
-				    "setting cqe size not supported");
-		goto out_ops;
-	}
-
 	ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
 					      &kernel_ringparam, info->extack);
 	if (ret < 0)
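
For completeness, below is a minimal, hedged sketch of the driver-side contract the core changes above establish: a driver opts in by setting ETHTOOL_RING_USE_TX_PUSH in supported_ring_params, reports the current state through kernel_ethtool_ringparam::tx_push in .get_ringparam, and applies the requested state in .set_ringparam, much as hns3 does with HNS3_NIC_STATE_TX_PUSH_ENABLE. The foo driver, its private structure, and the FOO_TX_PUSH_ENABLED bit are hypothetical stand-ins, not part of this series.

/* Hedged sketch of a driver using the new tx_push ring parameter.
 * "foo", foo_priv and FOO_TX_PUSH_ENABLED are hypothetical.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Bit in foo_priv::state tracking whether TX push is enabled. */
#define FOO_TX_PUSH_ENABLED	0

struct foo_priv {
	unsigned long state;
};

static void foo_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Report the current TX push state via the new kernel-only field. */
	kernel_ring->tx_push = test_bit(FOO_TX_PUSH_ENABLED, &priv->state);
}

static int foo_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kernel_ring,
			     struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* ethnl_set_rings() rejects ETHTOOL_A_RINGS_TX_PUSH with -EOPNOTSUPP
	 * unless ETHTOOL_RING_USE_TX_PUSH is advertised below, so the value
	 * can simply be applied here.
	 */
	if (kernel_ring->tx_push)
		set_bit(FOO_TX_PUSH_ENABLED, &priv->state);
	else
		clear_bit(FOO_TX_PUSH_ENABLED, &priv->state);

	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	/* Opt in to the new ring parameter. */
	.supported_ring_params	= ETHTOOL_RING_USE_TX_PUSH,
	.get_ringparam		= foo_get_ringparam,
	.set_ringparam		= foo_set_ringparam,
};

Keeping tx_push in struct kernel_ethtool_ringparam rather than in the uAPI struct ethtool_ringparam leaves the legacy ioctl layout untouched, so only netlink users see the new field.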