Projects
Eulaceura:Mainline
dpdk
_service:obs_scm:0022-net-hns3-refactor-PF-mailbox-message-struct.patch
Sign Up
Log In
Username
Password
Overview
Repositories
Revisions
Requests
Users
Attributes
Meta
File _service:obs_scm:0022-net-hns3-refactor-PF-mailbox-message-struct.patch of Package dpdk
From 11e1f6c8b4ec6d246b65448a65533aad7129a5d2 Mon Sep 17 00:00:00 2001 From: Dengdui Huang <huangdengdui@huawei.com> Date: Fri, 8 Dec 2023 14:55:06 +0800 Subject: [PATCH 22/30] net/hns3: refactor PF mailbox message struct [ upstream commit 4d534598d922130d12c244d3237652fbfdad0f4b ] The data region in PF to VF mbx message command is used to communicate with VF driver. And this data region exists as an array. As a result, some complicated feature commands, like mailbox response, link change event, close promisc mode, reset request and update pvid state, have to use magic number to set them. This isn't good for maintenance of driver. So this patch refactors these messages by extracting an hns3_pf_to_vf_msg structure. Fixes: 463e748964f5 ("net/hns3: support mailbox") Cc: stable@dpdk.org Signed-off-by: Dengdui Huang <huangdengdui@huawei.com> Signed-off-by: Jie Hai <haijie1@huawei.com> --- drivers/net/hns3/hns3_mbx.c | 38 ++++++++++++++++++------------------- drivers/net/hns3/hns3_mbx.h | 25 +++++++++++++++++++++++- 2 files changed, 43 insertions(+), 20 deletions(-) diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c index ad5ec55..c90f5d5 100644 --- a/drivers/net/hns3/hns3_mbx.c +++ b/drivers/net/hns3/hns3_mbx.c @@ -192,17 +192,17 @@ static void hns3vf_handle_link_change_event(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) { + struct hns3_mbx_link_status *link_info = + (struct hns3_mbx_link_status *)req->msg.msg_data; uint8_t link_status, link_duplex; - uint16_t *msg_q = req->msg; uint8_t support_push_lsc; uint32_t link_speed; - memcpy(&link_speed, &msg_q[2], sizeof(link_speed)); - link_status = rte_le_to_cpu_16(msg_q[1]); - link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]); - hns3vf_update_link_status(hw, link_status, link_speed, - link_duplex); - support_push_lsc = (*(uint8_t *)&msg_q[5]) & 1u; + link_status = (uint8_t)rte_le_to_cpu_16(link_info->link_status); + link_speed = rte_le_to_cpu_32(link_info->speed); + link_duplex = 
(uint8_t)rte_le_to_cpu_16(link_info->duplex); + hns3vf_update_link_status(hw, link_status, link_speed, link_duplex); + support_push_lsc = (link_info->flag) & 1u; hns3vf_update_push_lsc_cap(hw, support_push_lsc); } @@ -211,7 +211,6 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) { enum hns3_reset_level reset_level; - uint16_t *msg_q = req->msg; /* * PF has asserted reset hence VF should go in pending @@ -219,7 +218,7 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, * has been completely reset. After this stack should * eventually be re-initialized. */ - reset_level = rte_le_to_cpu_16(msg_q[1]); + reset_level = rte_le_to_cpu_16(req->msg.reset_level); hns3_atomic_set_bit(reset_level, &hw->reset.pending); hns3_warn(hw, "PF inform reset level %d", reset_level); @@ -241,8 +240,9 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) * to match the request. */ if (req->match_id == resp->match_id) { - resp->resp_status = hns3_resp_to_errno(req->msg[3]); - memcpy(resp->additional_info, &req->msg[4], + resp->resp_status = + hns3_resp_to_errno(req->msg.resp_status); + memcpy(resp->additional_info, &req->msg.resp_data, HNS3_MBX_MAX_RESP_DATA_SIZE); rte_io_wmb(); resp->received_match_resp = true; @@ -255,7 +255,8 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) * support copy request's match_id to its response. So VF follows the * original scheme to process. 
*/ - msg_data = (uint32_t)req->msg[1] << HNS3_MBX_RESP_CODE_OFFSET | req->msg[2]; + msg_data = (uint32_t)req->msg.vf_mbx_msg_code << + HNS3_MBX_RESP_CODE_OFFSET | req->msg.vf_mbx_msg_subcode; if (resp->req_msg_data != msg_data) { hns3_warn(hw, "received response tag (%u) is mismatched with requested tag (%u)", @@ -263,8 +264,8 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) return; } - resp->resp_status = hns3_resp_to_errno(req->msg[3]); - memcpy(resp->additional_info, &req->msg[4], + resp->resp_status = hns3_resp_to_errno(req->msg.resp_status); + memcpy(resp->additional_info, &req->msg.resp_data, HNS3_MBX_MAX_RESP_DATA_SIZE); rte_io_wmb(); resp->received_match_resp = true; @@ -305,8 +306,7 @@ static void hns3_update_port_base_vlan_info(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) { -#define PVID_STATE_OFFSET 1 - uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ? + uint16_t new_pvid_state = req->msg.pvid_state ? HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE; /* * Currently, hardware doesn't support more than two layers VLAN offload @@ -355,7 +355,7 @@ hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw) while (next_to_use != tail) { desc = &crq->desc[next_to_use]; req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data; - opcode = req->msg[0] & 0xff; + opcode = req->msg.code & 0xff; flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag); if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B)) @@ -428,7 +428,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) desc = &crq->desc[crq->next_to_use]; req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data; - opcode = req->msg[0] & 0xff; + opcode = req->msg.code & 0xff; flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag); if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) { @@ -484,7 +484,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) * hns3 PF kernel driver, VF driver will receive this * mailbox message from PF driver. 
*/ - hns3_handle_promisc_info(hw, req->msg[1]); + hns3_handle_promisc_info(hw, req->msg.promisc_en); break; default: hns3_err(hw, "received unsupported(%u) mbx msg", diff --git a/drivers/net/hns3/hns3_mbx.h b/drivers/net/hns3/hns3_mbx.h index 59fb73a..09780fc 100644 --- a/drivers/net/hns3/hns3_mbx.h +++ b/drivers/net/hns3/hns3_mbx.h @@ -118,6 +118,13 @@ struct hns3_mbx_vlan_filter { uint16_t proto; } __rte_packed; +struct hns3_mbx_link_status { + uint16_t link_status; + uint32_t speed; + uint16_t duplex; + uint8_t flag; +} __rte_packed; + #define HNS3_MBX_MSG_MAX_DATA_SIZE 14 #define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4 struct hns3_vf_to_pf_msg { @@ -146,6 +153,22 @@ struct hns3_vf_to_pf_msg { }; }; +struct hns3_pf_to_vf_msg { + uint16_t code; + union { + struct { + uint16_t vf_mbx_msg_code; + uint16_t vf_mbx_msg_subcode; + uint16_t resp_status; + uint8_t resp_data[HNS3_MBX_MAX_RESP_DATA_SIZE]; + }; + uint16_t promisc_en; + uint16_t reset_level; + uint16_t pvid_state; + uint8_t msg_data[HNS3_MBX_MSG_MAX_DATA_SIZE]; + }; +}; + struct errno_respcode_map { uint16_t resp_code; int err_no; @@ -170,7 +193,7 @@ struct hns3_mbx_pf_to_vf_cmd { uint8_t msg_len; uint8_t rsv1; uint16_t match_id; - uint16_t msg[8]; + struct hns3_pf_to_vf_msg msg; }; struct hns3_pf_rst_done_cmd { -- 2.33.0
Locations
Projects
Search
Status Monitor
Help
Open Build Service
OBS Manuals
API Documentation
OBS Portal
Reporting a Bug
Contact
Mailing List
Forums
Chat (IRC)
Twitter
Open Build Service (OBS)
is an
openSUSE project
.
浙ICP备2022010568号-2