commit cfab1d3ce7 (parent b59f84ee36): kernel
83468 changed files with 37938688 additions and 0 deletions

linux-6.8.1/include/linux/qed/common_hsi.h (new file, 1476 lines)
File diff suppressed because it is too large.
linux-6.8.1/include/linux/qed/eth_common.h (new file, 490 lines)
@@ -0,0 +1,490 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef __ETH_COMMON__
#define __ETH_COMMON__

/********************/
/* ETH FW CONSTANTS */
/********************/

#define ETH_HSI_VER_MAJOR 3
#define ETH_HSI_VER_MINOR 11

#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
/* Maximum number of pinned L2 connections (CIDs) */
#define ETH_PINNED_CONN_MAX_NUM 32

#define ETH_CACHE_LINE_SIZE 64
#define ETH_RX_CQE_GAP 32
#define ETH_MAX_RAMROD_PER_CON 8
#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
#define ETH_RX_NUM_NEXT_PAGE_BDS 2

#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251

#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
#define ETH_TX_MAX_LSO_HDR_NBD 4
#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
#define ETH_TX_MIN_BDS_PER_PKT_W_VPORT_FORWARDING 4
#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
#define ETH_TX_MAX_LSO_HDR_BYTES 510
#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF

#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
	(ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
	(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)

#define ETH_RX_MAX_BUFF_PER_PKT 5
#define ETH_RX_BD_THRESHOLD 16

/* Num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
#define ETH_NUM_VLAN_FILTERS 512

/* Approx. multicast constants */
#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
#define ETH_MULTICAST_MAC_BINS 256
#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)

/* Ethernet vport update constants */
#define ETH_FILTER_RULES_COUNT 10
#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
#define ETH_RSS_IND_TABLE_MASK_SIZE_REGS (ETH_RSS_IND_TABLE_ENTRIES_NUM / 32)
#define ETH_RSS_KEY_SIZE_REGS 10
#define ETH_RSS_ENGINE_NUM_K2 207
#define ETH_RSS_ENGINE_NUM_BB 127

/* TPA constants */
#define ETH_TPA_MAX_AGGS_NUM 64
#define ETH_TPA_CQE_START_BW_LEN_LIST_SIZE 2
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4

/* Control frame check constants */
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4

/* GFS constants */
#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */

/* Destination port mode */
enum dst_port_mode {
	DST_PORT_PHY,
	DST_PORT_LOOPBACK,
	DST_PORT_PHY_LOOPBACK,
	DST_PORT_DROP,
	MAX_DST_PORT_MODE
};

/* Ethernet address type */
enum eth_addr_type {
	BROADCAST_ADDRESS,
	MULTICAST_ADDRESS,
	UNICAST_ADDRESS,
	UNKNOWN_ADDRESS,
	MAX_ETH_ADDR_TYPE
};

struct eth_tx_1st_bd_flags {
	u8 bitfields;
#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
};

/* The parsing information data for the first tx bd of a given packet */
struct eth_tx_data_1st_bd {
	__le16 vlan;
	u8 nbds;
	struct eth_tx_1st_bd_flags bd_flags;
	__le16 bitfields;
#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
};

/* The parsing information data for the second tx bd of a given packet */
struct eth_tx_data_2nd_bd {
	__le16 tunn_ip_size;
	__le16 bitfields1;
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_SHIFT 6
#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
	__le16 bitfields2;
#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};

/* Firmware data for L2-EDPM packet */
struct eth_edpm_fw_data {
	struct eth_tx_data_1st_bd data_1st_bd;
	struct eth_tx_data_2nd_bd data_2nd_bd;
	__le32 reserved;
};

/* Tunneling parsing flags */
struct eth_tunnel_parsing_flags {
	u8 flags;
#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
#define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
};

/* PMD flow control bits */
struct eth_pmd_flow_flags {
	u8 flags;
#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
};

/* Regular ETH Rx FP CQE */
struct eth_fast_path_rx_reg_cqe {
	u8 type;
	u8 bitfields;
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
	__le16 pkt_len;
	struct parsing_and_err_flags pars_flags;
	__le16 vlan_tag;
	__le32 rss_hash;
	__le16 len_on_first_bd;
	u8 placement_offset;
	struct eth_tunnel_parsing_flags tunnel_pars_flags;
	u8 bd_num;
	u8 reserved;
	__le16 reserved2;
	__le32 flow_id_or_resource_id;
	u8 reserved1[7];
	struct eth_pmd_flow_flags pmd_flags;
};

/* TPA-continue ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_cont_cqe {
	u8 type;
	u8 tpa_agg_index;
	__le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
	u8 reserved;
	u8 reserved1;
	__le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
	u8 reserved3[3];
	struct eth_pmd_flow_flags pmd_flags;
};

/* TPA-end ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_end_cqe {
	u8 type;
	u8 tpa_agg_index;
	__le16 total_packet_len;
	u8 num_of_bds;
	u8 end_reason;
	__le16 num_of_coalesced_segs;
	__le32 ts_delta;
	__le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
	__le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
	__le16 reserved1;
	u8 reserved2;
	struct eth_pmd_flow_flags pmd_flags;
};

/* TPA-start ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_start_cqe {
	u8 type;
	u8 bitfields;
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
	__le16 seg_len;
	struct parsing_and_err_flags pars_flags;
	__le16 vlan_tag;
	__le32 rss_hash;
	__le16 len_on_first_bd;
	u8 placement_offset;
	struct eth_tunnel_parsing_flags tunnel_pars_flags;
	u8 tpa_agg_index;
	u8 header_len;
	__le16 bw_ext_bd_len_list[ETH_TPA_CQE_START_BW_LEN_LIST_SIZE];
	__le16 reserved2;
	__le32 flow_id_or_resource_id;
	u8 reserved[3];
	struct eth_pmd_flow_flags pmd_flags;
};

/* The L4 pseudo checksum mode for Ethernet */
enum eth_l4_pseudo_checksum_mode {
	ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH,
	ETH_L4_PSEUDO_CSUM_ZERO_LENGTH,
	MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
};

struct eth_rx_bd {
	struct regpair addr;
};

/* Regular ETH Rx SP CQE */
struct eth_slow_path_rx_cqe {
	u8 type;
	u8 ramrod_cmd_id;
	u8 error_flag;
	u8 reserved[25];
	__le16 echo;
	u8 reserved1;
	struct eth_pmd_flow_flags pmd_flags;
};

/* Union for all ETH Rx CQE types */
union eth_rx_cqe {
	struct eth_fast_path_rx_reg_cqe fast_path_regular;
	struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
	struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
	struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
	struct eth_slow_path_rx_cqe slow_path;
};

/* ETH Rx CQE type */
enum eth_rx_cqe_type {
	ETH_RX_CQE_TYPE_UNUSED,
	ETH_RX_CQE_TYPE_REGULAR,
	ETH_RX_CQE_TYPE_SLOW_PATH,
	ETH_RX_CQE_TYPE_TPA_START,
	ETH_RX_CQE_TYPE_TPA_CONT,
	ETH_RX_CQE_TYPE_TPA_END,
	MAX_ETH_RX_CQE_TYPE
};

struct eth_rx_pmd_cqe {
	union eth_rx_cqe cqe;
	u8 reserved[ETH_RX_CQE_GAP];
};

enum eth_rx_tunn_type {
	ETH_RX_NO_TUNN,
	ETH_RX_TUNN_GENEVE,
	ETH_RX_TUNN_GRE,
	ETH_RX_TUNN_VXLAN,
	MAX_ETH_RX_TUNN_TYPE
};

/* Aggregation end reason. */
enum eth_tpa_end_reason {
	ETH_AGG_END_UNUSED,
	ETH_AGG_END_SP_UPDATE,
	ETH_AGG_END_MAX_LEN,
	ETH_AGG_END_LAST_SEG,
	ETH_AGG_END_TIMEOUT,
	ETH_AGG_END_NOT_CONSISTENT,
	ETH_AGG_END_OUT_OF_ORDER,
	ETH_AGG_END_NON_TPA_SEG,
	MAX_ETH_TPA_END_REASON
};

/* The first tx bd of a given packet */
struct eth_tx_1st_bd {
	struct regpair addr;
	__le16 nbytes;
	struct eth_tx_data_1st_bd data;
};

/* The second tx bd of a given packet */
struct eth_tx_2nd_bd {
	struct regpair addr;
	__le16 nbytes;
	struct eth_tx_data_2nd_bd data;
};

/* The parsing information data for the third tx bd of a given packet */
struct eth_tx_data_3rd_bd {
	__le16 lso_mss;
	__le16 bitfields;
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
	u8 tunn_l4_hdr_start_offset_w;
	u8 tunn_hdr_size_w;
};

/* The third tx bd of a given packet */
struct eth_tx_3rd_bd {
	struct regpair addr;
	__le16 nbytes;
	struct eth_tx_data_3rd_bd data;
};

/* The parsing information data for the forth tx bd of a given packet. */
struct eth_tx_data_4th_bd {
	u8 dst_vport_id;
	u8 reserved4;
	__le16 bitfields;
#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_MASK 0x1
#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_SHIFT 0
#define ETH_TX_DATA_4TH_BD_RESERVED1_MASK 0x7F
#define ETH_TX_DATA_4TH_BD_RESERVED1_SHIFT 1
#define ETH_TX_DATA_4TH_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_4TH_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_4TH_BD_RESERVED2_MASK 0x7F
#define ETH_TX_DATA_4TH_BD_RESERVED2_SHIFT 9
	__le16 reserved3;
};

/* The forth tx bd of a given packet */
struct eth_tx_4th_bd {
	struct regpair addr; /* Single continuous buffer */
	__le16 nbytes; /* Number of bytes in this BD */
	struct eth_tx_data_4th_bd data; /* Parsing information data */
};

/* Complementary information for the regular tx bd of a given packet */
struct eth_tx_data_bd {
	__le16 reserved0;
	__le16 bitfields;
#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
#define ETH_TX_DATA_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
	__le16 reserved3;
};

/* The common non-special TX BD ring element */
struct eth_tx_bd {
	struct regpair addr;
	__le16 nbytes;
	struct eth_tx_data_bd data;
};

union eth_tx_bd_types {
	struct eth_tx_1st_bd first_bd;
	struct eth_tx_2nd_bd second_bd;
	struct eth_tx_3rd_bd third_bd;
	struct eth_tx_4th_bd fourth_bd;
	struct eth_tx_bd reg_bd;
};

/* Mstorm Queue Zone */
enum eth_tx_tunn_type {
	ETH_TX_TUNN_GENEVE,
	ETH_TX_TUNN_TTAG,
	ETH_TX_TUNN_GRE,
	ETH_TX_TUNN_VXLAN,
	MAX_ETH_TX_TUNN_TYPE
};

/* Mstorm Queue Zone */
struct mstorm_eth_queue_zone {
	struct eth_rx_prod_data rx_producers;
	__le32 reserved[3];
};

/* Ystorm Queue Zone */
struct xstorm_eth_queue_zone {
	struct coalescing_timeset int_coalescing_timeset;
	u8 reserved[7];
};

/* ETH doorbell data */
struct eth_db_data {
	u8 params;
#define ETH_DB_DATA_DEST_MASK 0x3
#define ETH_DB_DATA_DEST_SHIFT 0
#define ETH_DB_DATA_AGG_CMD_MASK 0x3
#define ETH_DB_DATA_AGG_CMD_SHIFT 2
#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
#define ETH_DB_DATA_RESERVED_MASK 0x1
#define ETH_DB_DATA_RESERVED_SHIFT 5
#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
	u8 agg_flags;
	__le16 bd_prod;
};

/* RSS hash type */
enum rss_hash_type {
	RSS_HASH_TYPE_DEFAULT = 0,
	RSS_HASH_TYPE_IPV4 = 1,
	RSS_HASH_TYPE_TCP_IPV4 = 2,
	RSS_HASH_TYPE_IPV6 = 3,
	RSS_HASH_TYPE_TCP_IPV6 = 4,
	RSS_HASH_TYPE_UDP_IPV4 = 5,
	RSS_HASH_TYPE_UDP_IPV6 = 6,
	MAX_RSS_HASH_TYPE
};

#endif /* __ETH_COMMON__ */
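Note on usage: the bitfield-style members above (for example bitfields in struct eth_tx_1st_bd_flags) are manipulated through the open-coded *_MASK/*_SHIFT pairs rather than C bit-fields. The following is a minimal illustrative sketch of how such a field can be packed; the EXAMPLE_SET_FIELD helper and example_mark_lso() function are local to this note and not part of the header (the qed headers ship their own field helpers in common_hsi.h, whose diff is suppressed above).

/* Illustrative only: pack a flag into a MASK/SHIFT-described field. */
#define EXAMPLE_SET_FIELD(var, name, val)				\
	((var) = ((var) & ~((name##_MASK) << (name##_SHIFT))) |	\
		 (((val) & (name##_MASK)) << (name##_SHIFT)))

static void example_mark_lso(struct eth_tx_1st_bd_flags *f)
{
	/* Request LSO and inner L4 checksum offload on the first TX BD. */
	EXAMPLE_SET_FIELD(f->bitfields, ETH_TX_1ST_BD_FLAGS_LSO, 1);
	EXAMPLE_SET_FIELD(f->bitfields, ETH_TX_1ST_BD_FLAGS_L4_CSUM, 1);
}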
linux-6.8.1/include/linux/qed/fcoe_common.h (new file, 742 lines)
@@ -0,0 +1,742 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef __FCOE_COMMON__
#define __FCOE_COMMON__

/*********************/
/* FCOE FW CONSTANTS */
/*********************/

#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12

/* The fcoe storm task context protection-information of Ystorm */
struct protection_info_ctx {
	__le16 flags;
#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3
#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0
#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1
#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2
#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1
#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF
#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4
#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F
#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9
	u8 dix_block_size;
	u8 dst_size;
};

/* The fcoe storm task context protection-information of Ystorm */
union protection_info_union_ctx {
	struct protection_info_ctx info;
	__le32 value;
};

/* FCP CMD payload */
struct fcoe_fcp_cmd_payload {
	__le32 opaque[8];
};

/* FCP RSP payload */
struct fcoe_fcp_rsp_payload {
	__le32 opaque[6];
};

/* FCP RSP payload */
struct fcp_rsp_payload_padded {
	struct fcoe_fcp_rsp_payload rsp_payload;
	__le32 reserved[2];
};

/* FCP RSP payload */
struct fcoe_fcp_xfer_payload {
	__le32 opaque[3];
};

/* FCP RSP payload */
struct fcp_xfer_payload_padded {
	struct fcoe_fcp_xfer_payload xfer_payload;
	__le32 reserved[5];
};

/* Task params */
struct fcoe_tx_data_params {
	__le32 data_offset;
	__le32 offset_in_io;
	u8 flags;
#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1
#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1
#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1
#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1
#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2
#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F
#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3
	u8 dif_residual;
	__le16 seq_cnt;
	__le16 single_sge_saved_offset;
	__le16 next_dif_offset;
	__le16 seq_id;
	__le16 reserved3;
};

/* Middle path parameters: FC header fields provided by the driver */
struct fcoe_tx_mid_path_params {
	__le32 parameter;
	u8 r_ctl;
	u8 type;
	u8 cs_ctl;
	u8 df_ctl;
	__le16 rx_id;
	__le16 ox_id;
};

/* Task params */
struct fcoe_tx_params {
	struct fcoe_tx_data_params data;
	struct fcoe_tx_mid_path_params mid_path;
};

/* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */
union fcoe_tx_info_union_ctx {
	struct fcoe_fcp_cmd_payload fcp_cmd_payload;
	struct fcp_rsp_payload_padded fcp_rsp_payload;
	struct fcp_xfer_payload_padded fcp_xfer_payload;
	struct fcoe_tx_params tx_params;
};

/* Data sgl */
struct fcoe_slow_sgl_ctx {
	struct regpair base_sgl_addr;
	__le16 curr_sge_off;
	__le16 remainder_num_sges;
	__le16 curr_sgl_index;
	__le16 reserved;
};

/* Union of DIX SGL \ cached DIX sges */
union fcoe_dix_desc_ctx {
	struct fcoe_slow_sgl_ctx dix_sgl;
	struct scsi_sge cached_dix_sge;
};

/* The fcoe storm task context of Ystorm */
struct ystorm_fcoe_task_st_ctx {
	u8 task_type;
	u8 sgl_mode;
#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F
#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1
	u8 cached_dix_sge;
	u8 expect_first_xfer;
	__le32 num_pbf_zero_write;
	union protection_info_union_ctx protection_info_union;
	__le32 data_2_trns_rem;
	struct scsi_sgl_params sgl_params;
	u8 reserved1[12];
	union fcoe_tx_info_union_ctx tx_info_union;
	union fcoe_dix_desc_ctx dix_desc;
	struct scsi_cached_sges data_desc;
	__le16 ox_id;
	__le16 rx_id;
	__le32 task_rety_identifier;
	u8 reserved2[8];
};

struct ystorm_fcoe_task_ag_ctx {
	u8 byte0;
	u8 byte1;
	__le16 word0;
	u8 flags0;
#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
	u8 flags1;
#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
	u8 flags2;
#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
	u8 byte2;
	__le32 reg0;
	u8 byte3;
	u8 byte4;
	__le16 rx_id;
	__le16 word2;
	__le16 word3;
	__le16 word4;
	__le16 word5;
	__le32 reg1;
	__le32 reg2;
};

struct tstorm_fcoe_task_ag_ctx {
	u8 reserved;
	u8 byte1;
	__le16 icid;
	u8 flags0;
#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
	u8 flags1;
#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
	u8 flags2;
#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
	u8 flags3;
#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
	u8 flags4;
#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
	u8 cleanup_state;
	__le16 last_sent_tid;
	__le32 rec_rr_tov_exp_timeout;
	u8 byte3;
	u8 byte4;
	__le16 word2;
	__le16 word3;
	__le16 word4;
	__le32 data_offset_end_of_seq;
	__le32 data_offset_next;
};

/* Cached data sges */
struct fcoe_exp_ro {
	__le32 data_offset;
	__le32 reserved;
};

/* Union of Cleanup address \ expected relative offsets */
union fcoe_cleanup_addr_exp_ro_union {
	struct regpair abts_rsp_fc_payload_hi;
	struct fcoe_exp_ro exp_ro;
};

/* Fields copied from the ABTS rsp packet */
struct fcoe_abts_pkt {
	__le32 abts_rsp_fc_payload_lo;
	__le16 abts_rsp_rx_id;
	u8 abts_rsp_rctl;
	u8 reserved2;
};

/* FW read-write (modifiable) part. The fcoe task storm context of Tstorm */
struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
	union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
	__le16 flags;
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
	__le16 seq_cnt;
	u8 seq_id;
	u8 ooo_rx_seq_id;
	__le16 rx_id;
	struct fcoe_abts_pkt abts_data;
	__le32 e_d_tov_exp_timeout_val;
	__le16 ooo_rx_seq_cnt;
	__le16 reserved1;
};

/* FW read-only part. The fcoe task storm context of Tstorm */
struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
	u8 task_type;
	u8 dev_type;
	u8 conf_supported;
	u8 glbl_q_num;
	__le32 cid;
	__le32 fcp_cmd_trns_size;
	__le32 rsrv;
};

/** The fcoe task storm context of Tstorm */
struct tstorm_fcoe_task_st_ctx {
	struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
	struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
};

struct mstorm_fcoe_task_ag_ctx {
	u8 byte0;
	u8 byte1;
	__le16 icid;
	u8 flags0;
#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
	u8 flags1;
#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
	u8 flags2;
#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
	u8 cleanup_state;
	__le32 received_bytes;
	u8 byte3;
	u8 glbl_q_num;
	__le16 word1;
	__le16 tid_to_xfer;
	__le16 word3;
	__le16 word4;
	__le16 word5;
	__le32 expected_bytes;
	__le32 reg2;
};

/* The fcoe task storm context of Mstorm */
struct mstorm_fcoe_task_st_ctx {
	struct regpair rsp_buf_addr;
	__le32 rsrv[2];
	struct scsi_sgl_params sgl_params;
	__le32 data_2_trns_rem;
	__le32 data_buffer_offset;
	__le16 parent_id;
	__le16 flags;
#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF
#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0
#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3
#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4
#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1
#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6
#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1
#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3
#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8
#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1
#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11
#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1
#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12
#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13
#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3
#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14
	struct scsi_cached_sges data_desc;
};

struct ustorm_fcoe_task_ag_ctx {
	u8 reserved;
	u8 byte1;
	__le16 icid;
	u8 flags0;
#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
	u8 flags1;
#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
	u8 flags2;
#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
	u8 flags3;
#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
	__le32 dif_err_intervals;
	__le32 dif_error_1st_interval;
	__le32 global_cq_num;
	__le32 reg3;
	__le32 reg4;
	__le32 reg5;
};

/* FCoE task context */
struct fcoe_task_context {
	struct ystorm_fcoe_task_st_ctx ystorm_st_context;
	struct regpair ystorm_st_padding[2];
	struct tdif_task_context tdif_context;
	struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
	struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
	struct timers_context timer_context;
	struct tstorm_fcoe_task_st_ctx tstorm_st_context;
	struct regpair tstorm_st_padding[2];
	struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
	struct mstorm_fcoe_task_st_ctx mstorm_st_context;
	struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
	struct rdif_task_context rdif_context;
};

/* FCoE additional WQE (Sq/XferQ) information */
union fcoe_additional_info_union {
	__le32 previous_tid;
	__le32 parent_tid;
	__le32 burst_length;
	__le32 seq_rec_updated_offset;
};

/* FCoE Ramrod Command IDs */
enum fcoe_completion_status {
	FCOE_COMPLETION_STATUS_SUCCESS,
	FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
	FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
	MAX_FCOE_COMPLETION_STATUS
};

/* FC address (SID/DID) network presentation */
struct fc_addr_nw {
	u8 addr_lo;
	u8 addr_mid;
	u8 addr_hi;
};

/* FCoE connection offload */
struct fcoe_conn_offload_ramrod_data {
	struct regpair sq_pbl_addr;
	struct regpair sq_curr_page_addr;
	struct regpair sq_next_page_addr;
	struct regpair xferq_pbl_addr;
	struct regpair xferq_curr_page_addr;
	struct regpair xferq_next_page_addr;
	struct regpair respq_pbl_addr;
	struct regpair respq_curr_page_addr;
	struct regpair respq_next_page_addr;
	__le16 dst_mac_addr_lo;
	__le16 dst_mac_addr_mid;
	__le16 dst_mac_addr_hi;
	__le16 src_mac_addr_lo;
	__le16 src_mac_addr_mid;
	__le16 src_mac_addr_hi;
	__le16 tx_max_fc_pay_len;
	__le16 e_d_tov_timer_val;
	__le16 rx_max_fc_pay_len;
	__le16 vlan_tag;
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
	__le16 physical_q0;
	__le16 rec_rr_tov_timer_val;
	struct fc_addr_nw s_id;
	u8 max_conc_seqs_c3;
	struct fc_addr_nw d_id;
	u8 flags;
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT 4
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 5
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 7
	__le16 conn_id;
	u8 def_q_idx;
	u8 reserved[5];
};

/* FCoE terminate connection request */
struct fcoe_conn_terminate_ramrod_data {
	struct regpair terminate_params_addr;
};

/* FCoE device type */
enum fcoe_device_type {
	FCOE_TASK_DEV_TYPE_DISK,
	FCOE_TASK_DEV_TYPE_TAPE,
	MAX_FCOE_DEVICE_TYPE
};

/* Data sgl */
struct fcoe_fast_sgl_ctx {
	struct regpair sgl_start_addr;
	__le32 sgl_byte_offset;
	__le16 task_reuse_cnt;
	__le16 init_offset_in_first_sge;
};

/* FCoE firmware function init */
struct fcoe_init_func_ramrod_data {
	struct scsi_init_func_params func_params;
	struct scsi_init_func_queues q_params;
	__le16 mtu;
	__le16 sq_num_pages_in_pbl;
	__le32 reserved[3];
};

/* FCoE: Mode of the connection: Target or Initiator or both */
enum fcoe_mode_type {
	FCOE_INITIATOR_MODE = 0x0,
	FCOE_TARGET_MODE = 0x1,
	FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
	MAX_FCOE_MODE_TYPE
};

/* Per PF FCoE receive path statistics - tStorm RAM structure */
struct fcoe_rx_stat {
	struct regpair fcoe_rx_byte_cnt;
	struct regpair fcoe_rx_data_pkt_cnt;
	struct regpair fcoe_rx_xfer_pkt_cnt;
	struct regpair fcoe_rx_other_pkt_cnt;
	__le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
	__le32 fcoe_silent_drop_pkt_rq_full_cnt;
	__le32 fcoe_silent_drop_pkt_crc_error_cnt;
	__le32 fcoe_silent_drop_pkt_task_invalid_cnt;
	__le32 fcoe_silent_drop_total_pkt_cnt;
	__le32 rsrv;
};

/* FCoE SQE request type */
enum fcoe_sqe_request_type {
	SEND_FCOE_CMD,
	SEND_FCOE_MIDPATH,
	SEND_FCOE_ABTS_REQUEST,
	FCOE_EXCHANGE_CLEANUP,
	FCOE_SEQUENCE_RECOVERY,
	SEND_FCOE_XFER_RDY,
	SEND_FCOE_RSP,
	SEND_FCOE_RSP_WITH_SENSE_DATA,
	SEND_FCOE_TARGET_DATA,
	SEND_FCOE_INITIATOR_DATA,
	SEND_FCOE_XFER_CONTINUATION_RDY,
	SEND_FCOE_TARGET_ABTS_RSP,
	MAX_FCOE_SQE_REQUEST_TYPE
};

/* FCoE statistics request */
struct fcoe_stat_ramrod_data {
	struct regpair stat_params_addr;
};

/* FCoE task type */
enum fcoe_task_type {
	FCOE_TASK_TYPE_WRITE_INITIATOR,
	FCOE_TASK_TYPE_READ_INITIATOR,
	FCOE_TASK_TYPE_MIDPATH,
	FCOE_TASK_TYPE_UNSOLICITED,
	FCOE_TASK_TYPE_ABTS,
	FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
	FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
	FCOE_TASK_TYPE_WRITE_TARGET,
	FCOE_TASK_TYPE_READ_TARGET,
	FCOE_TASK_TYPE_RSP,
	FCOE_TASK_TYPE_RSP_SENSE_DATA,
	FCOE_TASK_TYPE_ABTS_TARGET,
	FCOE_TASK_TYPE_ENUM_SIZE,
	MAX_FCOE_TASK_TYPE
};

/* Per PF FCoE transmit path statistics - pStorm RAM structure */
struct fcoe_tx_stat {
	struct regpair fcoe_tx_byte_cnt;
	struct regpair fcoe_tx_data_pkt_cnt;
	struct regpair fcoe_tx_xfer_pkt_cnt;
	struct regpair fcoe_tx_other_pkt_cnt;
};

/* FCoE SQ/XferQ element */
struct fcoe_wqe {
	__le16 task_id;
	__le16 flags;
#define FCOE_WQE_REQ_TYPE_MASK 0xF
#define FCOE_WQE_REQ_TYPE_SHIFT 0
#define FCOE_WQE_SGL_MODE_MASK 0x1
#define FCOE_WQE_SGL_MODE_SHIFT 4
#define FCOE_WQE_CONTINUATION_MASK 0x1
#define FCOE_WQE_CONTINUATION_SHIFT 5
#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
#define FCOE_WQE_RESERVED_MASK 0x1
#define FCOE_WQE_RESERVED_SHIFT 7
#define FCOE_WQE_NUM_SGES_MASK 0xF
#define FCOE_WQE_NUM_SGES_SHIFT 8
#define FCOE_WQE_RESERVED1_MASK 0xF
#define FCOE_WQE_RESERVED1_SHIFT 12
	union fcoe_additional_info_union additional_info_union;
};

/* FCoE XFRQ element */
struct xfrqe_prot_flags {
	u8 flags;
#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1
#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4
#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3
#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5
#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1
#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
};

/* FCoE doorbell data */
struct fcoe_db_data {
	u8 params;
#define FCOE_DB_DATA_DEST_MASK 0x3
#define FCOE_DB_DATA_DEST_SHIFT 0
#define FCOE_DB_DATA_AGG_CMD_MASK 0x3
#define FCOE_DB_DATA_AGG_CMD_SHIFT 2
#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1
#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4
#define FCOE_DB_DATA_RESERVED_MASK 0x1
#define FCOE_DB_DATA_RESERVED_SHIFT 5
#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
	u8 agg_flags;
	__le16 sq_prod;
};

#endif /* __FCOE_COMMON__ */
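Note on usage: the same MASK/SHIFT convention also covers multi-bit, little-endian fields such as vlan_tag in struct fcoe_conn_offload_ramrod_data, where the 802.1Q VLAN ID, CFI and priority share one __le16. The sketch below is illustrative only and not taken from the driver; it merely shows how those three sub-fields could be composed before writing the struct member.

/* Illustrative only: compose the vlan_tag field (VLAN ID + CFI + priority). */
static __le16 example_fcoe_vlan_tag(u16 vlan_id, u8 cfi, u8 prio)
{
	u16 v = 0;

	v |= (vlan_id & FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK) <<
	     FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
	v |= (cfi & FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK) <<
	     FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT;
	v |= (prio & FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK) <<
	     FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;

	return cpu_to_le16(v);	/* the struct field is little-endian */
}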
linux-6.8.1/include/linux/qed/iscsi_common.h (new file, 1530 lines)
File diff suppressed because it is too large.
linux-6.8.1/include/linux/qed/iwarp_common.h (new file, 30 lines)
@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef __IWARP_COMMON__
#define __IWARP_COMMON__

#include <linux/qed/rdma_common.h>

/************************/
/* IWARP FW CONSTANTS */
/************************/

#define IWARP_ACTIVE_MODE 0
#define IWARP_PASSIVE_MODE 1

#define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000)
#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000)
#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000)
#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)

#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128)
#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176)

#define IWARP_MAX_QPS (64 * 1024)

#endif /* __IWARP_COMMON__ */
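Note on usage: the shared-queue page constants above describe a single 32 KiB page holding the RQ and SQ PBL regions at fixed offsets (RQ PBL at 0x4000 for up to 0x1000 bytes, SQ PBL at 0x5000 for up to 0x3000 bytes). The following compile-time sanity sketch is illustrative only, assuming the kernel's BUILD_BUG_ON macro; it is not part of the header.

/* Illustrative only: the RQ and SQ PBL windows must stay inside the page. */
#include <linux/build_bug.h>

static inline void iwarp_shared_queue_page_layout_check(void)
{
	BUILD_BUG_ON(IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET +
		     IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE >
		     IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET);
	BUILD_BUG_ON(IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET +
		     IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE >
		     IWARP_SHARED_QUEUE_PAGE_SIZE);
}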
linux-6.8.1/include/linux/qed/nvmetcp_common.h (new file, 531 lines)
@@ -0,0 +1,531 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* Copyright 2021 Marvell. All rights reserved. */
|
||||
|
||||
#ifndef __NVMETCP_COMMON__
|
||||
#define __NVMETCP_COMMON__
|
||||
|
||||
#include "tcp_common.h"
|
||||
#include <linux/nvme-tcp.h>
|
||||
|
||||
#define NVMETCP_SLOW_PATH_LAYER_CODE (6)
|
||||
#define NVMETCP_WQE_NUM_SGES_SLOWIO (0xf)
|
||||
|
||||
/* NVMeTCP firmware function init parameters */
|
||||
struct nvmetcp_spe_func_init {
|
||||
__le16 half_way_close_timeout;
|
||||
u8 num_sq_pages_in_ring;
|
||||
u8 num_r2tq_pages_in_ring;
|
||||
u8 num_uhq_pages_in_ring;
|
||||
u8 ll2_rx_queue_id;
|
||||
u8 flags;
|
||||
#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1
|
||||
#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
|
||||
#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_MASK 0x1
|
||||
#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_SHIFT 1
|
||||
#define NVMETCP_SPE_FUNC_INIT_RESERVED0_MASK 0x3F
|
||||
#define NVMETCP_SPE_FUNC_INIT_RESERVED0_SHIFT 2
|
||||
u8 debug_flags;
|
||||
__le16 reserved1;
|
||||
u8 params;
|
||||
#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF
|
||||
#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0
|
||||
#define NVMETCP_SPE_FUNC_INIT_RESERVED1_MASK 0xF
|
||||
#define NVMETCP_SPE_FUNC_INIT_RESERVED1_SHIFT 4
|
||||
u8 reserved2[5];
|
||||
struct scsi_init_func_params func_params;
|
||||
struct scsi_init_func_queues q_params;
|
||||
};
|
||||
|
||||
/* NVMeTCP init params passed by driver to FW in NVMeTCP init ramrod. */
|
||||
struct nvmetcp_init_ramrod_params {
|
||||
struct nvmetcp_spe_func_init nvmetcp_init_spe;
|
||||
struct tcp_init_params tcp_init;
|
||||
};
|
||||
|
||||
/* NVMeTCP Ramrod Command IDs */
|
||||
enum nvmetcp_ramrod_cmd_id {
|
||||
NVMETCP_RAMROD_CMD_ID_UNUSED = 0,
|
||||
NVMETCP_RAMROD_CMD_ID_INIT_FUNC = 1,
|
||||
NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC = 2,
|
||||
NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
|
||||
NVMETCP_RAMROD_CMD_ID_UPDATE_CONN = 4,
|
||||
NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN = 5,
|
||||
NVMETCP_RAMROD_CMD_ID_CLEAR_SQ = 6,
|
||||
MAX_NVMETCP_RAMROD_CMD_ID
|
||||
};
|
||||
|
||||
struct nvmetcp_glbl_queue_entry {
|
||||
struct regpair cq_pbl_addr;
|
||||
struct regpair reserved;
|
||||
};
|
||||
|
||||
/* NVMeTCP conn level EQEs */
|
||||
enum nvmetcp_eqe_opcode {
|
||||
NVMETCP_EVENT_TYPE_INIT_FUNC = 0, /* Response after init Ramrod */
|
||||
NVMETCP_EVENT_TYPE_DESTROY_FUNC, /* Response after destroy Ramrod */
|
||||
NVMETCP_EVENT_TYPE_OFFLOAD_CONN,/* Response after option 2 offload Ramrod */
|
||||
NVMETCP_EVENT_TYPE_UPDATE_CONN, /* Response after update Ramrod */
|
||||
NVMETCP_EVENT_TYPE_CLEAR_SQ, /* Response after clear sq Ramrod */
|
||||
NVMETCP_EVENT_TYPE_TERMINATE_CONN, /* Response after termination Ramrod */
|
||||
NVMETCP_EVENT_TYPE_RESERVED0,
|
||||
NVMETCP_EVENT_TYPE_RESERVED1,
|
||||
NVMETCP_EVENT_TYPE_ASYN_CONNECT_COMPLETE, /* Connect completed (A-syn EQE) */
|
||||
NVMETCP_EVENT_TYPE_ASYN_TERMINATE_DONE, /* Termination completed (A-syn EQE) */
|
||||
NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES = 10, /* Separate EQs from err EQs */
|
||||
NVMETCP_EVENT_TYPE_ASYN_ABORT_RCVD, /* TCP RST packet receive (A-syn EQE) */
|
||||
NVMETCP_EVENT_TYPE_ASYN_CLOSE_RCVD, /* TCP FIN packet receive (A-syn EQE) */
|
||||
NVMETCP_EVENT_TYPE_ASYN_SYN_RCVD, /* TCP SYN+ACK packet receive (A-syn EQE) */
|
||||
NVMETCP_EVENT_TYPE_ASYN_MAX_RT_TIME, /* TCP max retransmit time (A-syn EQE) */
|
||||
NVMETCP_EVENT_TYPE_ASYN_MAX_RT_CNT, /* TCP max retransmit count (A-syn EQE) */
|
||||
NVMETCP_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, /* TCP ka probes count (A-syn EQE) */
|
||||
NVMETCP_EVENT_TYPE_ASYN_FIN_WAIT2, /* TCP fin wait 2 (A-syn EQE) */
|
||||
NVMETCP_EVENT_TYPE_NVMETCP_CONN_ERROR, /* NVMeTCP error response (A-syn EQE) */
|
||||
NVMETCP_EVENT_TYPE_TCP_CONN_ERROR, /* NVMeTCP error - tcp error (A-syn EQE) */
|
||||
MAX_NVMETCP_EQE_OPCODE
|
||||
};
|
||||
|
||||
struct nvmetcp_conn_offload_section {
|
||||
struct regpair cccid_itid_table_addr; /* CCCID to iTID table address */
|
||||
__le16 cccid_max_range; /* CCCID max value - used for validation */
|
||||
__le16 reserved[3];
|
||||
};
|
||||
|
||||
/* NVMe TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod */
|
||||
struct nvmetcp_conn_offload_params {
|
||||
struct regpair sq_pbl_addr;
|
||||
struct regpair r2tq_pbl_addr;
|
||||
struct regpair xhq_pbl_addr;
|
||||
struct regpair uhq_pbl_addr;
|
||||
__le16 physical_q0;
|
||||
__le16 physical_q1;
|
||||
u8 flags;
|
||||
#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
|
||||
#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
|
||||
#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
|
||||
#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
|
||||
#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
|
||||
#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
|
||||
#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_MASK 0x1
|
||||
#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_SHIFT 3
|
||||
#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0xF
|
||||
#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 4
|
||||
u8 default_cq;
|
||||
__le16 reserved0;
|
||||
__le32 reserved1;
|
||||
__le32 initial_ack;
|
||||
|
||||
struct nvmetcp_conn_offload_section nvmetcp; /* NVMe/TCP section */
|
||||
};
|
||||
|
||||
/* NVMe TCP and TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod. */
|
||||
struct nvmetcp_spe_conn_offload {
|
||||
__le16 reserved;
|
||||
__le16 conn_id;
|
||||
__le32 fw_cid;
|
||||
struct nvmetcp_conn_offload_params nvmetcp;
|
||||
struct tcp_offload_params_opt2 tcp;
|
||||
};
|
||||
|
||||
/* NVMeTCP connection update params passed by driver to FW in NVMETCP update ramrod. */
|
||||
struct nvmetcp_conn_update_ramrod_params {
|
||||
__le16 reserved0;
|
||||
__le16 conn_id;
|
||||
__le32 reserved1;
|
||||
u8 flags;
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_MASK 0x1
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_SHIFT 2
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x1
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_DATA_SHIFT 3
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_MASK 0x1
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_SHIFT 4
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_MASK 0x1
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_SHIFT 5
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_MASK 0x1
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_SHIFT 6
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_MASK 0x1
|
||||
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_SHIFT 7
|
||||
u8 reserved3[3];
|
||||
__le32 max_seq_size;
|
||||
__le32 max_send_pdu_length;
|
||||
__le32 max_recv_pdu_length;
|
||||
__le32 first_seq_length;
|
||||
__le32 reserved4[5];
|
||||
};
|
||||
|
||||
/* NVMeTCP connection termination request */
|
||||
struct nvmetcp_spe_conn_termination {
|
||||
__le16 reserved0;
|
||||
__le16 conn_id;
|
||||
__le32 reserved1;
|
||||
u8 abortive;
|
||||
u8 reserved2[7];
|
||||
struct regpair reserved3;
|
||||
struct regpair reserved4;
|
||||
};
|
||||
|
||||
struct nvmetcp_dif_flags {
|
||||
u8 flags;
|
||||
};
|
||||
|
||||
enum nvmetcp_wqe_type {
|
||||
NVMETCP_WQE_TYPE_NORMAL,
|
||||
NVMETCP_WQE_TYPE_TASK_CLEANUP,
|
||||
NVMETCP_WQE_TYPE_MIDDLE_PATH,
|
||||
NVMETCP_WQE_TYPE_IC,
|
||||
MAX_NVMETCP_WQE_TYPE
|
||||
};
|
||||
|
||||
struct nvmetcp_wqe {
|
||||
__le16 task_id;
|
||||
u8 flags;
|
||||
#define NVMETCP_WQE_WQE_TYPE_MASK 0x7 /* [use nvmetcp_wqe_type] */
|
||||
#define NVMETCP_WQE_WQE_TYPE_SHIFT 0
|
||||
#define NVMETCP_WQE_NUM_SGES_MASK 0xF
|
||||
#define NVMETCP_WQE_NUM_SGES_SHIFT 3
|
||||
#define NVMETCP_WQE_RESPONSE_MASK 0x1
|
||||
#define NVMETCP_WQE_RESPONSE_SHIFT 7
|
||||
struct nvmetcp_dif_flags prot_flags;
|
||||
__le32 contlen_cdbsize;
|
||||
#define NVMETCP_WQE_CONT_LEN_MASK 0xFFFFFF
|
||||
#define NVMETCP_WQE_CONT_LEN_SHIFT 0
|
||||
#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_MASK 0xFF
|
||||
#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_SHIFT 24
|
||||
};
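/*
 * Illustrative sketch, not part of the original header: the MASK/SHIFT pairs
 * above describe sub-fields packed into 'flags' and 'contlen_cdbsize'. A
 * driver would normally go through a SET_FIELD()-style helper; the open-coded
 * equivalent for a NORMAL work-queue entry carrying two SGEs looks like this
 * (the function name and literal values are placeholders).
 */
static inline void example_nvmetcp_wqe_fill(struct nvmetcp_wqe *wqe,
					    u16 task_id, u32 cont_len)
{
	u8 flags = 0;

	flags |= (NVMETCP_WQE_TYPE_NORMAL & NVMETCP_WQE_WQE_TYPE_MASK) <<
		 NVMETCP_WQE_WQE_TYPE_SHIFT;
	flags |= (2 & NVMETCP_WQE_NUM_SGES_MASK) << NVMETCP_WQE_NUM_SGES_SHIFT;

	wqe->task_id = cpu_to_le16(task_id);
	wqe->flags = flags;
	wqe->contlen_cdbsize =
		cpu_to_le32((cont_len & NVMETCP_WQE_CONT_LEN_MASK) <<
			    NVMETCP_WQE_CONT_LEN_SHIFT);
}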
|
||||
|
||||
struct nvmetcp_host_cccid_itid_entry {
|
||||
__le16 itid;
|
||||
};
|
||||
|
||||
struct nvmetcp_connect_done_results {
|
||||
__le16 icid;
|
||||
__le16 conn_id;
|
||||
struct tcp_ulp_connect_done_params params;
|
||||
};
|
||||
|
||||
struct nvmetcp_eqe_data {
|
||||
__le16 icid;
|
||||
__le16 conn_id;
|
||||
__le16 reserved;
|
||||
u8 error_code;
|
||||
u8 error_pdu_opcode_reserved;
|
||||
#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
|
||||
#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
|
||||
#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
|
||||
#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
|
||||
#define NVMETCP_EQE_DATA_RESERVED0_MASK 0x1
|
||||
#define NVMETCP_EQE_DATA_RESERVED0_SHIFT 7
|
||||
};
|
||||
|
||||
enum nvmetcp_task_type {
|
||||
NVMETCP_TASK_TYPE_HOST_WRITE,
|
||||
NVMETCP_TASK_TYPE_HOST_READ,
|
||||
NVMETCP_TASK_TYPE_INIT_CONN_REQUEST,
|
||||
NVMETCP_TASK_TYPE_RESERVED0,
|
||||
NVMETCP_TASK_TYPE_CLEANUP,
|
||||
NVMETCP_TASK_TYPE_HOST_READ_NO_CQE,
|
||||
MAX_NVMETCP_TASK_TYPE
|
||||
};
|
||||
|
||||
struct nvmetcp_db_data {
|
||||
u8 params;
|
||||
#define NVMETCP_DB_DATA_DEST_MASK 0x3 /* destination of doorbell (use enum db_dest) */
|
||||
#define NVMETCP_DB_DATA_DEST_SHIFT 0
|
||||
#define NVMETCP_DB_DATA_AGG_CMD_MASK 0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
|
||||
#define NVMETCP_DB_DATA_AGG_CMD_SHIFT 2
|
||||
#define NVMETCP_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
|
||||
#define NVMETCP_DB_DATA_BYPASS_EN_SHIFT 4
|
||||
#define NVMETCP_DB_DATA_RESERVED_MASK 0x1
|
||||
#define NVMETCP_DB_DATA_RESERVED_SHIFT 5
|
||||
#define NVMETCP_DB_DATA_AGG_VAL_SEL_MASK 0x3 /* aggregative value selection */
|
||||
#define NVMETCP_DB_DATA_AGG_VAL_SEL_SHIFT 6
|
||||
u8 agg_flags; /* bit for every DQ counter flags in CM context that DQ can increment */
|
||||
__le16 sq_prod;
|
||||
};
|
||||
|
||||
struct nvmetcp_fw_nvmf_cqe {
|
||||
__le32 reserved[4];
|
||||
};
|
||||
|
||||
struct nvmetcp_icresp_mdata {
|
||||
u8 digest;
|
||||
u8 cpda;
|
||||
__le16 pfv;
|
||||
__le32 maxdata;
|
||||
__le16 rsvd[4];
|
||||
};
|
||||
|
||||
union nvmetcp_fw_cqe_data {
|
||||
struct nvmetcp_fw_nvmf_cqe nvme_cqe;
|
||||
struct nvmetcp_icresp_mdata icresp_mdata;
|
||||
};
|
||||
|
||||
struct nvmetcp_fw_cqe {
|
||||
__le16 conn_id;
|
||||
u8 cqe_type;
|
||||
u8 cqe_error_status_bits;
|
||||
#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
|
||||
#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
|
||||
#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
|
||||
#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
|
||||
#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
|
||||
#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
|
||||
__le16 itid;
|
||||
u8 task_type;
|
||||
u8 fw_dbg_field;
|
||||
u8 caused_conn_err;
|
||||
u8 reserved0[3];
|
||||
__le32 reserved1;
|
||||
union nvmetcp_fw_cqe_data cqe_data;
|
||||
struct regpair task_opaque;
|
||||
__le32 reserved[6];
|
||||
};
|
||||
|
||||
enum nvmetcp_fw_cqes_type {
|
||||
NVMETCP_FW_CQE_TYPE_NORMAL = 1,
|
||||
NVMETCP_FW_CQE_TYPE_RESERVED0,
|
||||
NVMETCP_FW_CQE_TYPE_RESERVED1,
|
||||
NVMETCP_FW_CQE_TYPE_CLEANUP,
|
||||
NVMETCP_FW_CQE_TYPE_DUMMY,
|
||||
MAX_NVMETCP_FW_CQES_TYPE
|
||||
};
|
||||
|
||||
struct ystorm_nvmetcp_task_state {
|
||||
struct scsi_cached_sges data_desc;
|
||||
struct scsi_sgl_params sgl_params;
|
||||
__le32 resrved0;
|
||||
__le32 buffer_offset;
|
||||
__le16 cccid;
|
||||
struct nvmetcp_dif_flags dif_flags;
|
||||
u8 flags;
|
||||
#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_MASK 0x1
|
||||
#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_SHIFT 0
|
||||
#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_MASK 0x1
|
||||
#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_SHIFT 1
|
||||
#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_MASK 0x1
|
||||
#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
|
||||
#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_MASK 0x1
|
||||
#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_SHIFT 3
|
||||
};
|
||||
|
||||
struct ystorm_nvmetcp_task_rxmit_opt {
|
||||
__le32 reserved[4];
|
||||
};
|
||||
|
||||
struct nvmetcp_task_hdr {
|
||||
__le32 reg[18];
|
||||
};
|
||||
|
||||
struct nvmetcp_task_hdr_aligned {
|
||||
struct nvmetcp_task_hdr task_hdr;
|
||||
__le32 reserved[2]; /* HSI_COMMENT: Align to QREG */
|
||||
};
|
||||
|
||||
struct e5_tdif_task_context {
|
||||
__le32 reserved[16];
|
||||
};
|
||||
|
||||
struct e5_rdif_task_context {
|
||||
__le32 reserved[12];
|
||||
};
|
||||
|
||||
struct ystorm_nvmetcp_task_st_ctx {
|
||||
struct ystorm_nvmetcp_task_state state;
|
||||
struct ystorm_nvmetcp_task_rxmit_opt rxmit_opt;
|
||||
struct nvmetcp_task_hdr_aligned pdu_hdr;
|
||||
};
|
||||
|
||||
struct mstorm_nvmetcp_task_st_ctx {
|
||||
struct scsi_cached_sges data_desc;
|
||||
struct scsi_sgl_params sgl_params;
|
||||
__le32 rem_task_size;
|
||||
__le32 data_buffer_offset;
|
||||
u8 task_type;
|
||||
struct nvmetcp_dif_flags dif_flags;
|
||||
__le16 dif_task_icid;
|
||||
struct regpair reserved0;
|
||||
__le32 expected_itt;
|
||||
__le32 reserved1;
|
||||
};
|
||||
|
||||
struct ustorm_nvmetcp_task_st_ctx {
|
||||
__le32 rem_rcv_len;
|
||||
__le32 exp_data_transfer_len;
|
||||
__le32 exp_data_sn;
|
||||
struct regpair reserved0;
|
||||
__le32 reg1_map;
|
||||
#define REG1_NUM_SGES_MASK 0xF
|
||||
#define REG1_NUM_SGES_SHIFT 0
|
||||
#define REG1_RESERVED1_MASK 0xFFFFFFF
|
||||
#define REG1_RESERVED1_SHIFT 4
|
||||
u8 flags2;
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_MASK 0x1
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_SHIFT 0
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_MASK 0x7F
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_SHIFT 1
|
||||
struct nvmetcp_dif_flags dif_flags;
|
||||
__le16 reserved3;
|
||||
__le16 tqe_opaque[2];
|
||||
__le32 reserved5;
|
||||
__le32 nvme_tcp_opaque_lo;
|
||||
__le32 nvme_tcp_opaque_hi;
|
||||
u8 task_type;
|
||||
u8 error_flags;
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_MASK 0x1
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_SHIFT 3
|
||||
u8 flags;
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_MASK 0x3
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_SHIFT 0
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
|
||||
#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
|
||||
u8 cq_rss_number;
|
||||
};
|
||||
|
||||
struct e5_ystorm_nvmetcp_task_ag_ctx {
|
||||
u8 reserved /* cdu_validation */;
|
||||
u8 byte1 /* state_and_core_id */;
|
||||
__le16 word0 /* icid */;
|
||||
u8 flags0;
|
||||
u8 flags1;
|
||||
u8 flags2;
|
||||
u8 flags3;
|
||||
__le32 TTT;
|
||||
u8 byte2;
|
||||
u8 byte3;
|
||||
u8 byte4;
|
||||
u8 reserved7;
|
||||
};
|
||||
|
||||
struct e5_mstorm_nvmetcp_task_ag_ctx {
|
||||
u8 cdu_validation;
|
||||
u8 byte1;
|
||||
__le16 task_cid;
|
||||
u8 flags0;
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_MASK 0x1
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_SHIFT 6
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
|
||||
u8 flags1;
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_MASK 0x3
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_SHIFT 2
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_MASK 0x3
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_SHIFT 4
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_MASK 0x1
|
||||
#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_SHIFT 7
|
||||
u8 flags2;
|
||||
u8 flags3;
|
||||
__le32 reg0;
|
||||
u8 byte2;
|
||||
u8 byte3;
|
||||
u8 byte4;
|
||||
u8 reserved7;
|
||||
};
|
||||
|
||||
struct e5_ustorm_nvmetcp_task_ag_ctx {
|
||||
u8 reserved;
|
||||
u8 state_and_core_id;
|
||||
__le16 icid;
|
||||
u8 flags0;
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
|
||||
u8 flags1;
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_MASK 0x3
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_SHIFT 0
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_MASK 0x3
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_SHIFT 2
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_MASK 0x3
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_SHIFT 4
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
|
||||
u8 flags2;
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_MASK 0x1
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_SHIFT 3
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_MASK 0x1
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_SHIFT 6
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
|
||||
u8 flags3;
|
||||
u8 flags4;
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_MASK 0x3
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_SHIFT 0
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_MASK 0x1
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_SHIFT 2
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_MASK 0x1
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_SHIFT 3
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
|
||||
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
|
||||
u8 byte2;
|
||||
u8 byte3;
|
||||
u8 reserved8;
|
||||
__le32 dif_err_intervals;
|
||||
__le32 dif_error_1st_interval;
|
||||
__le32 rcv_cont_len;
|
||||
__le32 exp_cont_len;
|
||||
__le32 total_data_acked;
|
||||
__le32 exp_data_acked;
|
||||
__le16 word1;
|
||||
__le16 next_tid;
|
||||
__le32 hdr_residual_count;
|
||||
__le32 exp_r2t_sn;
|
||||
};
|
||||
|
||||
struct e5_nvmetcp_task_context {
|
||||
struct ystorm_nvmetcp_task_st_ctx ystorm_st_context;
|
||||
struct e5_ystorm_nvmetcp_task_ag_ctx ystorm_ag_context;
|
||||
struct regpair ystorm_ag_padding[2];
|
||||
struct e5_tdif_task_context tdif_context;
|
||||
struct e5_mstorm_nvmetcp_task_ag_ctx mstorm_ag_context;
|
||||
struct regpair mstorm_ag_padding[2];
|
||||
struct e5_ustorm_nvmetcp_task_ag_ctx ustorm_ag_context;
|
||||
struct regpair ustorm_ag_padding[2];
|
||||
struct mstorm_nvmetcp_task_st_ctx mstorm_st_context;
|
||||
struct regpair mstorm_st_padding[2];
|
||||
struct ustorm_nvmetcp_task_st_ctx ustorm_st_context;
|
||||
struct regpair ustorm_st_padding[2];
|
||||
struct e5_rdif_task_context rdif_context;
|
||||
};
|
||||
|
||||
#endif /* __NVMETCP_COMMON__*/
638	linux-6.8.1/include/linux/qed/qed_chain.h	Normal file
@@ -0,0 +1,638 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* QLogic qed NIC Driver
|
||||
* Copyright (c) 2015-2017 QLogic Corporation
|
||||
* Copyright (c) 2019-2020 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef _QED_CHAIN_H
|
||||
#define _QED_CHAIN_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/qed/common_hsi.h>
|
||||
|
||||
enum qed_chain_mode {
|
||||
/* Each Page contains a next pointer at its end */
|
||||
QED_CHAIN_MODE_NEXT_PTR,
|
||||
|
||||
/* Chain is a single page, a next pointer is not required */
|
||||
QED_CHAIN_MODE_SINGLE,
|
||||
|
||||
/* Page pointers are located in a side list */
|
||||
QED_CHAIN_MODE_PBL,
|
||||
};
|
||||
|
||||
enum qed_chain_use_mode {
|
||||
QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
|
||||
QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */
|
||||
QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
|
||||
};
|
||||
|
||||
enum qed_chain_cnt_type {
|
||||
/* The chain's size/prod/cons are kept in 16-bit variables */
|
||||
QED_CHAIN_CNT_TYPE_U16,
|
||||
|
||||
/* The chain's size/prod/cons are kept in 32-bit variables */
|
||||
QED_CHAIN_CNT_TYPE_U32,
|
||||
};
|
||||
|
||||
struct qed_chain_next {
|
||||
struct regpair next_phys;
|
||||
void *next_virt;
|
||||
};
|
||||
|
||||
struct qed_chain_pbl_u16 {
|
||||
u16 prod_page_idx;
|
||||
u16 cons_page_idx;
|
||||
};
|
||||
|
||||
struct qed_chain_pbl_u32 {
|
||||
u32 prod_page_idx;
|
||||
u32 cons_page_idx;
|
||||
};
|
||||
|
||||
struct qed_chain_u16 {
|
||||
/* Cyclic index of next element to produce/consume */
|
||||
u16 prod_idx;
|
||||
u16 cons_idx;
|
||||
};
|
||||
|
||||
struct qed_chain_u32 {
|
||||
/* Cyclic index of next element to produce/consume */
|
||||
u32 prod_idx;
|
||||
u32 cons_idx;
|
||||
};
|
||||
|
||||
struct addr_tbl_entry {
|
||||
void *virt_addr;
|
||||
dma_addr_t dma_map;
|
||||
};
|
||||
|
||||
struct qed_chain {
|
||||
/* Fastpath portion of the chain - required for commands such
|
||||
* as produce / consume.
|
||||
*/
|
||||
|
||||
/* Point to next element to produce/consume */
|
||||
void *p_prod_elem;
|
||||
void *p_cons_elem;
|
||||
|
||||
/* Fastpath portions of the PBL [if exists] */
|
||||
|
||||
struct {
|
||||
/* Table for keeping the virtual and physical addresses of the
|
||||
* chain pages, corresponding to the physical addresses
|
||||
* in the pbl table.
|
||||
*/
|
||||
struct addr_tbl_entry *pp_addr_tbl;
|
||||
|
||||
union {
|
||||
struct qed_chain_pbl_u16 u16;
|
||||
struct qed_chain_pbl_u32 u32;
|
||||
} c;
|
||||
} pbl;
|
||||
|
||||
union {
|
||||
struct qed_chain_u16 chain16;
|
||||
struct qed_chain_u32 chain32;
|
||||
} u;
|
||||
|
||||
/* Capacity counts only usable elements */
|
||||
u32 capacity;
|
||||
u32 page_cnt;
|
||||
|
||||
enum qed_chain_mode mode;
|
||||
|
||||
/* Elements information for fast calculations */
|
||||
u16 elem_per_page;
|
||||
u16 elem_per_page_mask;
|
||||
u16 elem_size;
|
||||
u16 next_page_mask;
|
||||
u16 usable_per_page;
|
||||
u8 elem_unusable;
|
||||
|
||||
enum qed_chain_cnt_type cnt_type;
|
||||
|
||||
/* Slowpath of the chain - required for initialization and destruction,
|
||||
* but isn't involved in regular functionality.
|
||||
*/
|
||||
|
||||
u32 page_size;
|
||||
|
||||
/* Base address of a pre-allocated buffer for pbl */
|
||||
struct {
|
||||
__le64 *table_virt;
|
||||
dma_addr_t table_phys;
|
||||
size_t table_size;
|
||||
} pbl_sp;
|
||||
|
||||
/* Address of first page of the chain - the address is required
|
||||
* for fastpath operation [consume/produce] but only for the SINGLE
|
||||
* flavour which isn't considered fastpath [== SPQ].
|
||||
*/
|
||||
void *p_virt_addr;
|
||||
dma_addr_t p_phys_addr;
|
||||
|
||||
/* Total number of elements [for entire chain] */
|
||||
u32 size;
|
||||
|
||||
enum qed_chain_use_mode intended_use;
|
||||
|
||||
bool b_external_pbl;
|
||||
};
|
||||
|
||||
struct qed_chain_init_params {
|
||||
enum qed_chain_mode mode;
|
||||
enum qed_chain_use_mode intended_use;
|
||||
enum qed_chain_cnt_type cnt_type;
|
||||
|
||||
u32 page_size;
|
||||
u32 num_elems;
|
||||
size_t elem_size;
|
||||
|
||||
void *ext_pbl_virt;
|
||||
dma_addr_t ext_pbl_phys;
|
||||
};
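/*
 * Illustrative sketch, not part of the original header: how a caller might
 * fill the init params for a PBL-mode, 16-bit-indexed chain. The element
 * size, ring depth and function name are placeholders; real drivers derive
 * them from the queue being allocated.
 */
static inline void example_fill_chain_params(struct qed_chain_init_params *p)
{
	*p = (struct qed_chain_init_params) {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.page_size	= SZ_4K,	/* same as QED_CHAIN_PAGE_SIZE below */
		.num_elems	= 1024,		/* placeholder ring depth */
		.elem_size	= sizeof(struct regpair), /* placeholder element */
	};
}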
|
||||
|
||||
#define QED_CHAIN_PAGE_SIZE SZ_4K
|
||||
|
||||
#define ELEMS_PER_PAGE(elem_size, page_size) \
|
||||
((page_size) / (elem_size))
|
||||
|
||||
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
|
||||
(((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \
|
||||
(u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) : \
|
||||
0)
|
||||
|
||||
#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode) \
|
||||
((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) - \
|
||||
UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode))))
|
||||
|
||||
#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode) \
|
||||
DIV_ROUND_UP((elem_cnt), \
|
||||
USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode)))
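/*
 * Worked example, not part of the original header: with QED_CHAIN_PAGE_SIZE
 * of 4 KiB and 8-byte elements, ELEMS_PER_PAGE() gives 4096 / 8 = 512. In
 * QED_CHAIN_MODE_NEXT_PTR, sizeof(struct qed_chain_next) is 16 bytes on a
 * typical 64-bit build, so UNUSABLE_ELEMS_PER_PAGE() reserves
 * 1 + (16 - 1) / 8 = 2 elements per page for the next-page link, leaving 510
 * usable. QED_CHAIN_PAGE_CNT() for 1024 requested elements then rounds
 * 1024 / 510 up to 3 pages.
 */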
|
||||
|
||||
#define is_chain_u16(p) \
|
||||
((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
|
||||
#define is_chain_u32(p) \
|
||||
((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
|
||||
|
||||
/* Accessors */
|
||||
|
||||
static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain)
|
||||
{
|
||||
return chain->u.chain16.prod_idx;
|
||||
}
|
||||
|
||||
static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain)
|
||||
{
|
||||
return chain->u.chain16.cons_idx;
|
||||
}
|
||||
|
||||
static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain)
|
||||
{
|
||||
return chain->u.chain32.prod_idx;
|
||||
}
|
||||
|
||||
static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain)
|
||||
{
|
||||
return chain->u.chain32.cons_idx;
|
||||
}
|
||||
|
||||
static inline u16 qed_chain_get_elem_used(const struct qed_chain *chain)
|
||||
{
|
||||
u32 prod = qed_chain_get_prod_idx(chain);
|
||||
u32 cons = qed_chain_get_cons_idx(chain);
|
||||
u16 elem_per_page = chain->elem_per_page;
|
||||
u16 used;
|
||||
|
||||
if (prod < cons)
|
||||
prod += (u32)U16_MAX + 1;
|
||||
|
||||
used = (u16)(prod - cons);
|
||||
if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
|
||||
used -= (u16)(prod / elem_per_page - cons / elem_per_page);
|
||||
|
||||
return used;
|
||||
}
|
||||
|
||||
static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain)
|
||||
{
|
||||
return (u16)(chain->capacity - qed_chain_get_elem_used(chain));
|
||||
}
|
||||
|
||||
static inline u32 qed_chain_get_elem_used_u32(const struct qed_chain *chain)
|
||||
{
|
||||
u64 prod = qed_chain_get_prod_idx_u32(chain);
|
||||
u64 cons = qed_chain_get_cons_idx_u32(chain);
|
||||
u16 elem_per_page = chain->elem_per_page;
|
||||
u32 used;
|
||||
|
||||
if (prod < cons)
|
||||
prod += (u64)U32_MAX + 1;
|
||||
|
||||
used = (u32)(prod - cons);
|
||||
if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
|
||||
used -= (u32)(prod / elem_per_page - cons / elem_per_page);
|
||||
|
||||
return used;
|
||||
}
|
||||
|
||||
static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain)
|
||||
{
|
||||
return chain->capacity - qed_chain_get_elem_used_u32(chain);
|
||||
}
|
||||
|
||||
static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain)
|
||||
{
|
||||
return chain->usable_per_page;
|
||||
}
|
||||
|
||||
static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain)
|
||||
{
|
||||
return chain->elem_unusable;
|
||||
}
|
||||
|
||||
static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain)
|
||||
{
|
||||
return chain->page_cnt;
|
||||
}
|
||||
|
||||
static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
|
||||
{
|
||||
return chain->pbl_sp.table_phys;
|
||||
}
|
||||
|
||||
/**
|
||||
* qed_chain_advance_page(): Advance the next element across pages for a
|
||||
* linked chain.
|
||||
*
|
||||
* @p_chain: Chain to advance.
|
||||
* @p_next_elem: Pointer to the next-element pointer to update.
|
||||
* @idx_to_inc: Producer/consumer index to increment.
|
||||
* @page_to_inc: Page index to increment (PBL mode).
|
||||
*
|
||||
* Return: Void.
|
||||
*/
|
||||
static inline void
|
||||
qed_chain_advance_page(struct qed_chain *p_chain,
|
||||
void **p_next_elem, void *idx_to_inc, void *page_to_inc)
|
||||
{
|
||||
struct qed_chain_next *p_next = NULL;
|
||||
u32 page_index = 0;
|
||||
|
||||
switch (p_chain->mode) {
|
||||
case QED_CHAIN_MODE_NEXT_PTR:
|
||||
p_next = *p_next_elem;
|
||||
*p_next_elem = p_next->next_virt;
|
||||
if (is_chain_u16(p_chain))
|
||||
*(u16 *)idx_to_inc += p_chain->elem_unusable;
|
||||
else
|
||||
*(u32 *)idx_to_inc += p_chain->elem_unusable;
|
||||
break;
|
||||
case QED_CHAIN_MODE_SINGLE:
|
||||
*p_next_elem = p_chain->p_virt_addr;
|
||||
break;
|
||||
|
||||
case QED_CHAIN_MODE_PBL:
|
||||
if (is_chain_u16(p_chain)) {
|
||||
if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
|
||||
*(u16 *)page_to_inc = 0;
|
||||
page_index = *(u16 *)page_to_inc;
|
||||
} else {
|
||||
if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
|
||||
*(u32 *)page_to_inc = 0;
|
||||
page_index = *(u32 *)page_to_inc;
|
||||
}
|
||||
*p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
|
||||
}
|
||||
}
|
||||
|
||||
#define is_unusable_idx(p, idx) \
|
||||
(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
|
||||
|
||||
#define is_unusable_idx_u32(p, idx) \
|
||||
(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
|
||||
#define is_unusable_next_idx(p, idx) \
|
||||
((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
|
||||
(p)->usable_per_page)
|
||||
|
||||
#define is_unusable_next_idx_u32(p, idx) \
|
||||
((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
|
||||
(p)->usable_per_page)
|
||||
|
||||
#define test_and_skip(p, idx) \
|
||||
do { \
|
||||
if (is_chain_u16(p)) { \
|
||||
if (is_unusable_idx(p, idx)) \
|
||||
(p)->u.chain16.idx += (p)->elem_unusable; \
|
||||
} else { \
|
||||
if (is_unusable_idx_u32(p, idx)) \
|
||||
(p)->u.chain32.idx += (p)->elem_unusable; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
* qed_chain_return_produced(): A chain in which the driver "Produces"
|
||||
* elements should use this API
|
||||
* to indicate previous produced elements
|
||||
* are now consumed.
|
||||
*
|
||||
* @p_chain: Chain.
|
||||
*
|
||||
* Return: Void.
|
||||
*/
|
||||
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
|
||||
{
|
||||
if (is_chain_u16(p_chain))
|
||||
p_chain->u.chain16.cons_idx++;
|
||||
else
|
||||
p_chain->u.chain32.cons_idx++;
|
||||
test_and_skip(p_chain, cons_idx);
|
||||
}
|
||||
|
||||
/**
|
||||
* qed_chain_produce(): A chain in which the driver "Produces"
|
||||
* elements should use this to get a pointer to
|
||||
* the next element which can be "Produced". It is the driver's
|
||||
* responsibility to validate that the chain has room for
|
||||
* a new element.
|
||||
*
|
||||
* @p_chain: Chain.
|
||||
*
|
||||
* Return: void*, a pointer to next element.
|
||||
*/
|
||||
static inline void *qed_chain_produce(struct qed_chain *p_chain)
|
||||
{
|
||||
void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;
|
||||
|
||||
if (is_chain_u16(p_chain)) {
|
||||
if ((p_chain->u.chain16.prod_idx &
|
||||
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
|
||||
p_prod_idx = &p_chain->u.chain16.prod_idx;
|
||||
p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
|
||||
qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
|
||||
p_prod_idx, p_prod_page_idx);
|
||||
}
|
||||
p_chain->u.chain16.prod_idx++;
|
||||
} else {
|
||||
if ((p_chain->u.chain32.prod_idx &
|
||||
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
|
||||
p_prod_idx = &p_chain->u.chain32.prod_idx;
|
||||
p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
|
||||
qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
|
||||
p_prod_idx, p_prod_page_idx);
|
||||
}
|
||||
p_chain->u.chain32.prod_idx++;
|
||||
}
|
||||
|
||||
p_ret = p_chain->p_prod_elem;
|
||||
p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
|
||||
p_chain->elem_size);
|
||||
|
||||
return p_ret;
|
||||
}
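/*
 * Illustrative sketch, not part of the original header: a minimal produce
 * path for a chain the driver uses as producer. As noted above, the caller
 * is responsible for checking that room is available first; the function
 * name is a placeholder.
 */
static inline void *example_chain_produce_one(struct qed_chain *p_chain)
{
	if (!qed_chain_get_elem_left(p_chain))
		return NULL;	/* ring is full, caller must back off */

	return qed_chain_produce(p_chain);	/* slot for the next BD/SGE */
}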
|
||||
|
||||
/**
|
||||
* qed_chain_get_capacity(): Get the maximum number of BDs in chain
|
||||
*
|
||||
* @p_chain: Chain.
|
||||
*
|
||||
* Return: the chain capacity (number of usable elements).
|
||||
*/
|
||||
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
|
||||
{
|
||||
return p_chain->capacity;
|
||||
}
|
||||
|
||||
/**
|
||||
* qed_chain_recycle_consumed(): Returns an element which was
|
||||
* previously consumed;
|
||||
* Increments producers so they could
|
||||
* be written to FW.
|
||||
*
|
||||
* @p_chain: Chain.
|
||||
*
|
||||
* Return: Void.
|
||||
*/
|
||||
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
|
||||
{
|
||||
test_and_skip(p_chain, prod_idx);
|
||||
if (is_chain_u16(p_chain))
|
||||
p_chain->u.chain16.prod_idx++;
|
||||
else
|
||||
p_chain->u.chain32.prod_idx++;
|
||||
}
|
||||
|
||||
/**
|
||||
* qed_chain_consume(): A Chain in which the driver utilizes data written
|
||||
* by a different source (i.e., FW) should use this to
|
||||
* access passed buffers.
|
||||
*
|
||||
* @p_chain: Chain.
|
||||
*
|
||||
* Return: void*, a pointer to the next buffer written.
|
||||
*/
|
||||
static inline void *qed_chain_consume(struct qed_chain *p_chain)
|
||||
{
|
||||
void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;
|
||||
|
||||
if (is_chain_u16(p_chain)) {
|
||||
if ((p_chain->u.chain16.cons_idx &
|
||||
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
|
||||
p_cons_idx = &p_chain->u.chain16.cons_idx;
|
||||
p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
|
||||
qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
|
||||
p_cons_idx, p_cons_page_idx);
|
||||
}
|
||||
p_chain->u.chain16.cons_idx++;
|
||||
} else {
|
||||
if ((p_chain->u.chain32.cons_idx &
|
||||
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
|
||||
p_cons_idx = &p_chain->u.chain32.cons_idx;
|
||||
p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
|
||||
qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
|
||||
p_cons_idx, p_cons_page_idx);
|
||||
}
|
||||
p_chain->u.chain32.cons_idx++;
|
||||
}
|
||||
|
||||
p_ret = p_chain->p_cons_elem;
|
||||
p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
|
||||
p_chain->elem_size);
|
||||
|
||||
return p_ret;
|
||||
}
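/*
 * Illustrative sketch, not part of the original header: draining a number of
 * elements that the firmware reports as ready, then recycling them so the
 * producer index can be advanced again. 'num_elems' would typically come from
 * a firmware consumer/producer index; process_one() is a placeholder for
 * driver logic.
 */
static inline void example_chain_drain(struct qed_chain *p_chain, u16 num_elems,
				       void (*process_one)(void *elem))
{
	while (num_elems--) {
		process_one(qed_chain_consume(p_chain));
		qed_chain_recycle_consumed(p_chain);
	}
}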
|
||||
|
||||
/**
|
||||
* qed_chain_reset(): Resets the chain to its start state.
|
||||
*
|
||||
* @p_chain: pointer to a previously allocated chain.
|
||||
*
|
||||
* Return: Void.
|
||||
*/
|
||||
static inline void qed_chain_reset(struct qed_chain *p_chain)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
if (is_chain_u16(p_chain)) {
|
||||
p_chain->u.chain16.prod_idx = 0;
|
||||
p_chain->u.chain16.cons_idx = 0;
|
||||
} else {
|
||||
p_chain->u.chain32.prod_idx = 0;
|
||||
p_chain->u.chain32.cons_idx = 0;
|
||||
}
|
||||
p_chain->p_cons_elem = p_chain->p_virt_addr;
|
||||
p_chain->p_prod_elem = p_chain->p_virt_addr;
|
||||
|
||||
if (p_chain->mode == QED_CHAIN_MODE_PBL) {
|
||||
/* Use (page_cnt - 1) as a reset value for the prod/cons page's
|
||||
* indices, to avoid unnecessary page advancing on the first
|
||||
* call to qed_chain_produce/consume. Instead, the indices
|
||||
* will be advanced to page_cnt and then will be wrapped to 0.
|
||||
*/
|
||||
u32 reset_val = p_chain->page_cnt - 1;
|
||||
|
||||
if (is_chain_u16(p_chain)) {
|
||||
p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
|
||||
p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
|
||||
} else {
|
||||
p_chain->pbl.c.u32.prod_page_idx = reset_val;
|
||||
p_chain->pbl.c.u32.cons_page_idx = reset_val;
|
||||
}
|
||||
}
|
||||
|
||||
switch (p_chain->intended_use) {
|
||||
case QED_CHAIN_USE_TO_CONSUME:
|
||||
/* produce empty elements */
|
||||
for (i = 0; i < p_chain->capacity; i++)
|
||||
qed_chain_recycle_consumed(p_chain);
|
||||
break;
|
||||
|
||||
case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
|
||||
case QED_CHAIN_USE_TO_PRODUCE:
|
||||
default:
|
||||
/* Do nothing */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* qed_chain_get_last_elem(): Returns a pointer to the last element of the
|
||||
* chain.
|
||||
*
|
||||
* @p_chain: Chain.
|
||||
*
|
||||
* Return: void*.
|
||||
*/
|
||||
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
|
||||
{
|
||||
struct qed_chain_next *p_next = NULL;
|
||||
void *p_virt_addr = NULL;
|
||||
u32 size, last_page_idx;
|
||||
|
||||
if (!p_chain->p_virt_addr)
|
||||
goto out;
|
||||
|
||||
switch (p_chain->mode) {
|
||||
case QED_CHAIN_MODE_NEXT_PTR:
|
||||
size = p_chain->elem_size * p_chain->usable_per_page;
|
||||
p_virt_addr = p_chain->p_virt_addr;
|
||||
p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
|
||||
while (p_next->next_virt != p_chain->p_virt_addr) {
|
||||
p_virt_addr = p_next->next_virt;
|
||||
p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
|
||||
size);
|
||||
}
|
||||
break;
|
||||
case QED_CHAIN_MODE_SINGLE:
|
||||
p_virt_addr = p_chain->p_virt_addr;
|
||||
break;
|
||||
case QED_CHAIN_MODE_PBL:
|
||||
last_page_idx = p_chain->page_cnt - 1;
|
||||
p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
|
||||
break;
|
||||
}
|
||||
/* p_virt_addr points at this stage to the last page of the chain */
|
||||
size = p_chain->elem_size * (p_chain->usable_per_page - 1);
|
||||
p_virt_addr = (u8 *)p_virt_addr + size;
|
||||
out:
|
||||
return p_virt_addr;
|
||||
}
|
||||
|
||||
/**
|
||||
* qed_chain_set_prod(): sets the prod to the given value.
|
||||
*
|
||||
* @p_chain: Chain.
|
||||
* @prod_idx: Prod Idx.
|
||||
* @p_prod_elem: Prod elem.
|
||||
*
|
||||
* Return: Void.
|
||||
*/
|
||||
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
|
||||
u32 prod_idx, void *p_prod_elem)
|
||||
{
|
||||
if (p_chain->mode == QED_CHAIN_MODE_PBL) {
|
||||
u32 cur_prod, page_mask, page_cnt, page_diff;
|
||||
|
||||
cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
|
||||
p_chain->u.chain32.prod_idx;
|
||||
|
||||
/* Assume that number of elements in a page is power of 2 */
|
||||
page_mask = ~p_chain->elem_per_page_mask;
|
||||
|
||||
/* Use "cur_prod - 1" and "prod_idx - 1" since producer index
|
||||
* reaches the first element of next page before the page index
|
||||
* is incremented. See qed_chain_produce().
|
||||
* Index wrap around is not a problem because the difference
|
||||
* between current and given producer indices is always
|
||||
* positive and lower than the chain's capacity.
|
||||
*/
|
||||
page_diff = (((cur_prod - 1) & page_mask) -
|
||||
((prod_idx - 1) & page_mask)) /
|
||||
p_chain->elem_per_page;
|
||||
|
||||
page_cnt = qed_chain_get_page_cnt(p_chain);
|
||||
if (is_chain_u16(p_chain))
|
||||
p_chain->pbl.c.u16.prod_page_idx =
|
||||
(p_chain->pbl.c.u16.prod_page_idx -
|
||||
page_diff + page_cnt) % page_cnt;
|
||||
else
|
||||
p_chain->pbl.c.u32.prod_page_idx =
|
||||
(p_chain->pbl.c.u32.prod_page_idx -
|
||||
page_diff + page_cnt) % page_cnt;
|
||||
}
|
||||
|
||||
if (is_chain_u16(p_chain))
|
||||
p_chain->u.chain16.prod_idx = (u16) prod_idx;
|
||||
else
|
||||
p_chain->u.chain32.prod_idx = prod_idx;
|
||||
p_chain->p_prod_elem = p_prod_elem;
|
||||
}
|
||||
|
||||
/**
|
||||
* qed_chain_pbl_zero_mem(): set chain memory to 0.
|
||||
*
|
||||
* @p_chain: Chain.
|
||||
*
|
||||
* Return: Void.
|
||||
*/
|
||||
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
|
||||
{
|
||||
u32 i, page_cnt;
|
||||
|
||||
if (p_chain->mode != QED_CHAIN_MODE_PBL)
|
||||
return;
|
||||
|
||||
page_cnt = qed_chain_get_page_cnt(p_chain);
|
||||
|
||||
for (i = 0; i < page_cnt; i++)
|
||||
memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
|
||||
p_chain->page_size);
|
||||
}
|
||||
|
||||
#endif
340	linux-6.8.1/include/linux/qed/qed_eth_if.h	Normal file
@@ -0,0 +1,340 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* QLogic qed NIC Driver
|
||||
* Copyright (c) 2015-2017 QLogic Corporation
|
||||
* Copyright (c) 2019-2020 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef _QED_ETH_IF_H
|
||||
#define _QED_ETH_IF_H
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/if_link.h>
|
||||
#include <linux/qed/eth_common.h>
|
||||
#include <linux/qed/qed_if.h>
|
||||
#include <linux/qed/qed_iov_if.h>
|
||||
|
||||
/* 64 max queues * (1 rx + 4 tx-cos + 1 xdp) */
|
||||
#define QED_MIN_L2_CONS (2 + NUM_PHYS_TCS_4PORT_K2)
|
||||
#define QED_MAX_L2_CONS (64 * (QED_MIN_L2_CONS))
|
||||
|
||||
struct qed_queue_start_common_params {
|
||||
/* Should always be relative to entity sending this. */
|
||||
u8 vport_id;
|
||||
u16 queue_id;
|
||||
|
||||
/* Relative, but relevant only for PFs */
|
||||
u8 stats_id;
|
||||
|
||||
struct qed_sb_info *p_sb;
|
||||
u8 sb_idx;
|
||||
|
||||
u8 tc;
|
||||
};
|
||||
|
||||
struct qed_rxq_start_ret_params {
|
||||
void __iomem *p_prod;
|
||||
void *p_handle;
|
||||
};
|
||||
|
||||
struct qed_txq_start_ret_params {
|
||||
void __iomem *p_doorbell;
|
||||
void *p_handle;
|
||||
};
|
||||
|
||||
enum qed_filter_config_mode {
|
||||
QED_FILTER_CONFIG_MODE_DISABLE,
|
||||
QED_FILTER_CONFIG_MODE_5_TUPLE,
|
||||
QED_FILTER_CONFIG_MODE_L4_PORT,
|
||||
QED_FILTER_CONFIG_MODE_IP_DEST,
|
||||
QED_FILTER_CONFIG_MODE_IP_SRC,
|
||||
};
|
||||
|
||||
struct qed_ntuple_filter_params {
|
||||
/* Physically mapped address containing header of buffer to be used
|
||||
* as filter.
|
||||
*/
|
||||
dma_addr_t addr;
|
||||
|
||||
/* Length of header in bytes */
|
||||
u16 length;
|
||||
|
||||
/* Relative queue-id to receive classified packet */
|
||||
#define QED_RFS_NTUPLE_QID_RSS ((u16)-1)
|
||||
u16 qid;
|
||||
|
||||
/* Identifier can either be according to vport-id or vfid */
|
||||
bool b_is_vf;
|
||||
u8 vport_id;
|
||||
u8 vf_id;
|
||||
|
||||
/* true iff this filter is to be added. Else to be removed */
|
||||
bool b_is_add;
|
||||
|
||||
/* If flow needs to be dropped */
|
||||
bool b_is_drop;
|
||||
};
|
||||
|
||||
struct qed_dev_eth_info {
|
||||
struct qed_dev_info common;
|
||||
|
||||
u8 num_queues;
|
||||
u8 num_tc;
|
||||
|
||||
u8 port_mac[ETH_ALEN];
|
||||
u16 num_vlan_filters;
|
||||
u16 num_mac_filters;
|
||||
|
||||
/* Legacy VF - this affects the datapath, so qede has to know */
|
||||
bool is_legacy;
|
||||
|
||||
/* Might depend on available resources [in case of VF] */
|
||||
bool xdp_supported;
|
||||
};
|
||||
|
||||
struct qed_update_vport_rss_params {
|
||||
void *rss_ind_table[128];
|
||||
u32 rss_key[10];
|
||||
u8 rss_caps;
|
||||
};
|
||||
|
||||
struct qed_update_vport_params {
|
||||
u8 vport_id;
|
||||
u8 update_vport_active_flg;
|
||||
u8 vport_active_flg;
|
||||
u8 update_tx_switching_flg;
|
||||
u8 tx_switching_flg;
|
||||
u8 update_accept_any_vlan_flg;
|
||||
u8 accept_any_vlan;
|
||||
u8 update_rss_flg;
|
||||
struct qed_update_vport_rss_params rss_params;
|
||||
};
|
||||
|
||||
struct qed_start_vport_params {
|
||||
bool remove_inner_vlan;
|
||||
bool handle_ptp_pkts;
|
||||
bool gro_enable;
|
||||
bool drop_ttl0;
|
||||
u8 vport_id;
|
||||
u16 mtu;
|
||||
bool clear_stats;
|
||||
};
|
||||
|
||||
enum qed_filter_rx_mode_type {
|
||||
QED_FILTER_RX_MODE_TYPE_REGULAR,
|
||||
QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
|
||||
QED_FILTER_RX_MODE_TYPE_PROMISC,
|
||||
};
|
||||
|
||||
enum qed_filter_xcast_params_type {
|
||||
QED_FILTER_XCAST_TYPE_ADD,
|
||||
QED_FILTER_XCAST_TYPE_DEL,
|
||||
QED_FILTER_XCAST_TYPE_REPLACE,
|
||||
};
|
||||
|
||||
struct qed_filter_ucast_params {
|
||||
enum qed_filter_xcast_params_type type;
|
||||
u8 vlan_valid;
|
||||
u16 vlan;
|
||||
u8 mac_valid;
|
||||
unsigned char mac[ETH_ALEN];
|
||||
};
|
||||
|
||||
struct qed_filter_mcast_params {
|
||||
enum qed_filter_xcast_params_type type;
|
||||
u8 num;
|
||||
unsigned char mac[64][ETH_ALEN];
|
||||
};
|
||||
|
||||
enum qed_filter_type {
|
||||
QED_FILTER_TYPE_UCAST,
|
||||
QED_FILTER_TYPE_MCAST,
|
||||
QED_FILTER_TYPE_RX_MODE,
|
||||
QED_MAX_FILTER_TYPES,
|
||||
};
|
||||
|
||||
struct qed_tunn_params {
|
||||
u16 vxlan_port;
|
||||
u8 update_vxlan_port;
|
||||
u16 geneve_port;
|
||||
u8 update_geneve_port;
|
||||
};
|
||||
|
||||
struct qed_eth_cb_ops {
|
||||
struct qed_common_cb_ops common;
|
||||
void (*force_mac) (void *dev, u8 *mac, bool forced);
|
||||
void (*ports_update)(void *dev, u16 vxlan_port, u16 geneve_port);
|
||||
};
|
||||
|
||||
#define QED_MAX_PHC_DRIFT_PPB 291666666
|
||||
|
||||
enum qed_ptp_filter_type {
|
||||
QED_PTP_FILTER_NONE,
|
||||
QED_PTP_FILTER_ALL,
|
||||
QED_PTP_FILTER_V1_L4_EVENT,
|
||||
QED_PTP_FILTER_V1_L4_GEN,
|
||||
QED_PTP_FILTER_V2_L4_EVENT,
|
||||
QED_PTP_FILTER_V2_L4_GEN,
|
||||
QED_PTP_FILTER_V2_L2_EVENT,
|
||||
QED_PTP_FILTER_V2_L2_GEN,
|
||||
QED_PTP_FILTER_V2_EVENT,
|
||||
QED_PTP_FILTER_V2_GEN
|
||||
};
|
||||
|
||||
enum qed_ptp_hwtstamp_tx_type {
|
||||
QED_PTP_HWTSTAMP_TX_OFF,
|
||||
QED_PTP_HWTSTAMP_TX_ON,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_DCB
|
||||
/* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration
|
||||
* of dcbnl_rtnl_ops structure.
|
||||
*/
|
||||
struct qed_eth_dcbnl_ops {
|
||||
/* IEEE 802.1Qaz std */
|
||||
int (*ieee_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
|
||||
int (*ieee_setpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
|
||||
int (*ieee_getets)(struct qed_dev *cdev, struct ieee_ets *ets);
|
||||
int (*ieee_setets)(struct qed_dev *cdev, struct ieee_ets *ets);
|
||||
int (*ieee_peer_getets)(struct qed_dev *cdev, struct ieee_ets *ets);
|
||||
int (*ieee_peer_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
|
||||
int (*ieee_getapp)(struct qed_dev *cdev, struct dcb_app *app);
|
||||
int (*ieee_setapp)(struct qed_dev *cdev, struct dcb_app *app);
|
||||
|
||||
/* CEE std */
|
||||
u8 (*getstate)(struct qed_dev *cdev);
|
||||
u8 (*setstate)(struct qed_dev *cdev, u8 state);
|
||||
void (*getpgtccfgtx)(struct qed_dev *cdev, int prio, u8 *prio_type,
|
||||
u8 *pgid, u8 *bw_pct, u8 *up_map);
|
||||
void (*getpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 *bw_pct);
|
||||
void (*getpgtccfgrx)(struct qed_dev *cdev, int prio, u8 *prio_type,
|
||||
u8 *pgid, u8 *bw_pct, u8 *up_map);
|
||||
void (*getpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 *bw_pct);
|
||||
void (*getpfccfg)(struct qed_dev *cdev, int prio, u8 *setting);
|
||||
void (*setpfccfg)(struct qed_dev *cdev, int prio, u8 setting);
|
||||
u8 (*getcap)(struct qed_dev *cdev, int capid, u8 *cap);
|
||||
int (*getnumtcs)(struct qed_dev *cdev, int tcid, u8 *num);
|
||||
u8 (*getpfcstate)(struct qed_dev *cdev);
|
||||
int (*getapp)(struct qed_dev *cdev, u8 idtype, u16 id);
|
||||
u8 (*getfeatcfg)(struct qed_dev *cdev, int featid, u8 *flags);
|
||||
|
||||
/* DCBX configuration */
|
||||
u8 (*getdcbx)(struct qed_dev *cdev);
|
||||
void (*setpgtccfgtx)(struct qed_dev *cdev, int prio,
|
||||
u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map);
|
||||
void (*setpgtccfgrx)(struct qed_dev *cdev, int prio,
|
||||
u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map);
|
||||
void (*setpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 bw_pct);
|
||||
void (*setpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 bw_pct);
|
||||
u8 (*setall)(struct qed_dev *cdev);
|
||||
int (*setnumtcs)(struct qed_dev *cdev, int tcid, u8 num);
|
||||
void (*setpfcstate)(struct qed_dev *cdev, u8 state);
|
||||
int (*setapp)(struct qed_dev *cdev, u8 idtype, u16 idval, u8 up);
|
||||
u8 (*setdcbx)(struct qed_dev *cdev, u8 state);
|
||||
u8 (*setfeatcfg)(struct qed_dev *cdev, int featid, u8 flags);
|
||||
|
||||
/* Peer apps */
|
||||
int (*peer_getappinfo)(struct qed_dev *cdev,
|
||||
struct dcb_peer_app_info *info,
|
||||
u16 *app_count);
|
||||
int (*peer_getapptable)(struct qed_dev *cdev, struct dcb_app *table);
|
||||
|
||||
/* CEE peer */
|
||||
int (*cee_peer_getpfc)(struct qed_dev *cdev, struct cee_pfc *pfc);
|
||||
int (*cee_peer_getpg)(struct qed_dev *cdev, struct cee_pg *pg);
|
||||
};
|
||||
#endif
|
||||
|
||||
struct qed_eth_ptp_ops {
|
||||
int (*cfg_filters)(struct qed_dev *, enum qed_ptp_filter_type,
|
||||
enum qed_ptp_hwtstamp_tx_type);
|
||||
int (*read_rx_ts)(struct qed_dev *, u64 *);
|
||||
int (*read_tx_ts)(struct qed_dev *, u64 *);
|
||||
int (*read_cc)(struct qed_dev *, u64 *);
|
||||
int (*disable)(struct qed_dev *);
|
||||
int (*adjfreq)(struct qed_dev *, s32);
|
||||
int (*enable)(struct qed_dev *);
|
||||
};
|
||||
|
||||
struct qed_eth_ops {
|
||||
const struct qed_common_ops *common;
|
||||
#ifdef CONFIG_QED_SRIOV
|
||||
const struct qed_iov_hv_ops *iov;
|
||||
#endif
|
||||
#ifdef CONFIG_DCB
|
||||
const struct qed_eth_dcbnl_ops *dcb;
|
||||
#endif
|
||||
const struct qed_eth_ptp_ops *ptp;
|
||||
|
||||
int (*fill_dev_info)(struct qed_dev *cdev,
|
||||
struct qed_dev_eth_info *info);
|
||||
|
||||
void (*register_ops)(struct qed_dev *cdev,
|
||||
struct qed_eth_cb_ops *ops,
|
||||
void *cookie);
|
||||
|
||||
bool(*check_mac) (struct qed_dev *cdev, u8 *mac);
|
||||
|
||||
int (*vport_start)(struct qed_dev *cdev,
|
||||
struct qed_start_vport_params *params);
|
||||
|
||||
int (*vport_stop)(struct qed_dev *cdev,
|
||||
u8 vport_id);
|
||||
|
||||
int (*vport_update)(struct qed_dev *cdev,
|
||||
struct qed_update_vport_params *params);
|
||||
|
||||
int (*q_rx_start)(struct qed_dev *cdev,
|
||||
u8 rss_num,
|
||||
struct qed_queue_start_common_params *params,
|
||||
u16 bd_max_bytes,
|
||||
dma_addr_t bd_chain_phys_addr,
|
||||
dma_addr_t cqe_pbl_addr,
|
||||
u16 cqe_pbl_size,
|
||||
struct qed_rxq_start_ret_params *ret_params);
|
||||
|
||||
int (*q_rx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
|
||||
|
||||
int (*q_tx_start)(struct qed_dev *cdev,
|
||||
u8 rss_num,
|
||||
struct qed_queue_start_common_params *params,
|
||||
dma_addr_t pbl_addr,
|
||||
u16 pbl_size,
|
||||
struct qed_txq_start_ret_params *ret_params);
|
||||
|
||||
int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
|
||||
|
||||
int (*filter_config_rx_mode)(struct qed_dev *cdev,
|
||||
enum qed_filter_rx_mode_type type);
|
||||
|
||||
int (*filter_config_ucast)(struct qed_dev *cdev,
|
||||
struct qed_filter_ucast_params *params);
|
||||
|
||||
int (*filter_config_mcast)(struct qed_dev *cdev,
|
||||
struct qed_filter_mcast_params *params);
|
||||
|
||||
int (*fastpath_stop)(struct qed_dev *cdev);
|
||||
|
||||
int (*eth_cqe_completion)(struct qed_dev *cdev,
|
||||
u8 rss_id,
|
||||
struct eth_slow_path_rx_cqe *cqe);
|
||||
|
||||
void (*get_vport_stats)(struct qed_dev *cdev,
|
||||
struct qed_eth_stats *stats);
|
||||
|
||||
int (*tunn_config)(struct qed_dev *cdev,
|
||||
struct qed_tunn_params *params);
|
||||
|
||||
int (*ntuple_filter_config)(struct qed_dev *cdev,
|
||||
void *cookie,
|
||||
struct qed_ntuple_filter_params *params);
|
||||
|
||||
int (*configure_arfs_searcher)(struct qed_dev *cdev,
|
||||
enum qed_filter_config_mode mode);
|
||||
int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
|
||||
int (*req_bulletin_update_mac)(struct qed_dev *cdev, const u8 *mac);
|
||||
};
|
||||
|
||||
const struct qed_eth_ops *qed_get_eth_ops(void);
|
||||
void qed_put_eth_ops(void);
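/*
 * Illustrative sketch, not part of the original header: how a protocol driver
 * might bind to the qed core and start a vport. Error handling is trimmed,
 * the function name and parameter values are placeholders, and the sketch
 * assumes the qed core module is loaded so qed_get_eth_ops() returns a valid
 * ops table.
 */
static inline int example_bind_qed_eth(struct qed_dev *cdev)
{
	const struct qed_eth_ops *ops = qed_get_eth_ops();
	struct qed_start_vport_params vport = {
		.vport_id	= 0,
		.mtu		= 1500,
		.drop_ttl0	= true,
		.clear_stats	= true,
	};

	if (!ops)
		return -ENODEV;

	return ops->vport_start(cdev, &vport);
}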
|
||||
|
||||
#endif
150	linux-6.8.1/include/linux/qed/qed_fcoe_if.h	Normal file
@@ -0,0 +1,150 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* Copyright (c) 2019-2020 Marvell International Ltd. */
|
||||
|
||||
#ifndef _QED_FCOE_IF_H
|
||||
#define _QED_FCOE_IF_H
|
||||
#include <linux/types.h>
|
||||
#include <linux/qed/qed_if.h>
|
||||
struct qed_fcoe_stats {
|
||||
u64 fcoe_rx_byte_cnt;
|
||||
u64 fcoe_rx_data_pkt_cnt;
|
||||
u64 fcoe_rx_xfer_pkt_cnt;
|
||||
u64 fcoe_rx_other_pkt_cnt;
|
||||
u32 fcoe_silent_drop_pkt_cmdq_full_cnt;
|
||||
u32 fcoe_silent_drop_pkt_rq_full_cnt;
|
||||
u32 fcoe_silent_drop_pkt_crc_error_cnt;
|
||||
u32 fcoe_silent_drop_pkt_task_invalid_cnt;
|
||||
u32 fcoe_silent_drop_total_pkt_cnt;
|
||||
|
||||
u64 fcoe_tx_byte_cnt;
|
||||
u64 fcoe_tx_data_pkt_cnt;
|
||||
u64 fcoe_tx_xfer_pkt_cnt;
|
||||
u64 fcoe_tx_other_pkt_cnt;
|
||||
};
|
||||
|
||||
struct qed_dev_fcoe_info {
|
||||
struct qed_dev_info common;
|
||||
|
||||
void __iomem *primary_dbq_rq_addr;
|
||||
void __iomem *secondary_bdq_rq_addr;
|
||||
|
||||
u64 wwpn;
|
||||
u64 wwnn;
|
||||
|
||||
u8 num_cqs;
|
||||
};
|
||||
|
||||
struct qed_fcoe_params_offload {
|
||||
dma_addr_t sq_pbl_addr;
|
||||
dma_addr_t sq_curr_page_addr;
|
||||
dma_addr_t sq_next_page_addr;
|
||||
|
||||
u8 src_mac[ETH_ALEN];
|
||||
u8 dst_mac[ETH_ALEN];
|
||||
|
||||
u16 tx_max_fc_pay_len;
|
||||
u16 e_d_tov_timer_val;
|
||||
u16 rec_tov_timer_val;
|
||||
u16 rx_max_fc_pay_len;
|
||||
u16 vlan_tag;
|
||||
|
||||
struct fc_addr_nw s_id;
|
||||
u8 max_conc_seqs_c3;
|
||||
struct fc_addr_nw d_id;
|
||||
u8 flags;
|
||||
u8 def_q_idx;
|
||||
};
|
||||
|
||||
#define MAX_TID_BLOCKS_FCOE (512)
|
||||
struct qed_fcoe_tid {
|
||||
u32 size; /* In bytes per task */
|
||||
u32 num_tids_per_block;
|
||||
u8 *blocks[MAX_TID_BLOCKS_FCOE];
|
||||
};
|
||||
|
||||
struct qed_fcoe_cb_ops {
|
||||
struct qed_common_cb_ops common;
|
||||
u32 (*get_login_failures)(void *cookie);
|
||||
};
|
||||
|
||||
/**
|
||||
* struct qed_fcoe_ops - qed FCoE operations.
|
||||
* @common: common operations pointer
|
||||
* @fill_dev_info: fills FCoE specific information
|
||||
* @param cdev
|
||||
* @param info
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @register_ops: register FCoE operations
|
||||
* @param cdev
|
||||
* @param ops - specified using qed_fcoe_cb_ops
|
||||
* @param cookie - driver private
|
||||
* @ll2: light L2 operations pointer
|
||||
* @start: starts fcoe in FW
|
||||
* @param cdev
|
||||
* @param tasks - qed will fill information about tasks
|
||||
* return 0 on success, otherwise error value.
|
||||
* @stop: stops fcoe in FW
|
||||
* @param cdev
|
||||
* return 0 on success, otherwise error value.
|
||||
* @acquire_conn: acquire a new fcoe connection
|
||||
* @param cdev
|
||||
* @param handle - qed will fill handle that should be
|
||||
* used henceforth as identifier of the
|
||||
* connection.
|
||||
* @param p_doorbell - qed will fill the address of the
|
||||
* doorbell.
|
||||
* return 0 on success, otherwise error value.
|
||||
* @release_conn: release a previously acquired fcoe connection
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* return 0 on success, otherwise error value.
|
||||
* @offload_conn: configures an offloaded connection
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* @param conn_info - the configuration to use for the
|
||||
* offload.
|
||||
* return 0 on success, otherwise error value.
|
||||
* @destroy_conn: stops an offloaded connection
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* @param terminate_params
|
||||
* return 0 on success, otherwise error value.
|
||||
* @get_stats: gets FCoE related statistics
|
||||
* @param cdev
|
||||
* @param stats - pointer to struct that will be filled
|
||||
* with stats
|
||||
* return 0 on success, error otherwise.
|
||||
*/
|
||||
struct qed_fcoe_ops {
|
||||
const struct qed_common_ops *common;
|
||||
|
||||
int (*fill_dev_info)(struct qed_dev *cdev,
|
||||
struct qed_dev_fcoe_info *info);
|
||||
|
||||
void (*register_ops)(struct qed_dev *cdev,
|
||||
struct qed_fcoe_cb_ops *ops, void *cookie);
|
||||
|
||||
const struct qed_ll2_ops *ll2;
|
||||
|
||||
int (*start)(struct qed_dev *cdev, struct qed_fcoe_tid *tasks);
|
||||
|
||||
int (*stop)(struct qed_dev *cdev);
|
||||
|
||||
int (*acquire_conn)(struct qed_dev *cdev,
|
||||
u32 *handle,
|
||||
u32 *fw_cid, void __iomem **p_doorbell);
|
||||
|
||||
int (*release_conn)(struct qed_dev *cdev, u32 handle);
|
||||
|
||||
int (*offload_conn)(struct qed_dev *cdev,
|
||||
u32 handle,
|
||||
struct qed_fcoe_params_offload *conn_info);
|
||||
int (*destroy_conn)(struct qed_dev *cdev,
|
||||
u32 handle, dma_addr_t terminate_params);
|
||||
|
||||
int (*get_stats)(struct qed_dev *cdev, struct qed_fcoe_stats *stats);
|
||||
};
|
||||
|
||||
const struct qed_fcoe_ops *qed_get_fcoe_ops(void);
|
||||
void qed_put_fcoe_ops(void);
|
||||
#endif
1502	linux-6.8.1/include/linux/qed/qed_if.h	Normal file
File diff suppressed because it is too large
34	linux-6.8.1/include/linux/qed/qed_iov_if.h	Normal file
@@ -0,0 +1,34 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* QLogic qed NIC Driver
|
||||
* Copyright (c) 2015-2017 QLogic Corporation
|
||||
* Copyright (c) 2019-2020 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef _QED_IOV_IF_H
|
||||
#define _QED_IOV_IF_H
|
||||
|
||||
#include <linux/qed/qed_if.h>
|
||||
|
||||
/* Structs used by PF to control and manipulate child VFs */
|
||||
struct qed_iov_hv_ops {
|
||||
int (*configure)(struct qed_dev *cdev, int num_vfs_param);
|
||||
|
||||
int (*set_mac) (struct qed_dev *cdev, u8 *mac, int vfid);
|
||||
|
||||
int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid);
|
||||
|
||||
int (*get_config) (struct qed_dev *cdev, int vf_id,
|
||||
struct ifla_vf_info *ivi);
|
||||
|
||||
int (*set_link_state) (struct qed_dev *cdev, int vf_id,
|
||||
int link_state);
|
||||
|
||||
int (*set_spoof) (struct qed_dev *cdev, int vfid, bool val);
|
||||
|
||||
int (*set_rate) (struct qed_dev *cdev, int vfid,
|
||||
u32 min_rate, u32 max_rate);
|
||||
|
||||
int (*set_trust) (struct qed_dev *cdev, int vfid, bool trust);
|
||||
};
|
||||
|
||||
#endif
234	linux-6.8.1/include/linux/qed/qed_iscsi_if.h	Normal file
@@ -0,0 +1,234 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* QLogic qed NIC Driver
|
||||
* Copyright (c) 2015-2017 QLogic Corporation
|
||||
* Copyright (c) 2019-2020 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef _QED_ISCSI_IF_H
|
||||
#define _QED_ISCSI_IF_H
|
||||
#include <linux/types.h>
|
||||
#include <linux/qed/qed_if.h>
|
||||
|
||||
typedef int (*iscsi_event_cb_t) (void *context,
|
||||
u8 fw_event_code, void *fw_handle);
|
||||
struct qed_iscsi_stats {
|
||||
u64 iscsi_rx_bytes_cnt;
|
||||
u64 iscsi_rx_packet_cnt;
|
||||
u64 iscsi_rx_new_ooo_isle_events_cnt;
|
||||
u32 iscsi_cmdq_threshold_cnt;
|
||||
u32 iscsi_rq_threshold_cnt;
|
||||
u32 iscsi_immq_threshold_cnt;
|
||||
|
||||
u64 iscsi_rx_dropped_pdus_task_not_valid;
|
||||
|
||||
u64 iscsi_rx_data_pdu_cnt;
|
||||
u64 iscsi_rx_r2t_pdu_cnt;
|
||||
u64 iscsi_rx_total_pdu_cnt;
|
||||
|
||||
u64 iscsi_tx_go_to_slow_start_event_cnt;
|
||||
u64 iscsi_tx_fast_retransmit_event_cnt;
|
||||
|
||||
u64 iscsi_tx_data_pdu_cnt;
|
||||
u64 iscsi_tx_r2t_pdu_cnt;
|
||||
u64 iscsi_tx_total_pdu_cnt;
|
||||
|
||||
u64 iscsi_tx_bytes_cnt;
|
||||
u64 iscsi_tx_packet_cnt;
|
||||
};
|
||||
|
||||
struct qed_dev_iscsi_info {
|
||||
struct qed_dev_info common;
|
||||
|
||||
void __iomem *primary_dbq_rq_addr;
|
||||
void __iomem *secondary_bdq_rq_addr;
|
||||
|
||||
u8 num_cqs;
|
||||
};
|
||||
|
||||
struct qed_iscsi_id_params {
|
||||
u8 mac[ETH_ALEN];
|
||||
u32 ip[4];
|
||||
u16 port;
|
||||
};
|
||||
|
||||
struct qed_iscsi_params_offload {
|
||||
u8 layer_code;
|
||||
dma_addr_t sq_pbl_addr;
|
||||
u32 initial_ack;
|
||||
|
||||
struct qed_iscsi_id_params src;
|
||||
struct qed_iscsi_id_params dst;
|
||||
u16 vlan_id;
|
||||
u8 tcp_flags;
|
||||
u8 ip_version;
|
||||
u8 default_cq;
|
||||
|
||||
u8 ka_max_probe_cnt;
|
||||
u8 dup_ack_theshold;
|
||||
u32 rcv_next;
|
||||
u32 snd_una;
|
||||
u32 snd_next;
|
||||
u32 snd_max;
|
||||
u32 snd_wnd;
|
||||
u32 rcv_wnd;
|
||||
u32 snd_wl1;
|
||||
u32 cwnd;
|
||||
u32 ss_thresh;
|
||||
u16 srtt;
|
||||
u16 rtt_var;
|
||||
u32 ts_recent;
|
||||
u32 ts_recent_age;
|
||||
u32 total_rt;
|
||||
u32 ka_timeout_delta;
|
||||
u32 rt_timeout_delta;
|
||||
u8 dup_ack_cnt;
|
||||
u8 snd_wnd_probe_cnt;
|
||||
u8 ka_probe_cnt;
|
||||
u8 rt_cnt;
|
||||
u32 flow_label;
|
||||
u32 ka_timeout;
|
||||
u32 ka_interval;
|
||||
u32 max_rt_time;
|
||||
u32 initial_rcv_wnd;
|
||||
u8 ttl;
|
||||
u8 tos_or_tc;
|
||||
u16 remote_port;
|
||||
u16 local_port;
|
||||
u16 mss;
|
||||
u8 snd_wnd_scale;
|
||||
u8 rcv_wnd_scale;
|
||||
u16 da_timeout_value;
|
||||
u8 ack_frequency;
|
||||
};
|
||||
|
||||
struct qed_iscsi_params_update {
|
||||
u8 update_flag;
|
||||
#define QED_ISCSI_CONN_HD_EN BIT(0)
|
||||
#define QED_ISCSI_CONN_DD_EN BIT(1)
|
||||
#define QED_ISCSI_CONN_INITIAL_R2T BIT(2)
|
||||
#define QED_ISCSI_CONN_IMMEDIATE_DATA BIT(3)
|
||||
|
||||
u32 max_seq_size;
|
||||
u32 max_recv_pdu_length;
|
||||
u32 max_send_pdu_length;
|
||||
u32 first_seq_length;
|
||||
u32 exp_stat_sn;
|
||||
};
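/*
 * Illustrative sketch, not part of the original header: update_flag carries
 * the QED_ISCSI_CONN_* bits defined above, e.g. enabling header and data
 * digests on an existing connection. The function name and the PDU length
 * values are placeholders.
 */
static inline void example_fill_iscsi_update(struct qed_iscsi_params_update *p)
{
	*p = (struct qed_iscsi_params_update) {
		.update_flag = QED_ISCSI_CONN_HD_EN | QED_ISCSI_CONN_DD_EN,
		.max_recv_pdu_length = 8192,	/* placeholder */
		.max_send_pdu_length = 8192,	/* placeholder */
	};
}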
|
||||
|
||||
#define MAX_TID_BLOCKS_ISCSI (512)
|
||||
struct qed_iscsi_tid {
|
||||
u32 size; /* In bytes per task */
|
||||
u32 num_tids_per_block;
|
||||
u8 *blocks[MAX_TID_BLOCKS_ISCSI];
|
||||
};
|
||||
|
||||
struct qed_iscsi_cb_ops {
|
||||
struct qed_common_cb_ops common;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct qed_iscsi_ops - qed iSCSI operations.
|
||||
* @common: common operations pointer
|
||||
* @ll2: light L2 operations pointer
|
||||
* @fill_dev_info: fills iSCSI specific information
|
||||
* @param cdev
|
||||
* @param info
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @register_ops: register iscsi operations
|
||||
* @param cdev
|
||||
* @param ops - specified using qed_iscsi_cb_ops
|
||||
* @param cookie - driver private
|
||||
* @start: starts iscsi in FW
|
||||
* @param cdev
|
||||
* @param tasks - qed will fill information about tasks
|
||||
* return 0 on success, otherwise error value.
|
||||
* @stop: stops iscsi in FW
|
||||
* @param cdev
|
||||
* return 0 on success, otherwise error value.
|
||||
* @acquire_conn: acquire a new iscsi connection
|
||||
* @param cdev
|
||||
* @param handle - qed will fill handle that should be
|
||||
* used henceforth as identifier of the
|
||||
* connection.
|
||||
* @param p_doorbell - qed will fill the address of the
|
||||
* doorbell.
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @release_conn: release a previously acquired iscsi connection
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @offload_conn: configures an offloaded connection
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* @param conn_info - the configuration to use for the
|
||||
* offload.
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @update_conn: updates an offloaded connection
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* @param conn_info - the configuration to use for the
|
||||
* offload.
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @destroy_conn: stops an offloaded connection
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @clear_sq: clear all tasks in the SQ
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @get_stats: iSCSI related statistics
|
||||
* @param cdev
|
||||
* @param stats - pointer to struct that would be filled with stats
|
||||
* @return 0 on success, error otherwise.
|
||||
* @change_mac: Change MAC of interface
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* @param mac - new MAC to configure.
|
||||
* @return 0 on success, otherwise error value.
|
||||
*/
|
||||
struct qed_iscsi_ops {
|
||||
const struct qed_common_ops *common;
|
||||
|
||||
const struct qed_ll2_ops *ll2;
|
||||
|
||||
int (*fill_dev_info)(struct qed_dev *cdev,
|
||||
struct qed_dev_iscsi_info *info);
|
||||
|
||||
void (*register_ops)(struct qed_dev *cdev,
|
||||
struct qed_iscsi_cb_ops *ops, void *cookie);
|
||||
|
||||
int (*start)(struct qed_dev *cdev,
|
||||
struct qed_iscsi_tid *tasks,
|
||||
void *event_context, iscsi_event_cb_t async_event_cb);
|
||||
|
||||
int (*stop)(struct qed_dev *cdev);
|
||||
|
||||
int (*acquire_conn)(struct qed_dev *cdev,
|
||||
u32 *handle,
|
||||
u32 *fw_cid, void __iomem **p_doorbell);
|
||||
|
||||
int (*release_conn)(struct qed_dev *cdev, u32 handle);
|
||||
|
||||
int (*offload_conn)(struct qed_dev *cdev,
|
||||
u32 handle,
|
||||
struct qed_iscsi_params_offload *conn_info);
|
||||
|
||||
int (*update_conn)(struct qed_dev *cdev,
|
||||
u32 handle,
|
||||
struct qed_iscsi_params_update *conn_info);
|
||||
|
||||
int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);
|
||||
|
||||
int (*clear_sq)(struct qed_dev *cdev, u32 handle);
|
||||
|
||||
int (*get_stats)(struct qed_dev *cdev,
|
||||
struct qed_iscsi_stats *stats);
|
||||
|
||||
int (*change_mac)(struct qed_dev *cdev, u32 handle, const u8 *mac);
|
||||
};
|
||||
|
||||
const struct qed_iscsi_ops *qed_get_iscsi_ops(void);
|
||||
void qed_put_iscsi_ops(void);
|
||||
#endif
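A minimal, hedged sketch of how a storage driver could wire itself to these operations (assuming this header is linux/qed/qed_iscsi_if.h); the example_* names, the omitted error handling and the callback bodies are hypothetical.

#include <linux/qed/qed_iscsi_if.h>

static struct qed_iscsi_cb_ops example_iscsi_cbs;	/* common callbacks */

static int example_iscsi_attach(struct qed_dev *cdev, void *drv_ctx,
				iscsi_event_cb_t event_cb)
{
	const struct qed_iscsi_ops *ops = qed_get_iscsi_ops();
	struct qed_dev_iscsi_info info;
	struct qed_iscsi_tid tasks;
	int rc;

	if (!ops)
		return -ENODEV;

	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		goto err;

	/* register common callbacks, then bring iSCSI up in FW */
	ops->register_ops(cdev, &example_iscsi_cbs, drv_ctx);

	rc = ops->start(cdev, &tasks, drv_ctx, event_cb);
	if (rc)
		goto err;

	return 0;

err:
	qed_put_iscsi_ops();
	return rc;
}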
287
linux-6.8.1/include/linux/qed/qed_ll2_if.h
Normal file
@@ -0,0 +1,287 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* QLogic qed NIC Driver
|
||||
* Copyright (c) 2015-2017 QLogic Corporation
|
||||
* Copyright (c) 2019-2020 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef _QED_LL2_IF_H
|
||||
#define _QED_LL2_IF_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/qed/qed_if.h>
|
||||
|
||||
enum qed_ll2_conn_type {
|
||||
QED_LL2_TYPE_FCOE,
|
||||
QED_LL2_TYPE_TCP_ULP,
|
||||
QED_LL2_TYPE_TEST,
|
||||
QED_LL2_TYPE_OOO,
|
||||
QED_LL2_TYPE_RESERVED2,
|
||||
QED_LL2_TYPE_ROCE,
|
||||
QED_LL2_TYPE_IWARP,
|
||||
QED_LL2_TYPE_RESERVED3,
|
||||
MAX_QED_LL2_CONN_TYPE
|
||||
};
|
||||
|
||||
enum qed_ll2_rx_conn_type {
|
||||
QED_LL2_RX_TYPE_LEGACY,
|
||||
QED_LL2_RX_TYPE_CTX,
|
||||
MAX_QED_LL2_RX_CONN_TYPE
|
||||
};
|
||||
|
||||
enum qed_ll2_roce_flavor_type {
|
||||
QED_LL2_ROCE,
|
||||
QED_LL2_RROCE,
|
||||
MAX_QED_LL2_ROCE_FLAVOR_TYPE
|
||||
};
|
||||
|
||||
enum qed_ll2_tx_dest {
|
||||
QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
|
||||
QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
|
||||
QED_LL2_TX_DEST_DROP, /* Light L2 Drop the TX packet */
|
||||
QED_LL2_TX_DEST_MAX
|
||||
};
|
||||
|
||||
enum qed_ll2_error_handle {
|
||||
QED_LL2_DROP_PACKET,
|
||||
QED_LL2_DO_NOTHING,
|
||||
QED_LL2_ASSERT,
|
||||
};
|
||||
|
||||
struct qed_ll2_stats {
|
||||
u64 gsi_invalid_hdr;
|
||||
u64 gsi_invalid_pkt_length;
|
||||
u64 gsi_unsupported_pkt_typ;
|
||||
u64 gsi_crcchksm_error;
|
||||
|
||||
u64 packet_too_big_discard;
|
||||
u64 no_buff_discard;
|
||||
|
||||
u64 rcv_ucast_bytes;
|
||||
u64 rcv_mcast_bytes;
|
||||
u64 rcv_bcast_bytes;
|
||||
u64 rcv_ucast_pkts;
|
||||
u64 rcv_mcast_pkts;
|
||||
u64 rcv_bcast_pkts;
|
||||
|
||||
u64 sent_ucast_bytes;
|
||||
u64 sent_mcast_bytes;
|
||||
u64 sent_bcast_bytes;
|
||||
u64 sent_ucast_pkts;
|
||||
u64 sent_mcast_pkts;
|
||||
u64 sent_bcast_pkts;
|
||||
};
|
||||
|
||||
struct qed_ll2_comp_rx_data {
|
||||
void *cookie;
|
||||
dma_addr_t rx_buf_addr;
|
||||
u16 parse_flags;
|
||||
u16 err_flags;
|
||||
u16 vlan;
|
||||
bool b_last_packet;
|
||||
u8 connection_handle;
|
||||
|
||||
union {
|
||||
u16 packet_length;
|
||||
u16 data_length;
|
||||
} length;
|
||||
|
||||
u32 opaque_data_0;
|
||||
u32 opaque_data_1;
|
||||
|
||||
/* GSI only */
|
||||
u32 src_qp;
|
||||
u16 qp_id;
|
||||
|
||||
union {
|
||||
u8 placement_offset;
|
||||
u8 data_length_error;
|
||||
} u;
|
||||
};
|
||||
|
||||
typedef
|
||||
void (*qed_ll2_complete_rx_packet_cb)(void *cxt,
|
||||
struct qed_ll2_comp_rx_data *data);
|
||||
|
||||
typedef
|
||||
void (*qed_ll2_release_rx_packet_cb)(void *cxt,
|
||||
u8 connection_handle,
|
||||
void *cookie,
|
||||
dma_addr_t rx_buf_addr,
|
||||
bool b_last_packet);
|
||||
|
||||
typedef
|
||||
void (*qed_ll2_complete_tx_packet_cb)(void *cxt,
|
||||
u8 connection_handle,
|
||||
void *cookie,
|
||||
dma_addr_t first_frag_addr,
|
||||
bool b_last_fragment,
|
||||
bool b_last_packet);
|
||||
|
||||
typedef
|
||||
void (*qed_ll2_release_tx_packet_cb)(void *cxt,
|
||||
u8 connection_handle,
|
||||
void *cookie,
|
||||
dma_addr_t first_frag_addr,
|
||||
bool b_last_fragment, bool b_last_packet);
|
||||
|
||||
typedef
|
||||
void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle,
|
||||
u32 opaque_data_0, u32 opaque_data_1);
|
||||
|
||||
struct qed_ll2_cbs {
|
||||
qed_ll2_complete_rx_packet_cb rx_comp_cb;
|
||||
qed_ll2_release_rx_packet_cb rx_release_cb;
|
||||
qed_ll2_complete_tx_packet_cb tx_comp_cb;
|
||||
qed_ll2_release_tx_packet_cb tx_release_cb;
|
||||
qed_ll2_slowpath_cb slowpath_cb;
|
||||
void *cookie;
|
||||
};
|
||||
|
||||
struct qed_ll2_acquire_data_inputs {
|
||||
enum qed_ll2_rx_conn_type rx_conn_type;
|
||||
enum qed_ll2_conn_type conn_type;
|
||||
u16 mtu;
|
||||
u16 rx_num_desc;
|
||||
u16 rx_num_ooo_buffers;
|
||||
u8 rx_drop_ttl0_flg;
|
||||
u8 rx_vlan_removal_en;
|
||||
u16 tx_num_desc;
|
||||
u8 tx_max_bds_per_packet;
|
||||
u8 tx_tc;
|
||||
enum qed_ll2_tx_dest tx_dest;
|
||||
enum qed_ll2_error_handle ai_err_packet_too_big;
|
||||
enum qed_ll2_error_handle ai_err_no_buf;
|
||||
bool secondary_queue;
|
||||
u8 gsi_enable;
|
||||
};
|
||||
|
||||
struct qed_ll2_acquire_data {
|
||||
struct qed_ll2_acquire_data_inputs input;
|
||||
const struct qed_ll2_cbs *cbs;
|
||||
|
||||
/* Output container for LL2 connection's handle */
|
||||
u8 *p_connection_handle;
|
||||
};
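A hedged sketch of filling struct qed_ll2_acquire_data for a TCP-ULP style queue; example_fill_ll2_acquire is a hypothetical helper, the callback table is assumed to be defined by the caller, and the queue sizes are illustrative.

static void example_fill_ll2_acquire(struct qed_ll2_acquire_data *data,
				     const struct qed_ll2_cbs *cbs,
				     u8 *handle)
{
	memset(data, 0, sizeof(*data));

	data->input.conn_type = QED_LL2_TYPE_TCP_ULP;
	data->input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
	data->input.mtu = 1500;			/* illustrative sizes */
	data->input.rx_num_desc = 128;
	data->input.tx_num_desc = 128;
	data->input.tx_max_bds_per_packet = 8;
	data->input.tx_dest = QED_LL2_TX_DEST_NW;
	data->input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
	data->input.ai_err_no_buf = QED_LL2_DROP_PACKET;

	data->cbs = cbs;			/* completion/release callbacks */
	data->p_connection_handle = handle;	/* filled by qed on acquire */
}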
|
||||
|
||||
struct qed_ll2_tx_pkt_info {
|
||||
void *cookie;
|
||||
dma_addr_t first_frag;
|
||||
enum qed_ll2_tx_dest tx_dest;
|
||||
enum qed_ll2_roce_flavor_type qed_roce_flavor;
|
||||
u16 vlan;
|
||||
u16 l4_hdr_offset_w; /* from start of packet */
|
||||
u16 first_frag_len;
|
||||
u8 num_of_bds;
|
||||
u8 bd_flags;
|
||||
bool enable_ip_cksum;
|
||||
bool enable_l4_cksum;
|
||||
bool calc_ip_len;
|
||||
bool remove_stag;
|
||||
};
|
||||
|
||||
#define QED_LL2_UNUSED_HANDLE (0xff)
|
||||
|
||||
struct qed_ll2_cb_ops {
|
||||
int (*rx_cb)(void *, struct sk_buff *, u32, u32);
|
||||
int (*tx_cb)(void *, struct sk_buff *, bool);
|
||||
};
|
||||
|
||||
struct qed_ll2_params {
|
||||
u16 mtu;
|
||||
bool drop_ttl0_packets;
|
||||
bool rx_vlan_stripping;
|
||||
u8 tx_tc;
|
||||
bool frags_mapped;
|
||||
u8 ll2_mac_address[ETH_ALEN];
|
||||
};
|
||||
|
||||
enum qed_ll2_xmit_flags {
|
||||
/* FIP discovery packet */
|
||||
QED_LL2_XMIT_FLAGS_FIP_DISCOVERY
|
||||
};
|
||||
|
||||
struct qed_ll2_ops {
|
||||
/**
|
||||
* start(): Initializes ll2.
|
||||
*
|
||||
* @cdev: Qed dev pointer.
|
||||
* @params: Protocol driver configuration for the ll2.
|
||||
*
|
||||
* Return: 0 on success, otherwise error value.
|
||||
*/
|
||||
int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
|
||||
|
||||
/**
|
||||
* stop(): Stops the ll2
|
||||
*
|
||||
* @cdev: Qed dev pointer.
|
||||
*
|
||||
* Return: 0 on success, otherwise error value.
|
||||
*/
|
||||
int (*stop)(struct qed_dev *cdev);
|
||||
|
||||
/**
|
||||
* start_xmit(): Transmits an skb over the ll2 interface
|
||||
*
|
||||
* @cdev: Qed dev pointer.
|
||||
* @skb: SKB.
|
||||
* @xmit_flags: Transmit options defined by the enum qed_ll2_xmit_flags.
|
||||
*
|
||||
* Return: 0 on success, otherwise error value.
|
||||
*/
|
||||
int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb,
|
||||
unsigned long xmit_flags);
|
||||
|
||||
/**
|
||||
* register_cb_ops(): Protocol driver register the callback for Rx/Tx
|
||||
* packets. Should be called before `start'.
|
||||
*
|
||||
* @cdev: Qed dev pointer.
|
||||
* @cookie: to be passed to the callback functions.
|
||||
* @ops: the callback functions to register for Rx / Tx.
|
||||
*
|
||||
* Return: 0 on success, otherwise error value.
|
||||
*/
|
||||
void (*register_cb_ops)(struct qed_dev *cdev,
|
||||
const struct qed_ll2_cb_ops *ops,
|
||||
void *cookie);
|
||||
|
||||
/**
|
||||
* get_stats(): Get LL2 related statistics.
|
||||
*
|
||||
* @cdev: Qed dev pointer.
|
||||
* @stats: Pointer to struct that would be filled with stats.
|
||||
*
|
||||
* Return: 0 on success, error otherwise.
|
||||
*/
|
||||
int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
|
||||
};
|
||||
|
||||
#ifdef CONFIG_QED_LL2
|
||||
int qed_ll2_alloc_if(struct qed_dev *);
|
||||
void qed_ll2_dealloc_if(struct qed_dev *);
|
||||
#else
|
||||
static const struct qed_ll2_ops qed_ll2_ops_pass = {
|
||||
.start = NULL,
|
||||
.stop = NULL,
|
||||
.start_xmit = NULL,
|
||||
.register_cb_ops = NULL,
|
||||
.get_stats = NULL,
|
||||
};
|
||||
|
||||
static inline int qed_ll2_alloc_if(struct qed_dev *cdev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void qed_ll2_dealloc_if(struct qed_dev *cdev)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
#endif
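A hedged usage sketch for a protocol driver sitting on top of qed_ll2_ops: register Rx/Tx callbacks, then start LL2 and let it transmit skbs; every example_* identifier is hypothetical and the parameter values are illustrative.

#include <linux/etherdevice.h>

static int example_rx_cb(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
	dev_kfree_skb_any(skb);		/* just consume the buffer in this sketch */
	return 0;
}

static int example_tx_cb(void *cookie, struct sk_buff *skb, bool last)
{
	dev_kfree_skb_any(skb);
	return 0;
}

static const struct qed_ll2_cb_ops example_ll2_cb_ops = {
	.rx_cb = example_rx_cb,
	.tx_cb = example_tx_cb,
};

static int example_ll2_up(const struct qed_ll2_ops *ll2, struct qed_dev *cdev,
			  void *cookie, const u8 *mac)
{
	struct qed_ll2_params params = {
		.mtu = 1500,
		.drop_ttl0_packets = true,
		.rx_vlan_stripping = true,
	};

	ether_addr_copy(params.ll2_mac_address, mac);

	/* callbacks must be registered before start() */
	ll2->register_cb_ops(cdev, &example_ll2_cb_ops, cookie);
	return ll2->start(cdev, &params);
}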
257
linux-6.8.1/include/linux/qed/qed_nvmetcp_if.h
Normal file
@@ -0,0 +1,257 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* Copyright 2021 Marvell. All rights reserved. */
|
||||
|
||||
#ifndef _QED_NVMETCP_IF_H
|
||||
#define _QED_NVMETCP_IF_H
|
||||
#include <linux/types.h>
|
||||
#include <linux/qed/qed_if.h>
|
||||
#include <linux/qed/storage_common.h>
|
||||
#include <linux/qed/nvmetcp_common.h>
|
||||
|
||||
#define QED_NVMETCP_MAX_IO_SIZE 0x800000
|
||||
#define QED_NVMETCP_CMN_HDR_SIZE (sizeof(struct nvme_tcp_hdr))
|
||||
#define QED_NVMETCP_CMD_HDR_SIZE (sizeof(struct nvme_tcp_cmd_pdu))
|
||||
#define QED_NVMETCP_NON_IO_HDR_SIZE ((QED_NVMETCP_CMN_HDR_SIZE + 16))
|
||||
|
||||
typedef int (*nvmetcp_event_cb_t) (void *context,
|
||||
u8 fw_event_code, void *fw_handle);
|
||||
|
||||
struct qed_dev_nvmetcp_info {
|
||||
struct qed_dev_info common;
|
||||
u8 port_id; /* Physical port */
|
||||
u8 num_cqs;
|
||||
};
|
||||
|
||||
#define MAX_TID_BLOCKS_NVMETCP (512)
|
||||
struct qed_nvmetcp_tid {
|
||||
u32 size; /* In bytes per task */
|
||||
u32 num_tids_per_block;
|
||||
u8 *blocks[MAX_TID_BLOCKS_NVMETCP];
|
||||
};
|
||||
|
||||
struct qed_nvmetcp_id_params {
|
||||
u8 mac[ETH_ALEN];
|
||||
u32 ip[4];
|
||||
u16 port;
|
||||
};
|
||||
|
||||
struct qed_nvmetcp_params_offload {
|
||||
/* FW initializations */
|
||||
dma_addr_t sq_pbl_addr;
|
||||
dma_addr_t nvmetcp_cccid_itid_table_addr;
|
||||
u16 nvmetcp_cccid_max_range;
|
||||
u8 default_cq;
|
||||
|
||||
/* Networking and TCP stack initializations */
|
||||
struct qed_nvmetcp_id_params src;
|
||||
struct qed_nvmetcp_id_params dst;
|
||||
u32 ka_timeout;
|
||||
u32 ka_interval;
|
||||
u32 max_rt_time;
|
||||
u32 cwnd;
|
||||
u16 mss;
|
||||
u16 vlan_id;
|
||||
bool timestamp_en;
|
||||
bool delayed_ack_en;
|
||||
bool tcp_keep_alive_en;
|
||||
bool ecn_en;
|
||||
u8 ip_version;
|
||||
u8 ka_max_probe_cnt;
|
||||
u8 ttl;
|
||||
u8 tos_or_tc;
|
||||
u8 rcv_wnd_scale;
|
||||
};
|
||||
|
||||
struct qed_nvmetcp_params_update {
|
||||
u32 max_io_size;
|
||||
u32 max_recv_pdu_length;
|
||||
u32 max_send_pdu_length;
|
||||
|
||||
/* Placeholder: pfv, cpda, hpda */
|
||||
|
||||
bool hdr_digest_en;
|
||||
bool data_digest_en;
|
||||
};
|
||||
|
||||
struct qed_nvmetcp_cb_ops {
|
||||
struct qed_common_cb_ops common;
|
||||
};
|
||||
|
||||
struct nvmetcp_sge {
|
||||
struct regpair sge_addr; /* SGE address */
|
||||
__le32 sge_len; /* SGE length */
|
||||
__le32 reserved;
|
||||
};
|
||||
|
||||
/* IO path HSI function SGL params */
|
||||
struct storage_sgl_task_params {
|
||||
struct nvmetcp_sge *sgl;
|
||||
struct regpair sgl_phys_addr;
|
||||
u32 total_buffer_size;
|
||||
u16 num_sges;
|
||||
bool small_mid_sge;
|
||||
};
|
||||
|
||||
/* IO path HSI function FW task context params */
|
||||
struct nvmetcp_task_params {
|
||||
void *context; /* Output parameter - set/filled by the HSI function */
|
||||
struct nvmetcp_wqe *sqe;
|
||||
u32 tx_io_size; /* in bytes (Without DIF, if exists) */
|
||||
u32 rx_io_size; /* in bytes (Without DIF, if exists) */
|
||||
u16 conn_icid;
|
||||
u16 itid;
|
||||
struct regpair opq; /* qedn_task_ctx address */
|
||||
u16 host_cccid;
|
||||
u8 cq_rss_number;
|
||||
bool send_write_incapsule;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct qed_nvmetcp_ops - qed NVMeTCP operations.
|
||||
* @common: common operations pointer
|
||||
* @ll2: light L2 operations pointer
|
||||
* @fill_dev_info: fills NVMeTCP specific information
|
||||
* @param cdev
|
||||
* @param info
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @register_ops: register nvmetcp operations
|
||||
* @param cdev
|
||||
* @param ops - specified using qed_nvmetcp_cb_ops
|
||||
* @param cookie - driver private
|
||||
* @start: starts nvmetcp in FW
|
||||
* @param cdev
|
||||
* @param tasks - qed will fill information about tasks
|
||||
* return 0 on success, otherwise error value.
|
||||
* @stop: stops nvmetcp in FW
|
||||
* @param cdev
|
||||
* return 0 on success, otherwise error value.
|
||||
* @acquire_conn: acquire a new nvmetcp connection
|
||||
* @param cdev
|
||||
* @param handle - qed will fill handle that should be
|
||||
* used henceforth as identifier of the
|
||||
* connection.
|
||||
* @param p_doorbell - qed will fill the address of the
|
||||
* doorbell.
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @release_conn: release a previously acquired nvmetcp connection
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @offload_conn: configures an offloaded connection
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* @param conn_info - the configuration to use for the
|
||||
* offload.
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @update_conn: updates an offloaded connection
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* @param conn_info - the configuration to use for the
|
||||
* offload.
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @destroy_conn: stops an offloaded connection
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @clear_sq: clear all tasks in the SQ
|
||||
* @param cdev
|
||||
* @param handle - the connection handle.
|
||||
* @return 0 on success, otherwise error value.
|
||||
* @add_src_tcp_port_filter: Add source tcp port filter
|
||||
* @param cdev
|
||||
* @param src_port
|
||||
* @remove_src_tcp_port_filter: Remove source tcp port filter
|
||||
* @param cdev
|
||||
* @param src_port
|
||||
* @add_dst_tcp_port_filter: Add destination tcp port filter
|
||||
* @param cdev
|
||||
* @param dest_port
|
||||
* @remove_dst_tcp_port_filter: Remove destination tcp port filter
|
||||
* @param cdev
|
||||
* @param dest_port
|
||||
* @clear_all_filters: Clear all filters.
|
||||
* @param cdev
|
||||
* @init_read_io: Init read IO.
|
||||
* @task_params
|
||||
* @cmd_pdu_header
|
||||
* @nvme_cmd
|
||||
* @sgl_task_params
|
||||
* @init_write_io: Init write IO.
|
||||
* @task_params
|
||||
* @cmd_pdu_header
|
||||
* @nvme_cmd
|
||||
* @sgl_task_params
|
||||
* @init_icreq_exchange: Exchange ICReq.
|
||||
* @task_params
|
||||
* @init_conn_req_pdu_hdr
|
||||
* @tx_sgl_task_params
|
||||
* @rx_sgl_task_params
|
||||
* @init_task_cleanup: Init task cleanup.
|
||||
* @task_params
|
||||
*/
|
||||
struct qed_nvmetcp_ops {
|
||||
const struct qed_common_ops *common;
|
||||
|
||||
const struct qed_ll2_ops *ll2;
|
||||
|
||||
int (*fill_dev_info)(struct qed_dev *cdev,
|
||||
struct qed_dev_nvmetcp_info *info);
|
||||
|
||||
void (*register_ops)(struct qed_dev *cdev,
|
||||
struct qed_nvmetcp_cb_ops *ops, void *cookie);
|
||||
|
||||
int (*start)(struct qed_dev *cdev,
|
||||
struct qed_nvmetcp_tid *tasks,
|
||||
void *event_context, nvmetcp_event_cb_t async_event_cb);
|
||||
|
||||
int (*stop)(struct qed_dev *cdev);
|
||||
|
||||
int (*acquire_conn)(struct qed_dev *cdev,
|
||||
u32 *handle,
|
||||
u32 *fw_cid, void __iomem **p_doorbell);
|
||||
|
||||
int (*release_conn)(struct qed_dev *cdev, u32 handle);
|
||||
|
||||
int (*offload_conn)(struct qed_dev *cdev,
|
||||
u32 handle,
|
||||
struct qed_nvmetcp_params_offload *conn_info);
|
||||
|
||||
int (*update_conn)(struct qed_dev *cdev,
|
||||
u32 handle,
|
||||
struct qed_nvmetcp_params_update *conn_info);
|
||||
|
||||
int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);
|
||||
|
||||
int (*clear_sq)(struct qed_dev *cdev, u32 handle);
|
||||
|
||||
int (*add_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);
|
||||
|
||||
void (*remove_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);
|
||||
|
||||
int (*add_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);
|
||||
|
||||
void (*remove_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);
|
||||
|
||||
void (*clear_all_filters)(struct qed_dev *cdev);
|
||||
|
||||
void (*init_read_io)(struct nvmetcp_task_params *task_params,
|
||||
struct nvme_tcp_cmd_pdu *cmd_pdu_header,
|
||||
struct nvme_command *nvme_cmd,
|
||||
struct storage_sgl_task_params *sgl_task_params);
|
||||
|
||||
void (*init_write_io)(struct nvmetcp_task_params *task_params,
|
||||
struct nvme_tcp_cmd_pdu *cmd_pdu_header,
|
||||
struct nvme_command *nvme_cmd,
|
||||
struct storage_sgl_task_params *sgl_task_params);
|
||||
|
||||
void (*init_icreq_exchange)(struct nvmetcp_task_params *task_params,
|
||||
struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
|
||||
struct storage_sgl_task_params *tx_sgl_task_params,
|
||||
struct storage_sgl_task_params *rx_sgl_task_params);
|
||||
|
||||
void (*init_task_cleanup)(struct nvmetcp_task_params *task_params);
|
||||
};
|
||||
|
||||
const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void);
|
||||
void qed_put_nvmetcp_ops(void);
|
||||
#endif
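A minimal, hedged sketch of acquiring and offloading one NVMe/TCP connection with the ops above; the caller is assumed to have filled the remaining offload parameters, and example_nvmetcp_offload is hypothetical.

static int example_nvmetcp_offload(const struct qed_nvmetcp_ops *ops,
				   struct qed_dev *cdev,
				   struct qed_nvmetcp_params_offload *conn,
				   u32 *handle)
{
	void __iomem *doorbell;
	u32 fw_cid;
	int rc;

	/* qed fills the handle and the doorbell address */
	rc = ops->acquire_conn(cdev, handle, &fw_cid, &doorbell);
	if (rc)
		return rc;

	conn->default_cq = 0;	/* steer completions to the first CQ */

	rc = ops->offload_conn(cdev, *handle, conn);
	if (rc)
		ops->release_conn(cdev, *handle);

	return rc;
}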
692
linux-6.8.1/include/linux/qed/qed_rdma_if.h
Normal file
@@ -0,0 +1,692 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* QLogic qed NIC Driver
|
||||
* Copyright (c) 2015-2017 QLogic Corporation
|
||||
* Copyright (c) 2019-2020 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef _QED_RDMA_IF_H
|
||||
#define _QED_RDMA_IF_H
|
||||
#include <linux/types.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/qed/qed_if.h>
|
||||
#include <linux/qed/qed_ll2_if.h>
|
||||
#include <linux/qed/rdma_common.h>
|
||||
|
||||
#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
|
||||
|
||||
/* rdma interface */
|
||||
|
||||
enum qed_roce_qp_state {
|
||||
QED_ROCE_QP_STATE_RESET,
|
||||
QED_ROCE_QP_STATE_INIT,
|
||||
QED_ROCE_QP_STATE_RTR,
|
||||
QED_ROCE_QP_STATE_RTS,
|
||||
QED_ROCE_QP_STATE_SQD,
|
||||
QED_ROCE_QP_STATE_ERR,
|
||||
QED_ROCE_QP_STATE_SQE
|
||||
};
|
||||
|
||||
enum qed_rdma_qp_type {
|
||||
QED_RDMA_QP_TYPE_RC,
|
||||
QED_RDMA_QP_TYPE_XRC_INI,
|
||||
QED_RDMA_QP_TYPE_XRC_TGT,
|
||||
QED_RDMA_QP_TYPE_INVAL = 0xffff,
|
||||
};
|
||||
|
||||
enum qed_rdma_tid_type {
|
||||
QED_RDMA_TID_REGISTERED_MR,
|
||||
QED_RDMA_TID_FMR,
|
||||
QED_RDMA_TID_MW
|
||||
};
|
||||
|
||||
struct qed_rdma_events {
|
||||
void *context;
|
||||
void (*affiliated_event)(void *context, u8 fw_event_code,
|
||||
void *fw_handle);
|
||||
void (*unaffiliated_event)(void *context, u8 event_code);
|
||||
};
|
||||
|
||||
struct qed_rdma_device {
|
||||
u32 vendor_id;
|
||||
u32 vendor_part_id;
|
||||
u32 hw_ver;
|
||||
u64 fw_ver;
|
||||
|
||||
u64 node_guid;
|
||||
u64 sys_image_guid;
|
||||
|
||||
u8 max_cnq;
|
||||
u8 max_sge;
|
||||
u8 max_srq_sge;
|
||||
u16 max_inline;
|
||||
u32 max_wqe;
|
||||
u32 max_srq_wqe;
|
||||
u8 max_qp_resp_rd_atomic_resc;
|
||||
u8 max_qp_req_rd_atomic_resc;
|
||||
u64 max_dev_resp_rd_atomic_resc;
|
||||
u32 max_cq;
|
||||
u32 max_qp;
|
||||
u32 max_srq;
|
||||
u32 max_mr;
|
||||
u64 max_mr_size;
|
||||
u32 max_cqe;
|
||||
u32 max_mw;
|
||||
u32 max_mr_mw_fmr_pbl;
|
||||
u64 max_mr_mw_fmr_size;
|
||||
u32 max_pd;
|
||||
u32 max_ah;
|
||||
u8 max_pkey;
|
||||
u16 max_srq_wr;
|
||||
u8 max_stats_queues;
|
||||
u32 dev_caps;
|
||||
|
||||
/* Ability to support RNR-NAK generation */

#define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1
#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0
/* Ability to support shutdown port */
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1
/* Ability to support port active event */
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2
/* Ability to support port change event */
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3
/* Ability to support system image GUID */
#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1
#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4
/* Ability to support a bad P_Key counter */
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5
/* Ability to support atomic operations */
#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1
#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6
#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1
#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7
/* Ability to support modifying the maximum number of
 * outstanding work requests per QP
 */
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8
/* Ability to support automatic path migration */
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9
/* Ability to support the base memory management extensions */
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11
/* Ability to support multiple page sizes per memory region */
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12
/* Ability to support block list physical buffer list */
#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1
#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13
/* Ability to support zero based virtual addresses */
#define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1
#define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14
/* Ability to support local invalidate fencing */
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15
/* Ability to support Loopback on QP */
#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1
#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16
|
||||
u64 page_size_caps;
|
||||
u8 dev_ack_delay;
|
||||
u32 reserved_lkey;
|
||||
u32 bad_pkey_counter;
|
||||
struct qed_rdma_events events;
|
||||
};
|
||||
|
||||
enum qed_port_state {
|
||||
QED_RDMA_PORT_UP,
|
||||
QED_RDMA_PORT_DOWN,
|
||||
};
|
||||
|
||||
enum qed_roce_capability {
|
||||
QED_ROCE_V1 = 1 << 0,
|
||||
QED_ROCE_V2 = 1 << 1,
|
||||
};
|
||||
|
||||
struct qed_rdma_port {
|
||||
enum qed_port_state port_state;
|
||||
int link_speed;
|
||||
u64 max_msg_size;
|
||||
u8 source_gid_table_len;
|
||||
void *source_gid_table_ptr;
|
||||
u8 pkey_table_len;
|
||||
void *pkey_table_ptr;
|
||||
u32 pkey_bad_counter;
|
||||
enum qed_roce_capability capability;
|
||||
};
|
||||
|
||||
struct qed_rdma_cnq_params {
|
||||
u8 num_pbl_pages;
|
||||
u64 pbl_ptr;
|
||||
};
|
||||
|
||||
/* The CQ Mode affects the CQ doorbell transaction size.
|
||||
* 64/32 bit machines should configure to 32/16 bits respectively.
|
||||
*/
|
||||
enum qed_rdma_cq_mode {
|
||||
QED_RDMA_CQ_MODE_16_BITS,
|
||||
QED_RDMA_CQ_MODE_32_BITS,
|
||||
};
|
||||
|
||||
struct qed_roce_dcqcn_params {
|
||||
u8 notification_point;
|
||||
u8 reaction_point;
|
||||
|
||||
/* fields for notification point */
|
||||
u32 cnp_send_timeout;
|
||||
|
||||
/* fields for reaction point */
|
||||
u32 rl_bc_rate;
|
||||
u16 rl_max_rate;
|
||||
u16 rl_r_ai;
|
||||
u16 rl_r_hai;
|
||||
u16 dcqcn_g;
|
||||
u32 dcqcn_k_us;
|
||||
u32 dcqcn_timeout_us;
|
||||
};
|
||||
|
||||
struct qed_rdma_start_in_params {
|
||||
struct qed_rdma_events *events;
|
||||
struct qed_rdma_cnq_params cnq_pbl_list[128];
|
||||
u8 desired_cnq;
|
||||
enum qed_rdma_cq_mode cq_mode;
|
||||
struct qed_roce_dcqcn_params dcqcn_params;
|
||||
u16 max_mtu;
|
||||
u8 mac_addr[ETH_ALEN];
|
||||
u8 iwarp_flags;
|
||||
};
|
||||
|
||||
struct qed_rdma_add_user_out_params {
|
||||
u16 dpi;
|
||||
void __iomem *dpi_addr;
|
||||
u64 dpi_phys_addr;
|
||||
u32 dpi_size;
|
||||
u16 wid_count;
|
||||
};
|
||||
|
||||
enum roce_mode {
|
||||
ROCE_V1,
|
||||
ROCE_V2_IPV4,
|
||||
ROCE_V2_IPV6,
|
||||
MAX_ROCE_MODE
|
||||
};
|
||||
|
||||
union qed_gid {
|
||||
u8 bytes[16];
|
||||
u16 words[8];
|
||||
u32 dwords[4];
|
||||
u64 qwords[2];
|
||||
u32 ipv4_addr;
|
||||
};
|
||||
|
||||
struct qed_rdma_register_tid_in_params {
|
||||
u32 itid;
|
||||
enum qed_rdma_tid_type tid_type;
|
||||
u8 key;
|
||||
u16 pd;
|
||||
bool local_read;
|
||||
bool local_write;
|
||||
bool remote_read;
|
||||
bool remote_write;
|
||||
bool remote_atomic;
|
||||
bool mw_bind;
|
||||
u64 pbl_ptr;
|
||||
bool pbl_two_level;
|
||||
u8 pbl_page_size_log;
|
||||
u8 page_size_log;
|
||||
u64 length;
|
||||
u64 vaddr;
|
||||
bool phy_mr;
|
||||
bool dma_mr;
|
||||
|
||||
bool dif_enabled;
|
||||
u64 dif_error_addr;
|
||||
};
|
||||
|
||||
struct qed_rdma_create_cq_in_params {
|
||||
u32 cq_handle_lo;
|
||||
u32 cq_handle_hi;
|
||||
u32 cq_size;
|
||||
u16 dpi;
|
||||
bool pbl_two_level;
|
||||
u64 pbl_ptr;
|
||||
u16 pbl_num_pages;
|
||||
u8 pbl_page_size_log;
|
||||
u8 cnq_id;
|
||||
u16 int_timeout;
|
||||
};
|
||||
|
||||
struct qed_rdma_create_srq_in_params {
|
||||
u64 pbl_base_addr;
|
||||
u64 prod_pair_addr;
|
||||
u16 num_pages;
|
||||
u16 pd_id;
|
||||
u16 page_size;
|
||||
|
||||
/* XRC related only */
|
||||
bool reserved_key_en;
|
||||
bool is_xrc;
|
||||
u32 cq_cid;
|
||||
u16 xrcd_id;
|
||||
};
|
||||
|
||||
struct qed_rdma_destroy_cq_in_params {
|
||||
u16 icid;
|
||||
};
|
||||
|
||||
struct qed_rdma_destroy_cq_out_params {
|
||||
u16 num_cq_notif;
|
||||
};
|
||||
|
||||
struct qed_rdma_create_qp_in_params {
|
||||
u32 qp_handle_lo;
|
||||
u32 qp_handle_hi;
|
||||
u32 qp_handle_async_lo;
|
||||
u32 qp_handle_async_hi;
|
||||
bool use_srq;
|
||||
bool signal_all;
|
||||
bool fmr_and_reserved_lkey;
|
||||
u16 pd;
|
||||
u16 dpi;
|
||||
u16 sq_cq_id;
|
||||
u16 sq_num_pages;
|
||||
u64 sq_pbl_ptr;
|
||||
u8 max_sq_sges;
|
||||
u16 rq_cq_id;
|
||||
u16 rq_num_pages;
|
||||
u64 rq_pbl_ptr;
|
||||
u16 srq_id;
|
||||
u16 xrcd_id;
|
||||
u8 stats_queue;
|
||||
enum qed_rdma_qp_type qp_type;
|
||||
u8 flags;
|
||||
#define QED_ROCE_EDPM_MODE_MASK 0x1
|
||||
#define QED_ROCE_EDPM_MODE_SHIFT 0
|
||||
};
|
||||
|
||||
struct qed_rdma_create_qp_out_params {
|
||||
u32 qp_id;
|
||||
u16 icid;
|
||||
void *rq_pbl_virt;
|
||||
dma_addr_t rq_pbl_phys;
|
||||
void *sq_pbl_virt;
|
||||
dma_addr_t sq_pbl_phys;
|
||||
};
|
||||
|
||||
struct qed_rdma_modify_qp_in_params {
|
||||
u32 modify_flags;
|
||||
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1
|
||||
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0
|
||||
#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1
|
||||
#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1
|
||||
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1
|
||||
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2
|
||||
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1
|
||||
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3
|
||||
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1
|
||||
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4
|
||||
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1
|
||||
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5
|
||||
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1
|
||||
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6
|
||||
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1
|
||||
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7
|
||||
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1
|
||||
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8
|
||||
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1
|
||||
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9
|
||||
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1
|
||||
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10
|
||||
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1
|
||||
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11
|
||||
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1
|
||||
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12
|
||||
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1
|
||||
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13
|
||||
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1
|
||||
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14
|
||||
|
||||
enum qed_roce_qp_state new_state;
|
||||
u16 pkey;
|
||||
bool incoming_rdma_read_en;
|
||||
bool incoming_rdma_write_en;
|
||||
bool incoming_atomic_en;
|
||||
bool e2e_flow_control_en;
|
||||
u32 dest_qp;
|
||||
bool lb_indication;
|
||||
u16 mtu;
|
||||
u8 traffic_class_tos;
|
||||
u8 hop_limit_ttl;
|
||||
u32 flow_label;
|
||||
union qed_gid sgid;
|
||||
union qed_gid dgid;
|
||||
u16 udp_src_port;
|
||||
|
||||
u16 vlan_id;
|
||||
|
||||
u32 rq_psn;
|
||||
u32 sq_psn;
|
||||
u8 max_rd_atomic_resp;
|
||||
u8 max_rd_atomic_req;
|
||||
u32 ack_timeout;
|
||||
u8 retry_cnt;
|
||||
u8 rnr_retry_cnt;
|
||||
u8 min_rnr_nak_timer;
|
||||
bool sqd_async;
|
||||
u8 remote_mac_addr[6];
|
||||
u8 local_mac_addr[6];
|
||||
bool use_local_mac;
|
||||
enum roce_mode roce_mode;
|
||||
};
|
||||
|
||||
struct qed_rdma_query_qp_out_params {
|
||||
enum qed_roce_qp_state state;
|
||||
u32 rq_psn;
|
||||
u32 sq_psn;
|
||||
bool draining;
|
||||
u16 mtu;
|
||||
u32 dest_qp;
|
||||
bool incoming_rdma_read_en;
|
||||
bool incoming_rdma_write_en;
|
||||
bool incoming_atomic_en;
|
||||
bool e2e_flow_control_en;
|
||||
union qed_gid sgid;
|
||||
union qed_gid dgid;
|
||||
u32 flow_label;
|
||||
u8 hop_limit_ttl;
|
||||
u8 traffic_class_tos;
|
||||
u32 timeout;
|
||||
u8 rnr_retry;
|
||||
u8 retry_cnt;
|
||||
u8 min_rnr_nak_timer;
|
||||
u16 pkey_index;
|
||||
u8 max_rd_atomic;
|
||||
u8 max_dest_rd_atomic;
|
||||
bool sqd_async;
|
||||
};
|
||||
|
||||
struct qed_rdma_create_srq_out_params {
|
||||
u16 srq_id;
|
||||
};
|
||||
|
||||
struct qed_rdma_destroy_srq_in_params {
|
||||
u16 srq_id;
|
||||
bool is_xrc;
|
||||
};
|
||||
|
||||
struct qed_rdma_modify_srq_in_params {
|
||||
u32 wqe_limit;
|
||||
u16 srq_id;
|
||||
bool is_xrc;
|
||||
};
|
||||
|
||||
struct qed_rdma_stats_out_params {
|
||||
u64 sent_bytes;
|
||||
u64 sent_pkts;
|
||||
u64 rcv_bytes;
|
||||
u64 rcv_pkts;
|
||||
};
|
||||
|
||||
struct qed_rdma_counters_out_params {
|
||||
u64 pd_count;
|
||||
u64 max_pd;
|
||||
u64 dpi_count;
|
||||
u64 max_dpi;
|
||||
u64 cq_count;
|
||||
u64 max_cq;
|
||||
u64 qp_count;
|
||||
u64 max_qp;
|
||||
u64 tid_count;
|
||||
u64 max_tid;
|
||||
};
|
||||
|
||||
#define QED_ROCE_TX_HEAD_FAILURE (1)
|
||||
#define QED_ROCE_TX_FRAG_FAILURE (2)
|
||||
|
||||
enum qed_iwarp_event_type {
|
||||
QED_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
|
||||
QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */
|
||||
QED_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
|
||||
QED_IWARP_EVENT_DISCONNECT,
|
||||
QED_IWARP_EVENT_CLOSE,
|
||||
QED_IWARP_EVENT_IRQ_FULL,
|
||||
QED_IWARP_EVENT_RQ_EMPTY,
|
||||
QED_IWARP_EVENT_LLP_TIMEOUT,
|
||||
QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
|
||||
QED_IWARP_EVENT_CQ_OVERFLOW,
|
||||
QED_IWARP_EVENT_QP_CATASTROPHIC,
|
||||
QED_IWARP_EVENT_ACTIVE_MPA_REPLY,
|
||||
QED_IWARP_EVENT_LOCAL_ACCESS_ERROR,
|
||||
QED_IWARP_EVENT_REMOTE_OPERATION_ERROR,
|
||||
QED_IWARP_EVENT_TERMINATE_RECEIVED,
|
||||
QED_IWARP_EVENT_SRQ_LIMIT,
|
||||
QED_IWARP_EVENT_SRQ_EMPTY,
|
||||
};
|
||||
|
||||
enum qed_tcp_ip_version {
|
||||
QED_TCP_IPV4,
|
||||
QED_TCP_IPV6,
|
||||
};
|
||||
|
||||
struct qed_iwarp_cm_info {
|
||||
enum qed_tcp_ip_version ip_version;
|
||||
u32 remote_ip[4];
|
||||
u32 local_ip[4];
|
||||
u16 remote_port;
|
||||
u16 local_port;
|
||||
u16 vlan;
|
||||
u8 ord;
|
||||
u8 ird;
|
||||
u16 private_data_len;
|
||||
const void *private_data;
|
||||
};
|
||||
|
||||
struct qed_iwarp_cm_event_params {
|
||||
enum qed_iwarp_event_type event;
|
||||
const struct qed_iwarp_cm_info *cm_info;
|
||||
void *ep_context; /* To be passed to accept call */
|
||||
int status;
|
||||
};
|
||||
|
||||
typedef int (*iwarp_event_handler) (void *context,
|
||||
struct qed_iwarp_cm_event_params *event);
|
||||
|
||||
struct qed_iwarp_connect_in {
|
||||
iwarp_event_handler event_cb;
|
||||
void *cb_context;
|
||||
struct qed_rdma_qp *qp;
|
||||
struct qed_iwarp_cm_info cm_info;
|
||||
u16 mss;
|
||||
u8 remote_mac_addr[ETH_ALEN];
|
||||
u8 local_mac_addr[ETH_ALEN];
|
||||
};
|
||||
|
||||
struct qed_iwarp_connect_out {
|
||||
void *ep_context;
|
||||
};
|
||||
|
||||
struct qed_iwarp_listen_in {
|
||||
iwarp_event_handler event_cb;
|
||||
void *cb_context; /* passed to event_cb */
|
||||
u32 max_backlog;
|
||||
enum qed_tcp_ip_version ip_version;
|
||||
u32 ip_addr[4];
|
||||
u16 port;
|
||||
u16 vlan;
|
||||
};
|
||||
|
||||
struct qed_iwarp_listen_out {
|
||||
void *handle;
|
||||
};
|
||||
|
||||
struct qed_iwarp_accept_in {
|
||||
void *ep_context;
|
||||
void *cb_context;
|
||||
struct qed_rdma_qp *qp;
|
||||
const void *private_data;
|
||||
u16 private_data_len;
|
||||
u8 ord;
|
||||
u8 ird;
|
||||
};
|
||||
|
||||
struct qed_iwarp_reject_in {
|
||||
void *ep_context;
|
||||
void *cb_context;
|
||||
const void *private_data;
|
||||
u16 private_data_len;
|
||||
};
|
||||
|
||||
struct qed_iwarp_send_rtr_in {
|
||||
void *ep_context;
|
||||
};
|
||||
|
||||
struct qed_roce_ll2_header {
|
||||
void *vaddr;
|
||||
dma_addr_t baddr;
|
||||
size_t len;
|
||||
};
|
||||
|
||||
struct qed_roce_ll2_buffer {
|
||||
dma_addr_t baddr;
|
||||
size_t len;
|
||||
};
|
||||
|
||||
struct qed_roce_ll2_packet {
|
||||
struct qed_roce_ll2_header header;
|
||||
int n_seg;
|
||||
struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
|
||||
int roce_mode;
|
||||
enum qed_ll2_tx_dest tx_dest;
|
||||
};
|
||||
|
||||
enum qed_rdma_type {
|
||||
QED_RDMA_TYPE_ROCE,
|
||||
QED_RDMA_TYPE_IWARP
|
||||
};
|
||||
|
||||
struct qed_dev_rdma_info {
|
||||
struct qed_dev_info common;
|
||||
enum qed_rdma_type rdma_type;
|
||||
u8 user_dpm_enabled;
|
||||
};
|
||||
|
||||
struct qed_rdma_ops {
|
||||
const struct qed_common_ops *common;
|
||||
|
||||
int (*fill_dev_info)(struct qed_dev *cdev,
|
||||
struct qed_dev_rdma_info *info);
|
||||
void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);
|
||||
|
||||
int (*rdma_init)(struct qed_dev *dev,
|
||||
struct qed_rdma_start_in_params *iparams);
|
||||
|
||||
int (*rdma_add_user)(void *rdma_cxt,
|
||||
struct qed_rdma_add_user_out_params *oparams);
|
||||
|
||||
void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
|
||||
int (*rdma_stop)(void *rdma_cxt);
|
||||
struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
|
||||
struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);
|
||||
int (*rdma_get_start_sb)(struct qed_dev *cdev);
|
||||
int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
|
||||
void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
|
||||
int (*rdma_get_rdma_int)(struct qed_dev *cdev,
|
||||
struct qed_int_info *info);
|
||||
int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
|
||||
int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
|
||||
void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
|
||||
int (*rdma_alloc_xrcd)(void *rdma_cxt, u16 *xrcd);
|
||||
void (*rdma_dealloc_xrcd)(void *rdma_cxt, u16 xrcd);
|
||||
int (*rdma_create_cq)(void *rdma_cxt,
|
||||
struct qed_rdma_create_cq_in_params *params,
|
||||
u16 *icid);
|
||||
int (*rdma_destroy_cq)(void *rdma_cxt,
|
||||
struct qed_rdma_destroy_cq_in_params *iparams,
|
||||
struct qed_rdma_destroy_cq_out_params *oparams);
|
||||
struct qed_rdma_qp *
|
||||
(*rdma_create_qp)(void *rdma_cxt,
|
||||
struct qed_rdma_create_qp_in_params *iparams,
|
||||
struct qed_rdma_create_qp_out_params *oparams);
|
||||
|
||||
int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
|
||||
struct qed_rdma_modify_qp_in_params *iparams);
|
||||
|
||||
int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
|
||||
struct qed_rdma_query_qp_out_params *oparams);
|
||||
int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
|
||||
|
||||
int
|
||||
(*rdma_register_tid)(void *rdma_cxt,
|
||||
struct qed_rdma_register_tid_in_params *iparams);
|
||||
|
||||
int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
|
||||
int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
|
||||
void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
|
||||
|
||||
int (*rdma_create_srq)(void *rdma_cxt,
|
||||
struct qed_rdma_create_srq_in_params *iparams,
|
||||
struct qed_rdma_create_srq_out_params *oparams);
|
||||
int (*rdma_destroy_srq)(void *rdma_cxt,
|
||||
struct qed_rdma_destroy_srq_in_params *iparams);
|
||||
int (*rdma_modify_srq)(void *rdma_cxt,
|
||||
struct qed_rdma_modify_srq_in_params *iparams);
|
||||
|
||||
int (*ll2_acquire_connection)(void *rdma_cxt,
|
||||
struct qed_ll2_acquire_data *data);
|
||||
|
||||
int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle);
|
||||
int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle);
|
||||
void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle);
|
||||
|
||||
int (*ll2_prepare_tx_packet)(void *rdma_cxt,
|
||||
u8 connection_handle,
|
||||
struct qed_ll2_tx_pkt_info *pkt,
|
||||
bool notify_fw);
|
||||
|
||||
int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt,
|
||||
u8 connection_handle,
|
||||
dma_addr_t addr,
|
||||
u16 nbytes);
|
||||
int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle,
|
||||
dma_addr_t addr, u16 buf_len, void *cookie,
|
||||
u8 notify_fw);
|
||||
int (*ll2_get_stats)(void *rdma_cxt,
|
||||
u8 connection_handle,
|
||||
struct qed_ll2_stats *p_stats);
|
||||
int (*ll2_set_mac_filter)(struct qed_dev *cdev,
|
||||
u8 *old_mac_address,
|
||||
const u8 *new_mac_address);
|
||||
|
||||
int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset);
|
||||
|
||||
int (*iwarp_connect)(void *rdma_cxt,
|
||||
struct qed_iwarp_connect_in *iparams,
|
||||
struct qed_iwarp_connect_out *oparams);
|
||||
|
||||
int (*iwarp_create_listen)(void *rdma_cxt,
|
||||
struct qed_iwarp_listen_in *iparams,
|
||||
struct qed_iwarp_listen_out *oparams);
|
||||
|
||||
int (*iwarp_accept)(void *rdma_cxt,
|
||||
struct qed_iwarp_accept_in *iparams);
|
||||
|
||||
int (*iwarp_reject)(void *rdma_cxt,
|
||||
struct qed_iwarp_reject_in *iparams);
|
||||
|
||||
int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle);
|
||||
|
||||
int (*iwarp_send_rtr)(void *rdma_cxt,
|
||||
struct qed_iwarp_send_rtr_in *iparams);
|
||||
};
|
||||
|
||||
const struct qed_rdma_ops *qed_get_rdma_ops(void);
|
||||
|
||||
#endif
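A hedged sketch of the early bring-up sequence a qedr-style RDMA driver could run against the ops table above; example_rdma_bringup is hypothetical and error handling is abbreviated.

static int example_rdma_bringup(struct qed_dev *cdev,
				struct qed_rdma_start_in_params *start)
{
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
	struct qed_dev_rdma_info info;
	void *rdma_cxt;
	u16 pd;
	int rc;

	if (!ops)
		return -ENODEV;

	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		return rc;

	rdma_cxt = ops->rdma_get_rdma_ctx(cdev);

	rc = ops->rdma_init(cdev, start);
	if (rc)
		return rc;

	rc = ops->rdma_alloc_pd(rdma_cxt, &pd);	/* first protection domain */
	if (rc)
		ops->rdma_stop(rdma_cxt);

	return rc;
}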
73
linux-6.8.1/include/linux/qed/qede_rdma.h
Normal file
@@ -0,0 +1,73 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* QLogic qedr NIC Driver
|
||||
* Copyright (c) 2015-2017 QLogic Corporation
|
||||
* Copyright (c) 2019-2020 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef QEDE_ROCE_H
|
||||
#define QEDE_ROCE_H
|
||||
|
||||
#include <linux/pci.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
struct qedr_dev;
|
||||
struct qed_dev;
|
||||
struct qede_dev;
|
||||
|
||||
enum qede_rdma_event {
|
||||
QEDE_UP,
|
||||
QEDE_DOWN,
|
||||
QEDE_CHANGE_ADDR,
|
||||
QEDE_CLOSE,
|
||||
QEDE_CHANGE_MTU,
|
||||
};
|
||||
|
||||
struct qede_rdma_event_work {
|
||||
struct list_head list;
|
||||
struct work_struct work;
|
||||
void *ptr;
|
||||
enum qede_rdma_event event;
|
||||
};
|
||||
|
||||
struct qedr_driver {
|
||||
unsigned char name[32];
|
||||
|
||||
struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *,
|
||||
struct net_device *);
|
||||
|
||||
void (*remove)(struct qedr_dev *);
|
||||
void (*notify)(struct qedr_dev *, enum qede_rdma_event);
|
||||
};
|
||||
|
||||
/* APIs for RDMA driver to register callback handlers,
|
||||
* which will be invoked when device is added, removed, ifup, ifdown
|
||||
*/
|
||||
int qede_rdma_register_driver(struct qedr_driver *drv);
|
||||
void qede_rdma_unregister_driver(struct qedr_driver *drv);
|
||||
|
||||
bool qede_rdma_supported(struct qede_dev *dev);
|
||||
|
||||
#if IS_ENABLED(CONFIG_QED_RDMA)
|
||||
int qede_rdma_dev_add(struct qede_dev *dev, bool recovery);
|
||||
void qede_rdma_dev_event_open(struct qede_dev *dev);
|
||||
void qede_rdma_dev_event_close(struct qede_dev *dev);
|
||||
void qede_rdma_dev_remove(struct qede_dev *dev, bool recovery);
|
||||
void qede_rdma_event_changeaddr(struct qede_dev *edr);
|
||||
void qede_rdma_event_change_mtu(struct qede_dev *edev);
|
||||
|
||||
#else
|
||||
static inline int qede_rdma_dev_add(struct qede_dev *dev,
|
||||
bool recovery)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {}
|
||||
static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {}
|
||||
static inline void qede_rdma_dev_remove(struct qede_dev *dev,
|
||||
bool recovery) {}
|
||||
static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {}
|
||||
#endif
|
||||
#endif
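A hedged sketch of how an RDMA driver might register with qede through the hooks above; all example_* callbacks are hypothetical stubs.

static struct qedr_dev *example_add(struct qed_dev *cdev, struct pci_dev *pdev,
				    struct net_device *ndev)
{
	return NULL;	/* allocate and return the qedr device here */
}

static void example_remove(struct qedr_dev *dev) { }

static void example_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
	/* react to QEDE_UP / QEDE_DOWN / QEDE_CHANGE_ADDR / ... */
}

static struct qedr_driver example_qedr_driver = {
	.name	= "example_qedr",
	.add	= example_add,
	.remove	= example_remove,
	.notify	= example_notify,
};

/* qede_rdma_register_driver(&example_qedr_driver) at module init,
 * qede_rdma_unregister_driver(&example_qedr_driver) at module exit.
 */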
48
linux-6.8.1/include/linux/qed/rdma_common.h
Normal file
@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* QLogic qed NIC Driver
|
||||
* Copyright (c) 2015-2017 QLogic Corporation
|
||||
* Copyright (c) 2019-2020 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef __RDMA_COMMON__
|
||||
#define __RDMA_COMMON__
|
||||
|
||||
/************************/
|
||||
/* RDMA FW CONSTANTS */
|
||||
/************************/
|
||||
|
||||
#define RDMA_RESERVED_LKEY (0)
|
||||
#define RDMA_RING_PAGE_SIZE (0x1000)
|
||||
|
||||
#define RDMA_MAX_SGE_PER_SQ_WQE (4)
|
||||
#define RDMA_MAX_SGE_PER_RQ_WQE (4)
|
||||
|
||||
#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000)
|
||||
|
||||
#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
|
||||
#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
|
||||
|
||||
#define RDMA_MAX_CQS (64 * 1024)
|
||||
#define RDMA_MAX_TIDS (128 * 1024 - 1)
|
||||
#define RDMA_MAX_PDS (64 * 1024)
|
||||
#define RDMA_MAX_XRC_SRQS (1024)
|
||||
#define RDMA_MAX_SRQS (32 * 1024)
|
||||
#define RDMA_MAX_IRQ_ELEMS_IN_PAGE (128)
|
||||
|
||||
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
|
||||
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
|
||||
#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
|
||||
|
||||
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
|
||||
|
||||
struct rdma_srq_id {
|
||||
__le16 srq_idx;
|
||||
__le16 opaque_fid;
|
||||
};
|
||||
|
||||
struct rdma_srq_producers {
|
||||
__le32 sge_prod;
|
||||
__le32 wqe_prod;
|
||||
};
|
||||
|
||||
#endif /* __RDMA_COMMON__ */
43
linux-6.8.1/include/linux/qed/roce_common.h
Normal file
@@ -0,0 +1,43 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* QLogic qed NIC Driver
|
||||
* Copyright (c) 2015-2017 QLogic Corporation
|
||||
* Copyright (c) 2019-2020 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef __ROCE_COMMON__
|
||||
#define __ROCE_COMMON__
|
||||
|
||||
/************************/
|
||||
/* ROCE FW CONSTANTS */
|
||||
/************************/
|
||||
|
||||
#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
|
||||
#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
|
||||
|
||||
#define ROCE_MAX_QPS (32 * 1024)
|
||||
#define ROCE_DCQCN_NP_MAX_QPS (64)
|
||||
#define ROCE_DCQCN_RP_MAX_QPS (64)
|
||||
#define ROCE_LKEY_MW_DIF_EN_BIT (28)
|
||||
|
||||
/* Affiliated asynchronous events / errors enumeration */
|
||||
enum roce_async_events_type {
|
||||
ROCE_ASYNC_EVENT_NONE = 0,
|
||||
ROCE_ASYNC_EVENT_COMM_EST = 1,
|
||||
ROCE_ASYNC_EVENT_SQ_DRAINED,
|
||||
ROCE_ASYNC_EVENT_SRQ_LIMIT,
|
||||
ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
|
||||
ROCE_ASYNC_EVENT_CQ_ERR,
|
||||
ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
|
||||
ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
|
||||
ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
|
||||
ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
|
||||
ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
|
||||
ROCE_ASYNC_EVENT_SRQ_EMPTY,
|
||||
ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
|
||||
ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR,
|
||||
ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR,
|
||||
ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR,
|
||||
MAX_ROCE_ASYNC_EVENTS_TYPE
|
||||
};
|
||||
|
||||
#endif /* __ROCE_COMMON__ */
157
linux-6.8.1/include/linux/qed/storage_common.h
Normal file
@@ -0,0 +1,157 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* QLogic qed NIC Driver
|
||||
* Copyright (c) 2015-2017 QLogic Corporation
|
||||
* Copyright (c) 2019-2020 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef __STORAGE_COMMON__
|
||||
#define __STORAGE_COMMON__
|
||||
|
||||
/*********************/
|
||||
/* SCSI CONSTANTS */
|
||||
/*********************/
|
||||
|
||||
#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2)
|
||||
#define BDQ_NUM_RESOURCES (4)
|
||||
|
||||
#define BDQ_ID_RQ (0)
|
||||
#define BDQ_ID_IMM_DATA (1)
|
||||
#define BDQ_ID_TQ (2)
|
||||
#define BDQ_NUM_IDS (3)
|
||||
|
||||
#define SCSI_NUM_SGES_SLOW_SGL_THR 8
|
||||
|
||||
#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15)
|
||||
|
||||
/* SCSI op codes */
|
||||
#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89)
|
||||
#define SCSI_OPCODE_READ_10 (0x28)
|
||||
#define SCSI_OPCODE_WRITE_6 (0x0A)
|
||||
#define SCSI_OPCODE_WRITE_10 (0x2A)
|
||||
#define SCSI_OPCODE_WRITE_12 (0xAA)
|
||||
#define SCSI_OPCODE_WRITE_16 (0x8A)
|
||||
#define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E)
|
||||
#define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE)
|
||||
#define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E)
|
||||
|
||||
/* iSCSI Drv opaque */
|
||||
struct iscsi_drv_opaque {
|
||||
__le16 reserved_zero[3];
|
||||
__le16 opaque;
|
||||
};
|
||||
|
||||
/* Scsi 2B/8B opaque union */
|
||||
union scsi_opaque {
|
||||
struct regpair fcoe_opaque;
|
||||
struct iscsi_drv_opaque iscsi_opaque;
|
||||
};
|
||||
|
||||
/* SCSI buffer descriptor */
|
||||
struct scsi_bd {
|
||||
struct regpair address;
|
||||
union scsi_opaque opaque;
|
||||
};
|
||||
|
||||
/* Scsi Drv BDQ struct */
|
||||
struct scsi_bdq_ram_drv_data {
|
||||
__le16 external_producer;
|
||||
__le16 reserved0[3];
|
||||
};
|
||||
|
||||
/* SCSI SGE entry */
|
||||
struct scsi_sge {
|
||||
struct regpair sge_addr;
|
||||
__le32 sge_len;
|
||||
__le32 reserved;
|
||||
};
|
||||
|
||||
/* Cached SGEs section */
|
||||
struct scsi_cached_sges {
|
||||
struct scsi_sge sge[4];
|
||||
};
|
||||
|
||||
/* Scsi Drv CMDQ struct */
|
||||
struct scsi_drv_cmdq {
|
||||
__le16 cmdq_cons;
|
||||
__le16 reserved0;
|
||||
__le32 reserved1;
|
||||
};
|
||||
|
||||
/* Common SCSI init params passed by driver to FW in function init ramrod */
|
||||
struct scsi_init_func_params {
|
||||
__le16 num_tasks;
|
||||
u8 log_page_size;
|
||||
u8 log_page_size_conn;
|
||||
u8 debug_mode;
|
||||
u8 reserved2[11];
|
||||
};
|
||||
|
||||
/* SCSI RQ/CQ/CMDQ firmware function init parameters */
|
||||
struct scsi_init_func_queues {
|
||||
struct regpair glbl_q_params_addr;
|
||||
__le16 rq_buffer_size;
|
||||
__le16 cq_num_entries;
|
||||
__le16 cmdq_num_entries;
|
||||
u8 bdq_resource_id;
|
||||
u8 q_validity;
|
||||
#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
|
||||
#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
|
||||
#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
|
||||
#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
|
||||
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
|
||||
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
|
||||
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1
|
||||
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3
|
||||
#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1
|
||||
#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4
|
||||
#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7
|
||||
#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5
|
||||
__le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS];
|
||||
u8 num_queues;
|
||||
u8 queue_relative_offset;
|
||||
u8 cq_sb_pi;
|
||||
u8 cmdq_sb_pi;
|
||||
u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
|
||||
u8 reserved1;
|
||||
struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
|
||||
__le16 bdq_xoff_threshold[BDQ_NUM_IDS];
|
||||
__le16 cmdq_xoff_threshold;
|
||||
__le16 bdq_xon_threshold[BDQ_NUM_IDS];
|
||||
__le16 cmdq_xon_threshold;
|
||||
};
|
||||
|
||||
/* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */
|
||||
struct scsi_ram_per_bdq_resource_drv_data {
|
||||
struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
|
||||
};
|
||||
|
||||
/* SCSI SGL types */
|
||||
enum scsi_sgl_mode {
|
||||
SCSI_TX_SLOW_SGL,
|
||||
SCSI_FAST_SGL,
|
||||
MAX_SCSI_SGL_MODE
|
||||
};
|
||||
|
||||
/* SCSI SGL parameters */
|
||||
struct scsi_sgl_params {
|
||||
struct regpair sgl_addr;
|
||||
__le32 sgl_total_length;
|
||||
__le32 sge_offset;
|
||||
__le16 sgl_num_sges;
|
||||
u8 sgl_index;
|
||||
u8 reserved;
|
||||
};
|
||||
|
||||
/* SCSI terminate connection params */
|
||||
struct scsi_terminate_extra_params {
|
||||
__le16 unsolicited_cq_count;
|
||||
__le16 cmdq_count;
|
||||
u8 reserved[4];
|
||||
};
|
||||
|
||||
/* SCSI Task Queue Element */
|
||||
struct scsi_tqe {
|
||||
__le16 itid;
|
||||
};
|
||||
|
||||
#endif /* __STORAGE_COMMON__ */
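A small hedged sketch of filling one SCSI SGE in the little-endian layout the firmware expects, assuming struct regpair's lo/hi 32-bit halves from common_hsi.h; example_fill_sge and the DMA address are hypothetical.

static void example_fill_sge(struct scsi_sge *sge, dma_addr_t addr, u32 len)
{
	sge->sge_addr.lo = cpu_to_le32(lower_32_bits(addr));
	sge->sge_addr.hi = cpu_to_le32(upper_32_bits(addr));
	sge->sge_len = cpu_to_le32(len);
	sge->reserved = 0;
}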
255
linux-6.8.1/include/linux/qed/tcp_common.h
Normal file
@@ -0,0 +1,255 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
|
||||
/* QLogic qed NIC Driver
|
||||
* Copyright (c) 2015-2017 QLogic Corporation
|
||||
* Copyright (c) 2019-2020 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef __TCP_COMMON__
|
||||
#define __TCP_COMMON__
|
||||
|
||||
/********************/
|
||||
/* TCP FW CONSTANTS */
|
||||
/********************/
|
||||
|
||||
#define TCP_INVALID_TIMEOUT_VAL -1
|
||||
|
||||
/* OOO opaque data received from LL2 */
|
||||
struct ooo_opaque {
|
||||
__le32 cid;
|
||||
u8 drop_isle;
|
||||
u8 drop_size;
|
||||
u8 ooo_opcode;
|
||||
u8 ooo_isle;
|
||||
};
|
||||
|
||||
/* tcp connect mode enum */
|
||||
enum tcp_connect_mode {
|
||||
TCP_CONNECT_ACTIVE,
|
||||
TCP_CONNECT_PASSIVE,
|
||||
MAX_TCP_CONNECT_MODE
|
||||
};
|
||||
|
||||
/* tcp function init parameters */
|
||||
struct tcp_init_params {
|
||||
__le32 two_msl_timer;
|
||||
__le16 tx_sws_timer;
|
||||
u8 max_fin_rt;
|
||||
u8 reserved[9];
|
||||
};
|
||||
|
||||
/* tcp IPv4/IPv6 enum */
|
||||
enum tcp_ip_version {
|
||||
TCP_IPV4,
|
||||
TCP_IPV6,
|
||||
MAX_TCP_IP_VERSION
|
||||
};
|
||||
|
||||
/* tcp offload parameters */
|
||||
struct tcp_offload_params {
|
||||
__le16 local_mac_addr_lo;
|
||||
__le16 local_mac_addr_mid;
|
||||
__le16 local_mac_addr_hi;
|
||||
__le16 remote_mac_addr_lo;
|
||||
__le16 remote_mac_addr_mid;
|
||||
__le16 remote_mac_addr_hi;
|
||||
__le16 vlan_id;
|
||||
__le16 flags;
|
||||
#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
|
||||
#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
|
||||
#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
|
||||
#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3
|
||||
#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4
|
||||
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5
|
||||
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6
|
||||
#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7
|
||||
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8
|
||||
#define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F
|
||||
#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9
|
||||
u8 ip_version;
|
||||
u8 reserved0[3];
|
||||
__le32 remote_ip[4];
|
||||
__le32 local_ip[4];
|
||||
__le32 flow_label;
|
||||
u8 ttl;
|
||||
u8 tos_or_tc;
|
||||
__le16 remote_port;
|
||||
__le16 local_port;
|
||||
__le16 mss;
|
||||
u8 rcv_wnd_scale;
|
||||
u8 connect_mode;
|
||||
__le16 srtt;
|
||||
__le32 ss_thresh;
|
||||
__le32 rcv_wnd;
|
||||
__le32 cwnd;
|
||||
u8 ka_max_probe_cnt;
|
||||
u8 dup_ack_theshold;
|
||||
__le16 reserved1;
|
||||
__le32 ka_timeout;
|
||||
__le32 ka_interval;
|
||||
__le32 max_rt_time;
|
||||
__le32 initial_rcv_wnd;
|
||||
__le32 rcv_next;
|
||||
__le32 snd_una;
|
||||
__le32 snd_next;
|
||||
__le32 snd_max;
|
||||
__le32 snd_wnd;
|
||||
__le32 snd_wl1;
|
||||
__le32 ts_recent;
|
||||
__le32 ts_recent_age;
|
||||
__le32 total_rt;
|
||||
__le32 ka_timeout_delta;
|
||||
__le32 rt_timeout_delta;
|
||||
u8 dup_ack_cnt;
|
||||
u8 snd_wnd_probe_cnt;
|
||||
u8 ka_probe_cnt;
|
||||
u8 rt_cnt;
|
||||
__le16 rtt_var;
|
||||
__le16 fw_internal;
|
||||
u8 snd_wnd_scale;
|
||||
u8 ack_frequency;
|
||||
__le16 da_timeout_value;
|
||||
__le32 reserved3;
|
||||
};
|
||||
|
||||
/* tcp offload parameters */
|
||||
struct tcp_offload_params_opt2 {
|
||||
__le16 local_mac_addr_lo;
|
||||
__le16 local_mac_addr_mid;
|
||||
__le16 local_mac_addr_hi;
|
||||
__le16 remote_mac_addr_lo;
|
||||
__le16 remote_mac_addr_mid;
|
||||
__le16 remote_mac_addr_hi;
|
||||
__le16 vlan_id;
|
||||
__le16 flags;
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4
|
||||
u8 ip_version;
|
||||
u8 reserved1[3];
|
||||
__le32 remote_ip[4];
|
||||
__le32 local_ip[4];
|
||||
__le32 flow_label;
|
||||
u8 ttl;
|
||||
u8 tos_or_tc;
|
||||
__le16 remote_port;
|
||||
__le16 local_port;
|
||||
__le16 mss;
|
||||
u8 rcv_wnd_scale;
|
||||
u8 connect_mode;
|
||||
__le16 syn_ip_payload_length;
|
||||
__le32 syn_phy_addr_lo;
|
||||
__le32 syn_phy_addr_hi;
|
||||
__le32 cwnd;
|
||||
u8 ka_max_probe_cnt;
|
||||
u8 reserved2[3];
|
||||
__le32 ka_timeout;
|
||||
__le32 ka_interval;
|
||||
__le32 max_rt_time;
|
||||
__le32 reserved3[16];
|
||||
};
|
||||
|
||||
/* tcp segment placement event enum */
|
||||
enum tcp_seg_placement_event {
|
||||
TCP_EVENT_ADD_PEN,
|
||||
TCP_EVENT_ADD_NEW_ISLE,
|
||||
TCP_EVENT_ADD_ISLE_RIGHT,
|
||||
TCP_EVENT_ADD_ISLE_LEFT,
|
||||
TCP_EVENT_JOIN,
|
||||
TCP_EVENT_DELETE_ISLES,
|
||||
TCP_EVENT_NOP,
|
||||
MAX_TCP_SEG_PLACEMENT_EVENT
|
||||
};
|
||||
|
||||
/* tcp init parameters */
|
||||
struct tcp_update_params {
|
||||
__le16 flags;
|
||||
#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0
|
||||
#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1
|
||||
#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2
|
||||
#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3
|
||||
#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4
|
||||
#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5
|
||||
#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6
|
||||
#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7
|
||||
#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8
|
||||
#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
|
||||
#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10
|
||||
#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11
|
||||
#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12
|
||||
#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13
|
||||
#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14
|
||||
#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1
|
||||
#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15
|
||||
__le16 remote_mac_addr_lo;
|
||||
__le16 remote_mac_addr_mid;
|
||||
__le16 remote_mac_addr_hi;
|
||||
__le16 mss;
|
||||
u8 ttl;
|
||||
u8 tos_or_tc;
|
||||
__le32 ka_timeout;
|
||||
__le32 ka_interval;
|
||||
__le32 max_rt_time;
|
||||
__le32 flow_label;
|
||||
__le32 initial_rcv_wnd;
|
||||
u8 ka_max_probe_cnt;
|
||||
u8 reserved1[7];
|
||||
};
|
||||
|
||||
/* TOE (TCP offload engine) upload parameters */
|
||||
struct tcp_upload_params {
|
||||
__le32 rcv_next;
|
||||
__le32 snd_una;
|
||||
__le32 snd_next;
|
||||
__le32 snd_max;
|
||||
__le32 snd_wnd;
|
||||
__le32 rcv_wnd;
|
||||
__le32 snd_wl1;
|
||||
__le32 cwnd;
|
||||
__le32 ss_thresh;
|
||||
__le16 srtt;
|
||||
__le16 rtt_var;
|
||||
__le32 ts_time;
|
||||
__le32 ts_recent;
|
||||
__le32 ts_recent_age;
|
||||
__le32 total_rt;
|
||||
__le32 ka_timeout_delta;
|
||||
__le32 rt_timeout_delta;
|
||||
u8 dup_ack_cnt;
|
||||
u8 snd_wnd_probe_cnt;
|
||||
u8 ka_probe_cnt;
|
||||
u8 rt_cnt;
|
||||
__le32 reserved;
|
||||
};
|
||||
|
||||
#endif /* __TCP_COMMON__ */
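A hedged sketch of composing the flags word of struct tcp_offload_params with the MASK/SHIFT pairs above (timestamps, delayed ACK and keep-alive enabled); example_fill_tcp_flags and the chosen options are illustrative.

static void example_fill_tcp_flags(struct tcp_offload_params *p)
{
	u16 flags = 0;

	flags |= TCP_OFFLOAD_PARAMS_TS_EN_MASK << TCP_OFFLOAD_PARAMS_TS_EN_SHIFT;
	flags |= TCP_OFFLOAD_PARAMS_DA_EN_MASK << TCP_OFFLOAD_PARAMS_DA_EN_SHIFT;
	flags |= TCP_OFFLOAD_PARAMS_KA_EN_MASK << TCP_OFFLOAD_PARAMS_KA_EN_SHIFT;

	p->flags = cpu_to_le16(flags);	/* firmware expects little-endian */
}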