linux/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h:5893 (struct hwrm_queue_cos2bw_cfg_input)

struct hwrm_queue_cos2bw_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 flags;
__le32 enables;
#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
__le16 port_id;
u8 queue_id0;
u8 unused_0;
__le32 queue_id0_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id0_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id0_tsa_assign;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
u8 queue_id1;
__le32 queue_id1_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id1_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id1_tsa_assign;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id1_pri_lvl;
u8 queue_id1_bw_weight;
u8 queue_id2;
__le32 queue_id2_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id2_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id2_tsa_assign;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id2_pri_lvl;
u8 queue_id2_bw_weight;
u8 queue_id3;
__le32 queue_id3_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id3_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id3_tsa_assign;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id3_pri_lvl;
u8 queue_id3_bw_weight;
u8 queue_id4;
__le32 queue_id4_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id4_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id4_tsa_assign;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id4_pri_lvl;
u8 queue_id4_bw_weight;
u8 queue_id5;
__le32 queue_id5_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id5_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id5_tsa_assign;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id5_pri_lvl;
u8 queue_id5_bw_weight;
u8 queue_id6;
__le32 queue_id6_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id6_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id6_tsa_assign;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id6_pri_lvl;
u8 queue_id6_bw_weight;
u8 queue_id7;
__le32 queue_id7_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id7_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id7_tsa_assign;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id7_pri_lvl;
u8 queue_id7_bw_weight;
u8 unused_1[5];
};

linux/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h:5561 (struct hwrm_queue_cos2bw_qcfg_output)

struct hwrm_queue_cos2bw_qcfg_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 queue_id0;
u8 unused_0;
__le16 unused_1;
__le32 queue_id0_min_bw;
/*
 * queue_id0 through queue_id7 repeat the per-queue layout of the request
 * above (min_bw, max_bw, tsa_assign, pri_lvl, bw_weight, with the same
 * values for every #define); only the prefix differs, reading
 * QUEUE_COS2BW_QCFG_RESP_ in place of QUEUE_COS2BW_CFG_REQ_, so the
 * blocks are not repeated here.
 */
u8 queue_id7_pri_lvl;
u8 queue_id7_bw_weight;
u8 unused_2[4];
u8 valid;
};
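
Each *_min_bw/*_max_bw word above packs three fields into one __le32: a 28-bit rate value (bits 0-27, BW_VALUE_MASK/SFT), a scale bit selecting bits or bytes (bit 28, SCALE), and a 3-bit unit code (bits 29-31, with the ..._BW_VALUE_UNIT_* values pre-shifted in the defines). A minimal encoding sketch built on the queue 0 defines; cos2bw_pack_bw() is a hypothetical helper for illustration, not a function in the bnxt driver:

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Illustrative only: pack one bandwidth word for
 * hwrm_queue_cos2bw_cfg_input using the masks defined above.
 */
static __le32 cos2bw_pack_bw(u32 rate, u32 unit, bool scale_bytes)
{
	u32 v = rate & QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK;

	/* Bit 28 selects the scale: 0 counts bits, 1 counts bytes. */
	v |= scale_bytes ? QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
			 : QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS;
	/* unit is one of the ..._BW_VALUE_UNIT_* values, already shifted. */
	v |= unit;
	return cpu_to_le32(v);
}

For example, cos2bw_pack_bw(2500, QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100, false) would express 25.00% (the unit name suggests hundredths of a percent) as a value for queue_id0_min_bw.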

linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:4655 (struct xstorm_eth_hw_conn_ag_ctx)

/*
 * The parallel excerpt at linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:3267
 * (struct xstorm_eth_conn_ag_ctx) is identical over the range shown, except
 * that every XSTORM_ETH_HW_CONN_AG_CTX_ prefix reads XSTORM_ETH_CONN_AG_CTX_
 * instead; it is not repeated below.
 */
u8 reserved0;
u8 state;
u8 flags0;
#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 │ #define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 │ #define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 │ #define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 │ #define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 │ #define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 │ #define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 │ #define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 │ #define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 │ #define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 │ #define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 │ #define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 │ #define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id; │ u8 edpm_event_id;
__le16 physical_q0; │ __le16 physical_q0;
__le16 e5_reserved1; │ __le16 e5_reserved1;
__le16 edpm_num_bds; │ __le16 edpm_num_bds;
__le16 tx_bd_cons; │ __le16 tx_bd_cons;
__le16 tx_bd_prod; │ __le16 tx_bd_prod;
__le16 updated_qm_pq_id; │ __le16 updated_qm_pq_id;
__le16 conn_dpi; │ __le16 conn_dpi;
│ u8 byte3;
│ u8 byte4;
│ u8 byte5;
│ u8 byte6;
│ __le32 reg0;
│ __le32 reg1;
│ __le32 reg2;
│ __le32 reg3;
│ __le32 reg4;
│ __le32 reg5;
│ __le32 reg6;
│ __le16 word7;
│ __le16 word8;
│ __le16 word9;
│ __le16 word10;
│ __le32 reg7;
│ __le32 reg8;
│ __le32 reg9;
│ u8 byte7;
│ u8 byte8;
│ u8 byte9;
│ u8 byte10;
│ u8 byte11;
│ u8 byte12;
│ u8 byte13;
│ u8 byte14;
│ u8 byte15;
│ u8 e5_reserved;
│ __le16 word11;
│ __le32 reg10;
│ __le32 reg11;
│ __le32 reg12;
│ __le32 reg13;
│ __le32 reg14;
│ __le32 reg15;
│ __le32 reg16;
│ __le32 reg17;
│ __le32 reg18;
│ __le32 reg19;
│ __le16 word12;
│ __le16 word13;
│ __le16 word14;
│ __le16 word15;
} │
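
The *_MASK/*_SHIFT pairs above describe sub-byte fields packed into the flags bytes of the aggregation context. A minimal sketch of how such pairs are typically consumed follows; the GET_FIELD()/SET_FIELD() helpers are written out here for illustration (the qed-family headers carry comparable token-pasting macros keyed on the same _MASK/_SHIFT suffix convention, but treat the exact names as an assumption):

/* Illustrative only -- generic accessors for a field declared as a
 * NAME_MASK/NAME_SHIFT pair, e.g. the 2-bit TPH_ENABLE field at
 * bits 7:6 of flags14.
 */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~(name##_MASK << name##_SHIFT);		\
		(value) |= (((flag) & name##_MASK) << name##_SHIFT);	\
	} while (0)

/* Usage sketch: set TPH_ENABLE without disturbing the EDPM bits that
 * share flags14.
 *
 *	u8 flags14 = ctx->flags14;
 *	SET_FIELD(flags14, XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE, 1);
 *	ctx->flags14 = flags14;
 */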
linux/drivers/net/ethernet/broadcom/cnic_defs.h:2141                             │ linux/drivers/net/ethernet/broadcom/cnic_defs.h:1904
│
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u16 agg_val1; │ u16 agg_val1;
u8 agg_vars1; │ u8 agg_vars1;
#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) │ #define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 │ #define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) │ #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 │ #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) │ #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 │ #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) │ #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 │ #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) │ #define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 │ #define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5) │ #define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5 │ #define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) │ #define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 │ #define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
u8 state; │ u8 state;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u8 state; │ u8 state;
u8 agg_vars1; │ u8 agg_vars1;
#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) │ #define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 │ #define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) │ #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 │ #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) │ #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 │ #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) │ #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 │ #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) │ #define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 │ #define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5) │ #define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5 │ #define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) │ #define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 │ #define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
u16 agg_val1; │ u16 agg_val1;
#endif │ #endif
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u8 cdu_reserved; │ u8 cdu_reserved;
u8 __agg_vars4; │ u8 __agg_vars4;
u8 agg_vars3; │ u8 agg_vars3;
#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) │ #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 │ #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) │ #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 │ #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
u8 agg_vars2; │ u8 agg_vars2;
#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0) │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0 │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7) │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7 │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u8 agg_vars2; │ u8 agg_vars2;
#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0) │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0 │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7) │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7 │ #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
u8 agg_vars3; │ u8 agg_vars3;
#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) │ #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 │ #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) │ #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 │ #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
u8 __agg_vars4; │ u8 __agg_vars4;
u8 cdu_reserved; │ u8 cdu_reserved;
#endif │ #endif
u32 more_to_send; │ u32 more_to_send;
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u16 agg_vars5; │ u16 agg_vars5;
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) │ #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 │ #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) │ #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 │ #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
u16 agg_val4_th; │ u16 sq_cons;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u16 agg_val4_th; │ u16 sq_cons;
u16 agg_vars5; │ u16 agg_vars5;
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) │ #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 │ #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) │ #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 │ #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
#endif │ #endif
struct xstorm_tcp_tcp_ag_context_section tcp; │ struct xstorm_tcp_tcp_ag_context_section tcp;
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u16 agg_vars7; │ u16 agg_vars7;
#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) │ #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 │ #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) │ #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 │ #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8) │ #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8 │ #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) │ #define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 │ #define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) │ #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 │ #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
u8 agg_val3_th; │ u8 agg_val3_th;
u8 agg_vars6; │ u8 agg_vars6;
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u8 agg_vars6; │ u8 agg_vars6;
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
u8 agg_val3_th; │ u8 agg_val3_th;
u16 agg_vars7; │ u16 agg_vars7;
#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) │ #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 │ #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) │ #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 │ #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6) │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6 │ #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8) │ #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8 │ #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) │ #define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 │ #define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14) │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14 │ #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) │ #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 │ #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
#endif │ #endif
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u16 __agg_val11_th; │ u16 __agg_val11_th;
u16 __gen_data; │ u16 __gen_data;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u16 __gen_data; │ u16 __gen_data;
u16 __agg_val11_th; │ u16 __agg_val11_th;
#endif │ #endif
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u8 __reserved1; │ u8 __reserved1;
u8 __agg_val6_th; │ u8 __agg_val6_th;
u16 __agg_val9; │ u16 __agg_val9;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u16 __agg_val9; │ u16 __agg_val9;
u8 __agg_val6_th; │ u8 __agg_val6_th;
u8 __reserved1; │ u8 __reserved1;
#endif │ #endif
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u16 agg_val2_th; │ u16 hq_prod;
u16 agg_val2; │ u16 hq_cons;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u16 agg_val2; │ u16 hq_cons;
u16 agg_val2_th; │ u16 hq_prod;
#endif │ #endif
u32 agg_vars8; │ u32 agg_vars8;
#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) │ #define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2_SHIFT 0 │ #define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0
#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3 (0xFF<<24) │ #define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3_SHIFT 24 │ #define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u16 agg_misc0; │ u16 r2tq_prod;
u16 agg_val4; │ u16 sq_prod;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u16 agg_val4; │ u16 sq_prod;
u16 agg_misc0; │ u16 r2tq_prod;
#endif │ #endif
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u8 agg_val3; │ u8 agg_val3;
u8 agg_val6; │ u8 agg_val6;
u8 agg_val5_th; │ u8 agg_val5_th;
u8 agg_val5; │ u8 agg_val5;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u8 agg_val5; │ u8 agg_val5;
u8 agg_val5_th; │ u8 agg_val5_th;
u8 agg_val6; │ u8 agg_val6;
u8 agg_val3; │ u8 agg_val3;
#endif │ #endif
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u16 __agg_misc1; │ u16 __agg_misc1;
u16 agg_limit1; │ u16 agg_limit1;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u16 agg_limit1; │ u16 agg_limit1;
u16 __agg_misc1; │ u16 __agg_misc1;
#endif │ #endif
u32 completion_seq; │ u32 hq_cons_tcp_seq;
u32 agg_misc4; │ u32 exp_stat_sn;
u32 rst_seq_num; │ u32 rst_seq_num;
} │
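
Every multi-byte word of this context is declared twice, with member order reversed between __BIG_ENDIAN and __LITTLE_ENDIAN. A hedged reading of the idiom: the chip consumes the context as little-endian 32-bit words, and on big-endian hosts each word is byte-swapped on its way to the device, so reversing the declaration order inside each word keeps every field on the byte the firmware expects. A minimal sketch (hypothetical struct name, kernel byteorder macros assumed):

/* Illustrative only: the endian-mirrored layout idiom.  On either
 * host, "state" lands in byte 0 of the little-endian 32-bit word the
 * firmware sees.
 */
struct example_ag_word {
#if defined(__BIG_ENDIAN)
	u16 agg_val1;		/* bytes 2..3 of the device word */
	u8 agg_vars1;		/* byte 1 */
	u8 state;		/* byte 0, after the 32-bit swap */
#elif defined(__LITTLE_ENDIAN)
	u8 state;		/* byte 0, no swap needed */
	u8 agg_vars1;
	u16 agg_val1;
#endif
};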
linux/drivers/net/ethernet/netronome/nfp/nfdk/dp.c:980                           │ linux/drivers/net/ethernet/netronome/nfp/nfd3/dp.c:844
│
struct nfp_net_r_vector *r_vec = rx_ring->r_vec; │ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp; │ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
struct nfp_net_tx_ring *tx_ring; │ struct nfp_net_tx_ring *tx_ring;
struct bpf_prog *xdp_prog; │ struct bpf_prog *xdp_prog;
bool xdp_tx_cmpl = false; │ bool xdp_tx_cmpl = false;
unsigned int true_bufsz; │ unsigned int true_bufsz;
struct sk_buff *skb; │ struct sk_buff *skb;
int pkts_polled = 0; │ int pkts_polled = 0;
struct xdp_buff xdp; │ struct xdp_buff xdp;
int idx; │ int idx;
│
xdp_prog = READ_ONCE(dp->xdp_prog); │ xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz; │ true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM, │ xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
&rx_ring->xdp_rxq); │ &rx_ring->xdp_rxq);
tx_ring = r_vec->xdp_ring; │ tx_ring = r_vec->xdp_ring;
│
while (pkts_polled < budget) { │ while (pkts_polled < budget) {
unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off; │ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
struct nfp_net_rx_buf *rxbuf; │ struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd; │ struct nfp_net_rx_desc *rxd;
struct nfp_meta_parsed meta; │ struct nfp_meta_parsed meta;
bool redir_egress = false; │ bool redir_egress = false;
struct net_device *netdev; │ struct net_device *netdev;
dma_addr_t new_dma_addr; │ dma_addr_t new_dma_addr;
u32 meta_len_xdp = 0; │ u32 meta_len_xdp = 0;
void *new_frag; │ void *new_frag;
│
idx = D_IDX(rx_ring, rx_ring->rd_p); │ idx = D_IDX(rx_ring, rx_ring->rd_p);
│
rxd = &rx_ring->rxds[idx]; │ rxd = &rx_ring->rxds[idx];
if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) │ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
break; │ break;
│
/* Memory barrier to ensure that we won't do other reads │ /* Memory barrier to ensure that we won't do other reads
* before the DD bit. │ * before the DD bit.
*/ │ */
dma_rmb(); │ dma_rmb();
│
memset(&meta, 0, sizeof(meta)); │ memset(&meta, 0, sizeof(meta));
│
rx_ring->rd_p++; │ rx_ring->rd_p++;
pkts_polled++; │ pkts_polled++;
│
rxbuf = &rx_ring->rxbufs[idx]; │ rxbuf = &rx_ring->rxbufs[idx];
/* < meta_len > │ /* < meta_len >
* <-- [rx_offset] --> │ * <-- [rx_offset] -->
* --------------------------------------------------------- │ * ---------------------------------------------------------
* | [XX] | metadata | packet | XXXX | │ * | [XX] | metadata | packet | XXXX |
* --------------------------------------------------------- │ * ---------------------------------------------------------
* <---------------- data_len ---------------> │ * <---------------- data_len --------------->
* │ *
* The rx_offset is fixed for all packets, the meta_len can vary │ * The rx_offset is fixed for all packets, the meta_len can vary
* on a packet by packet basis. If rx_offset is set to zero │ * on a packet by packet basis. If rx_offset is set to zero
* (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the │ * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
* buffer and is immediately followed by the packet (no [XX]). │ * buffer and is immediately followed by the packet (no [XX]).
*/ │ */
meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK; │ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
data_len = le16_to_cpu(rxd->rxd.data_len); │ data_len = le16_to_cpu(rxd->rxd.data_len);
pkt_len = data_len - meta_len; │ pkt_len = data_len - meta_len;
│
pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off; │ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) │ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
pkt_off += meta_len; │ pkt_off += meta_len;
else │ else
pkt_off += dp->rx_offset; │ pkt_off += dp->rx_offset;
meta_off = pkt_off - meta_len; │ meta_off = pkt_off - meta_len;
│
/* Stats update */ │ /* Stats update */
u64_stats_update_begin(&r_vec->rx_sync); │ u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++; │ r_vec->rx_pkts++;
r_vec->rx_bytes += pkt_len; │ r_vec->rx_bytes += pkt_len;
u64_stats_update_end(&r_vec->rx_sync); │ u64_stats_update_end(&r_vec->rx_sync);
│
if (unlikely(meta_len > NFP_NET_MAX_PREPEND || │ if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
(dp->rx_offset && meta_len > dp->rx_offset))) { │ (dp->rx_offset && meta_len > dp->rx_offset))) {
nn_dp_warn(dp, "oversized RX packet metadata %u\n", │ nn_dp_warn(dp, "oversized RX packet metadata %u\n",
meta_len); │ meta_len);
nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); │ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
continue; │ continue;
} │ }
│
nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, │ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
data_len); │ data_len);
│
if (meta_len) { │ if (!dp->chained_metadata_format) {
if (unlikely(nfp_nfdk_parse_meta(dp->netdev, &meta, │ nfp_nfd3_set_hash_desc(dp->netdev, &meta,
│ rxbuf->frag + meta_off, rxd);
│ } else if (meta_len) {
│ if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
rxbuf->frag + meta_off, │ rxbuf->frag + meta_off,
rxbuf->frag + pkt_off, │ rxbuf->frag + pkt_off,
pkt_len, meta_len))) { │ pkt_len, meta_len))) {
nn_dp_warn(dp, "invalid RX packet metadata\n"); │ nn_dp_warn(dp, "invalid RX packet metadata\n");
nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, │ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
NULL); │ NULL);
continue; │ continue;
} │ }
} │ }
│
if (xdp_prog && !meta.portid) { │ if (xdp_prog && !meta.portid) {
void *orig_data = rxbuf->frag + pkt_off; │ void *orig_data = rxbuf->frag + pkt_off;
unsigned int dma_off; │ unsigned int dma_off;
int act; │ int act;
│
xdp_prepare_buff(&xdp, │ xdp_prepare_buff(&xdp,
rxbuf->frag + NFP_NET_RX_BUF_HEADROOM, │ rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
pkt_off - NFP_NET_RX_BUF_HEADROOM, │ pkt_off - NFP_NET_RX_BUF_HEADROOM,
pkt_len, true); │ pkt_len, true);
│
act = bpf_prog_run_xdp(xdp_prog, &xdp); │ act = bpf_prog_run_xdp(xdp_prog, &xdp);
│
pkt_len = xdp.data_end - xdp.data; │ pkt_len = xdp.data_end - xdp.data;
pkt_off += xdp.data - orig_data; │ pkt_off += xdp.data - orig_data;
│
switch (act) { │ switch (act) {
case XDP_PASS: │ case XDP_PASS:
meta_len_xdp = xdp.data - xdp.data_meta; │ meta_len_xdp = xdp.data - xdp.data_meta;
break; │ break;
case XDP_TX: │ case XDP_TX:
dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM; │ dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
if (unlikely(!nfp_nfdk_tx_xdp_buf(dp, rx_ring, │ if (unlikely(!nfp_nfd3_tx_xdp_buf(dp, rx_ring,
tx_ring, │ tx_ring,
rxbuf, │ rxbuf,
dma_off, │ dma_off,
pkt_len, │ pkt_len,
&xdp_tx_cmpl))) │ &xdp_tx_cmpl)))
trace_xdp_exception(dp->netdev, │ trace_xdp_exception(dp->netdev,
xdp_prog, act); │ xdp_prog, act);
continue; │ continue;
default: │ default:
bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act); │ bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
fallthrough; │ fallthrough;
case XDP_ABORTED: │ case XDP_ABORTED:
trace_xdp_exception(dp->netdev, xdp_prog, act); │ trace_xdp_exception(dp->netdev, xdp_prog, act);
fallthrough; │ fallthrough;
case XDP_DROP: │ case XDP_DROP:
nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag, │ nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
rxbuf->dma_addr); │ rxbuf->dma_addr);
continue; │ continue;
} │ }
} │ }
│
if (likely(!meta.portid)) { │ if (likely(!meta.portid)) {
netdev = dp->netdev; │ netdev = dp->netdev;
} else if (meta.portid == NFP_META_PORT_ID_CTRL) { │ } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
struct nfp_net *nn = netdev_priv(dp->netdev); │ struct nfp_net *nn = netdev_priv(dp->netdev);
│
nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off, │ nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
pkt_len); │ pkt_len);
nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag, │ nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
rxbuf->dma_addr); │ rxbuf->dma_addr);
continue; │ continue;
} else { │ } else {
struct nfp_net *nn; │ struct nfp_net *nn;
│
nn = netdev_priv(dp->netdev); │ nn = netdev_priv(dp->netdev);
netdev = nfp_app_dev_get(nn->app, meta.portid, │ netdev = nfp_app_dev_get(nn->app, meta.portid,
&redir_egress); │ &redir_egress);
if (unlikely(!netdev)) { │ if (unlikely(!netdev)) {
nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, │ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
NULL); │ NULL);
continue; │ continue;
} │ }
│
if (nfp_netdev_is_nfp_repr(netdev)) │ if (nfp_netdev_is_nfp_repr(netdev))
nfp_repr_inc_rx_stats(netdev, pkt_len); │ nfp_repr_inc_rx_stats(netdev, pkt_len);
} │ }
│
skb = build_skb(rxbuf->frag, true_bufsz); │ skb = build_skb(rxbuf->frag, true_bufsz);
if (unlikely(!skb)) { │ if (unlikely(!skb)) {
nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); │ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
continue; │ continue;
} │ }
new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr); │ new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
if (unlikely(!new_frag)) { │ if (unlikely(!new_frag)) {
nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); │ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
continue; │ continue;
} │ }
│
nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); │ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
│
nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); │ nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
│
skb_reserve(skb, pkt_off); │ skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len); │ skb_put(skb, pkt_len);
│
skb->mark = meta.mark; │ skb->mark = meta.mark;
skb_set_hash(skb, meta.hash, meta.hash_type); │ skb_set_hash(skb, meta.hash, meta.hash_type);
│
skb_record_rx_queue(skb, rx_ring->idx); │ skb_record_rx_queue(skb, rx_ring->idx);
skb->protocol = eth_type_trans(skb, netdev); │ skb->protocol = eth_type_trans(skb, netdev);
│
nfp_nfdk_rx_csum(dp, r_vec, rxd, &meta, skb); │ nfp_nfd3_rx_csum(dp, r_vec, rxd, &meta, skb);
│
│ #ifdef CONFIG_TLS_DEVICE
│ if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
│ skb->decrypted = true;
│ u64_stats_update_begin(&r_vec->rx_sync);
│ r_vec->hw_tls_rx++;
│ u64_stats_update_end(&r_vec->rx_sync);
│ }
│ #endif
│
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) │ if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), │ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxd->rxd.vlan)); │ le16_to_cpu(rxd->rxd.vlan));
if (meta_len_xdp) │ if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp); │ skb_metadata_set(skb, meta_len_xdp);
│
if (likely(!redir_egress)) { │ if (likely(!redir_egress)) {
napi_gro_receive(&rx_ring->r_vec->napi, skb); │ napi_gro_receive(&rx_ring->r_vec->napi, skb);
} else { │ } else {
skb->dev = netdev; │ skb->dev = netdev;
skb_reset_network_header(skb); │ skb_reset_network_header(skb);
__skb_push(skb, ETH_HLEN); │ __skb_push(skb, ETH_HLEN);
dev_queue_xmit(skb); │ dev_queue_xmit(skb);
} │ }
} │ }
│
if (xdp_prog) { │ if (xdp_prog) {
if (tx_ring->wr_ptr_add) │ if (tx_ring->wr_ptr_add)
nfp_net_tx_xmit_more_flush(tx_ring); │ nfp_net_tx_xmit_more_flush(tx_ring);
else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) && │ else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
!xdp_tx_cmpl) │ !xdp_tx_cmpl)
if (!nfp_nfdk_xdp_complete(tx_ring)) │ if (!nfp_nfd3_xdp_complete(tx_ring))
pkts_polled = budget; │ pkts_polled = budget;
} │ }
│
return pkts_polled; │ return pkts_polled;
} │
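
The top of the poll loop above relies on a descriptor-ownership handshake: test the DD ("descriptor done") bit first, then issue dma_rmb() so no other descriptor field is read until the DD check is known to have passed. A minimal sketch of that pattern, reusing the types from the function above (the helper itself is hypothetical):

/* Illustrative only: returns true once the NIC has handed the
 * descriptor back to the CPU and it is safe to read its other fields.
 */
static bool rx_desc_done(struct nfp_net_rx_ring *rx_ring, int idx)
{
	struct nfp_net_rx_desc *rxd = &rx_ring->rxds[idx];

	if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
		return false;		/* hardware still owns it */

	dma_rmb();	/* order the DD read before data_len/flags reads */
	return true;
}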
linux/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:1413                    │ linux/drivers/net/ethernet/cavium/liquidio/lio_main.c:2293
│
│ struct lio *lio;
struct octnet_buf_free_info *finfo; │ struct octnet_buf_free_info *finfo;
union octnic_cmd_setup cmdsetup; │ union octnic_cmd_setup cmdsetup;
struct octnic_data_pkt ndata; │ struct octnic_data_pkt ndata;
struct octeon_instr_irh *irh; │
struct oct_iq_stats *stats; │
struct octeon_device *oct; │ struct octeon_device *oct;
int q_idx = 0, iq_no = 0; │ struct oct_iq_stats *stats;
│ struct octeon_instr_irh *irh;
union tx_info *tx_info; │ union tx_info *tx_info;
int xmit_more = 0; │
struct lio *lio; │
int status = 0; │ int status = 0;
│ int q_idx = 0, iq_no = 0;
│ int j, xmit_more = 0;
u64 dptr = 0; │ u64 dptr = 0;
u32 tag = 0; │ u32 tag = 0;
int j; │
│
lio = GET_LIO(netdev); │ lio = GET_LIO(netdev);
oct = lio->oct_dev; │ oct = lio->oct_dev;
│
q_idx = skb_iq(lio->oct_dev, skb); │ q_idx = skb_iq(oct, skb);
tag = q_idx; │ tag = q_idx;
iq_no = lio->linfo.txpciq[q_idx].s.q_no; │ iq_no = lio->linfo.txpciq[q_idx].s.q_no;
│
stats = &oct->instr_queue[iq_no]->stats; │ stats = &oct->instr_queue[iq_no]->stats;
│
/* Check for all conditions in which the current packet cannot be │ /* Check for all conditions in which the current packet cannot be
* transmitted. │ * transmitted.
*/ │ */
if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || │ if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
(!lio->linfo.link.s.link_up) || (skb->len <= 0)) { │ (!lio->linfo.link.s.link_up) ||
		netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",  │	    (skb->len <= 0)) {
│ netif_info(lio, tx_err, lio->netdev,
│ "Transmit failed link_status : %d\n",
lio->linfo.link.s.link_up); │ lio->linfo.link.s.link_up);
goto lio_xmit_failed; │ goto lio_xmit_failed;
} │ }
│
/* Use space in skb->cb to store info used to unmap and │ /* Use space in skb->cb to store info used to unmap and
* free the buffers. │ * free the buffers.
*/ │ */
finfo = (struct octnet_buf_free_info *)skb->cb; │ finfo = (struct octnet_buf_free_info *)skb->cb;
finfo->lio = lio; │ finfo->lio = lio;
finfo->skb = skb; │ finfo->skb = skb;
finfo->sc = NULL; │ finfo->sc = NULL;
│
/* Prepare the attributes for the data to be passed to OSI. */ │ /* Prepare the attributes for the data to be passed to OSI. */
memset(&ndata, 0, sizeof(struct octnic_data_pkt)); │ memset(&ndata, 0, sizeof(struct octnic_data_pkt));
│
ndata.buf = finfo; │ ndata.buf = (void *)finfo;
│
ndata.q_no = iq_no; │ ndata.q_no = iq_no;
│
if (octnet_iq_is_full(oct, ndata.q_no)) { │ if (octnet_iq_is_full(oct, ndata.q_no)) {
/* defer sending if queue is full */ │ /* defer sending if queue is full */
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", │ netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
ndata.q_no); │ ndata.q_no);
stats->tx_iq_busy++; │ stats->tx_iq_busy++;
return NETDEV_TX_BUSY; │ return NETDEV_TX_BUSY;
} │ }
│
│ /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
│ * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
│ */
│
ndata.datasize = skb->len; │ ndata.datasize = skb->len;
│
cmdsetup.u64 = 0; │ cmdsetup.u64 = 0;
cmdsetup.s.iq_no = iq_no; │ cmdsetup.s.iq_no = iq_no;
│
if (skb->ip_summed == CHECKSUM_PARTIAL) { │ if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (skb->encapsulation) { │ if (skb->encapsulation) {
cmdsetup.s.tnl_csum = 1; │ cmdsetup.s.tnl_csum = 1;
stats->tx_vxlan++; │ stats->tx_vxlan++;
} else { │ } else {
cmdsetup.s.transport_csum = 1; │ cmdsetup.s.transport_csum = 1;
} │ }
} │ }
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { │ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; │ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
cmdsetup.s.timestamp = 1; │ cmdsetup.s.timestamp = 1;
} │ }
│
if (!skb_shinfo(skb)->nr_frags) { │ if (skb_shinfo(skb)->nr_frags == 0) {
cmdsetup.s.u.datasize = skb->len; │ cmdsetup.s.u.datasize = skb->len;
octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); │ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
│
/* Offload checksum calculation for TCP/UDP packets */ │ /* Offload checksum calculation for TCP/UDP packets */
dptr = dma_map_single(&oct->pci_dev->dev, │ dptr = dma_map_single(&oct->pci_dev->dev,
skb->data, │ skb->data,
skb->len, │ skb->len,
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { │ if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", │ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
__func__); │ __func__);
│ stats->tx_dmamap_fail++;
return NETDEV_TX_BUSY; │ return NETDEV_TX_BUSY;
} │ }
│
ndata.cmd.cmd3.dptr = dptr; │ if (OCTEON_CN23XX_PF(oct))
│ ndata.cmd.cmd3.dptr = dptr;
│ else
│ ndata.cmd.cmd2.dptr = dptr;
finfo->dptr = dptr; │ finfo->dptr = dptr;
ndata.reqtype = REQTYPE_NORESP_NET; │ ndata.reqtype = REQTYPE_NORESP_NET;
│
} else { │ } else {
│ int i, frags;
skb_frag_t *frag; │ skb_frag_t *frag;
struct octnic_gather *g; │ struct octnic_gather *g;
int i, frags; │
│
spin_lock(&lio->glist_lock[q_idx]); │ spin_lock(&lio->glist_lock[q_idx]);
g = (struct octnic_gather *) │ g = (struct octnic_gather *)
lio_list_delete_head(&lio->glist[q_idx]); │ lio_list_delete_head(&lio->glist[q_idx]);
spin_unlock(&lio->glist_lock[q_idx]); │ spin_unlock(&lio->glist_lock[q_idx]);
│
if (!g) { │ if (!g) {
netif_info(lio, tx_err, lio->netdev, │ netif_info(lio, tx_err, lio->netdev,
"Transmit scatter gather: glist null!\n"); │ "Transmit scatter gather: glist null!\n");
goto lio_xmit_failed; │ goto lio_xmit_failed;
} │ }
│
cmdsetup.s.gather = 1; │ cmdsetup.s.gather = 1;
cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); │ cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); │ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
│
memset(g->sg, 0, g->sg_size); │ memset(g->sg, 0, g->sg_size);
│
g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, │ g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
skb->data, │ skb->data,
(skb->len - skb->data_len), │ (skb->len - skb->data_len),
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { │ if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", │ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
__func__); │ __func__);
│ stats->tx_dmamap_fail++;
return NETDEV_TX_BUSY; │ return NETDEV_TX_BUSY;
} │ }
add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); │ add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
│
frags = skb_shinfo(skb)->nr_frags; │ frags = skb_shinfo(skb)->nr_frags;
i = 1; │ i = 1;
while (frags--) { │ while (frags--) {
frag = &skb_shinfo(skb)->frags[i - 1]; │ frag = &skb_shinfo(skb)->frags[i - 1];
│
g->sg[(i >> 2)].ptr[(i & 3)] = │ g->sg[(i >> 2)].ptr[(i & 3)] =
skb_frag_dma_map(&oct->pci_dev->dev, │ skb_frag_dma_map(&oct->pci_dev->dev,
frag, 0, skb_frag_size(frag), │ frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
│
if (dma_mapping_error(&oct->pci_dev->dev, │ if (dma_mapping_error(&oct->pci_dev->dev,
g->sg[i >> 2].ptr[i & 3])) { │ g->sg[i >> 2].ptr[i & 3])) {
dma_unmap_single(&oct->pci_dev->dev, │ dma_unmap_single(&oct->pci_dev->dev,
g->sg[0].ptr[0], │ g->sg[0].ptr[0],
skb->len - skb->data_len, │ skb->len - skb->data_len,
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
for (j = 1; j < i; j++) { │ for (j = 1; j < i; j++) {
frag = &skb_shinfo(skb)->frags[j - 1]; │ frag = &skb_shinfo(skb)->frags[j - 1];
dma_unmap_page(&oct->pci_dev->dev, │ dma_unmap_page(&oct->pci_dev->dev,
g->sg[j >> 2].ptr[j & 3], │ g->sg[j >> 2].ptr[j & 3],
skb_frag_size(frag), │ skb_frag_size(frag),
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
} │ }
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", │ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
__func__); │ __func__);
return NETDEV_TX_BUSY; │ return NETDEV_TX_BUSY;
} │ }
│
add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), │ add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
(i & 3)); │ (i & 3));
i++; │ i++;
} │ }
│
dptr = g->sg_dma_ptr; │ dptr = g->sg_dma_ptr;
│
ndata.cmd.cmd3.dptr = dptr; │ if (OCTEON_CN23XX_PF(oct))
│ ndata.cmd.cmd3.dptr = dptr;
│ else
│ ndata.cmd.cmd2.dptr = dptr;
finfo->dptr = dptr; │ finfo->dptr = dptr;
finfo->g = g; │ finfo->g = g;
│
ndata.reqtype = REQTYPE_NORESP_NET_SG; │ ndata.reqtype = REQTYPE_NORESP_NET_SG;
} │ }
│
irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; │ if (OCTEON_CN23XX_PF(oct)) {
tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; │ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
│ tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
│ } else {
│ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
│ tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
│ }
│
if (skb_shinfo(skb)->gso_size) { │ if (skb_shinfo(skb)->gso_size) {
tx_info->s.gso_size = skb_shinfo(skb)->gso_size; │ tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; │ tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
│ stats->tx_gso++;
} │ }
│
/* HW insert VLAN tag */ │ /* HW insert VLAN tag */
if (skb_vlan_tag_present(skb)) { │ if (skb_vlan_tag_present(skb)) {
irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; │ irh->priority = skb_vlan_tag_get(skb) >> 13;
irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; │ irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
} │ }
│
xmit_more = netdev_xmit_more(); │ xmit_more = netdev_xmit_more();
│
if (unlikely(cmdsetup.s.timestamp)) │ if (unlikely(cmdsetup.s.timestamp))
status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); │ status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
else │ else
status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); │ status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
if (status == IQ_SEND_FAILED) │ if (status == IQ_SEND_FAILED)
goto lio_xmit_failed; │ goto lio_xmit_failed;
│
netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); │ netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
│
if (status == IQ_SEND_STOP) { │ if (status == IQ_SEND_STOP)
dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n" │
iq_no); │
netif_stop_subqueue(netdev, q_idx); │ netif_stop_subqueue(netdev, q_idx);
} │
│
netif_trans_update(netdev); │ netif_trans_update(netdev);
│
if (tx_info->s.gso_segs) │ if (tx_info->s.gso_segs)
stats->tx_done += tx_info->s.gso_segs; │ stats->tx_done += tx_info->s.gso_segs;
else │ else
stats->tx_done++; │ stats->tx_done++;
stats->tx_tot_bytes += ndata.datasize; │ stats->tx_tot_bytes += ndata.datasize;
│
return NETDEV_TX_OK; │ return NETDEV_TX_OK;
│
lio_xmit_failed: │ lio_xmit_failed:
stats->tx_dropped++; │ stats->tx_dropped++;
netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", │ netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
iq_no, stats->tx_dropped); │ iq_no, stats->tx_dropped);
if (dptr) │ if (dptr)
dma_unmap_single(&oct->pci_dev->dev, dptr, │ dma_unmap_single(&oct->pci_dev->dev, dptr,
ndata.datasize, DMA_TO_DEVICE); │ ndata.datasize, DMA_TO_DEVICE);
│
octeon_ring_doorbell_locked(oct, iq_no); │ octeon_ring_doorbell_locked(oct, iq_no);
│
tx_buffer_free(skb); │ tx_buffer_free(skb);
return NETDEV_TX_OK; │ return NETDEV_TX_OK;
} │
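
The gather path above indexes the list as g->sg[i >> 2].ptr[i & 3]: each OCTEON scatter/gather entry packs four DMA pointers (and four per-slot sizes), so fragment i maps to entry i / 4, slot i % 4. A minimal sketch of that addressing, with an illustrative entry layout rather than the exact driver definition:

/* Illustrative only: one gather entry holds four pointer/size slots. */
struct example_sg_entry {
	u16 size[4];			/* per-slot buffer lengths */
	u64 ptr[4];			/* per-slot DMA addresses  */
};

static inline void sg_set_frag(struct example_sg_entry *sg,
			       unsigned int i, u64 dma_addr, u16 len)
{
	sg[i >> 2].ptr[i & 3] = dma_addr;	/* entry i / 4, slot i % 4 */
	sg[i >> 2].size[i & 3] = len;
}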
linux/drivers/net/ethernet/broadcom/cnic_defs.h:1054                             │ linux/drivers/net/ethernet/broadcom/cnic_defs.h:1197
│
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u8 __aux_counter_flags; │ u8 __aux_counter_flags;
u8 agg_vars2; │ u8 agg_vars2;
#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0) │ #define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0 │ #define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2) │ #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2 │ #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) │ #define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 │ #define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) │ #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 │ #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
u8 agg_vars1; │ u8 agg_vars1;
#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) │ #define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 │ #define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) │ #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 │ #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) │ #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 │ #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) │ #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 │ #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4) │ #define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4 │ #define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6) │ #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6 │ #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
u8 state; │ u8 state;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u8 state; │ u8 state;
u8 agg_vars1; │ u8 agg_vars1;
#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) │ #define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 │ #define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) │ #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 │ #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) │ #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 │ #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) │ #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 │ #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4) │ #define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4 │ #define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6) │ #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6 │ #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
u8 agg_vars2; │ u8 agg_vars2;
#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0) │ #define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0 │ #define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2) │ #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2 │ #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) │ #define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 │ #define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) │ #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 │ #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
u8 __aux_counter_flags; │ u8 __aux_counter_flags;
#endif │ #endif
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u8 cdu_usage; │ u8 cdu_usage;
u8 agg_misc2; │ u8 agg_misc2;
u16 pbf_tx_seq_ack; │ u16 __cq_local_comp_itt_val;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u16 pbf_tx_seq_ack; │ u16 __cq_local_comp_itt_val;
u8 agg_misc2; │ u8 agg_misc2;
u8 cdu_usage; │ u8 cdu_usage;
#endif │ #endif
u32 agg_misc4; │ u32 agg_misc4;
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u8 agg_val3_th; │ u8 agg_val3_th;
u8 agg_val3; │ u8 agg_val3;
u16 agg_misc3; │ u16 agg_misc3;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u16 agg_misc3; │ u16 agg_misc3;
u8 agg_val3; │ u8 agg_val3;
u8 agg_val3_th; │ u8 agg_val3_th;
#endif │ #endif
u32 expired_task_id; │ u32 agg_val1;
u32 agg_misc4_th; │ u32 agg_misc4_th;
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u16 cq_prod; │ u16 agg_val2_th;
u16 cq_cons; │ u16 agg_val2;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u16 cq_cons; │ u16 agg_val2;
u16 cq_prod; │ u16 agg_val2_th;
#endif │ #endif
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u16 __reserved2; │ u16 __reserved2;
u8 decision_rules; │ u8 decision_rules;
#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0) │ #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0 │ #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) │ #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 │ #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6) │ #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6 │ #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7) │ #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7 │ #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
u8 decision_rule_enable_bits; │ u8 decision_rule_enable_bits;
#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0) │ #define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0 │ #define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) │ #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 │ #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2) │ #define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2 │ #define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3) │ #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 │ #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4) │ #define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4 │ #define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5) │ #define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5 │ #define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6) │ #define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 │ #define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) │ #define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 │ #define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u8 decision_rule_enable_bits; │ u8 decision_rule_enable_bits;
#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0) │ #define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0 │ #define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) │ #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 │ #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2) │ #define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2 │ #define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3) │ #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 │ #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4) │ #define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4 │ #define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5) │ #define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5 │ #define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6) │ #define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 │ #define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) │ #define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 │ #define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
u8 decision_rules; │ u8 decision_rules;
#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0) │ #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0 │ #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) │ #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 │ #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6) │ #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6 │ #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7) │ #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7 │ #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
u16 __reserved2; │ u16 __reserved2;
#endif │ #endif
} │
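Both variants pack sub-byte fields into the flags/decision bytes with paired _MASK/_SHIFT macros, and the masks are already shifted into field position. A minimal sketch of the read-modify-write idiom such macros imply (the helper names and standalone types are mine, not the driver's):

    #include <stdint.h>

    /* Mirrors the convention of USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE above:
     * a pre-shifted 3-bit mask plus its shift amount. */
    #define CQ_DEC_RULE_MASK	(0x7 << 0)
    #define CQ_DEC_RULE_SHIFT	0

    /* Hypothetical setter: clear the field, then OR in the new value. */
    static inline void set_cq_dec_rule(uint8_t *decision_rules, uint8_t rule)
    {
    	*decision_rules &= ~CQ_DEC_RULE_MASK;
    	*decision_rules |= (rule << CQ_DEC_RULE_SHIFT) & CQ_DEC_RULE_MASK;
    }

    /* Hypothetical getter: mask first, then shift down. */
    static inline uint8_t get_cq_dec_rule(uint8_t decision_rules)
    {
    	return (decision_rules & CQ_DEC_RULE_MASK) >> CQ_DEC_RULE_SHIFT;
    }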
linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c:132 │ linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c:39
│
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { │ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 3 │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 3
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = { │ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end =
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = { │ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = { │ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = { │ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = { │ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = { │ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, │ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = { │ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, │ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = { │ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, │ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = { │ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, │ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = { │ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, │ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = { │ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, │ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = { │ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, │ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = { │ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end =
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, │ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = { │ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end =
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, │ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = { │ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end =
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, │ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = { │ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end =
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, │ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = { │ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end =
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, │ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = { │ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end =
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, │ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = { │ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end =
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, │ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = { │ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end =
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, │ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = { │ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, │ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = { │ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, │ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
}, │ },
[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = { │ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 3 │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 3
}, │ },
[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = { │ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end =
}, │ },
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { │ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 3 │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 3
}, │ },
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { │ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 3 │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 3
}, │ },
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { │ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 3 │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 3
}, │ },
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { │ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 3 │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 3
}, │ },
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { │ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 3 │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 3
}, │ },
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { │ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 3 │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 3
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { │ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = { │ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = { │ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15, │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = { │ [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31 │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31
}, │ },
[MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = { │ [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15 │ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15
}, │ },
} │
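The v1 and v2 tables differ only in the DR_STE_V1_/DR_STE_V2_ hw_field namespace; each entry maps an MLX5_ACTION_IN_FIELD_* id to a hardware field plus an inclusive start/end bit range. A hedged sketch of how such an entry is typically consumed, assuming nothing beyond what the table shows (struct and helper names are illustrative, not the mlx5 API):

    #include <stdint.h>

    struct mdfy_field_info {
    	uint16_t hw_field;	/* hardware modify-header field id */
    	uint8_t  start;		/* first bit of the sub-field, inclusive */
    	uint8_t  end;		/* last bit of the sub-field, inclusive */
    };

    /* Width in bits of the sub-field described by one table entry. */
    static inline int mdfy_field_width(const struct mdfy_field_info *f)
    {
    	return f->end - f->start + 1;
    }

    /* Mask covering the sub-field inside the 32-bit hardware word; the
     * width-32 case is special-cased because 1u << 32 is undefined. */
    static inline uint32_t mdfy_field_mask(const struct mdfy_field_info *f)
    {
    	int width = mdfy_field_width(f);

    	if (width >= 32)
    		return 0xffffffffu;
    	return ((1u << width) - 1) << f->start;
    }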
linux/drivers/net/ethernet/atheros/atl1c/atl1c.h:297 │ linux/drivers/net/ethernet/atheros/atl1e/atl1e.h:246
│
/* rx */ │ /* rx */
unsigned long rx_ok; /* The number of good packet received. */ │ unsigned long rx_ok; /* The number of good packet received. */
unsigned long rx_bcast; /* The number of good broadcast packet received. │ unsigned long rx_bcast; /* The number of good broadcast packet received. *
unsigned long rx_mcast; /* The number of good multicast packet received. │ unsigned long rx_mcast; /* The number of good multicast packet received. *
unsigned long rx_pause; /* The number of Pause packet received. */ │ unsigned long rx_pause; /* The number of Pause packet received. */
unsigned long rx_ctrl; /* The number of Control packet received other t │ unsigned long rx_ctrl; /* The number of Control packet received other tha
unsigned long rx_fcs_err; /* The number of packets with bad FCS. */ │ unsigned long rx_fcs_err; /* The number of packets with bad FCS. */
unsigned long rx_len_err; /* The number of packets with mismatch of length │ unsigned long rx_len_err; /* The number of packets with mismatch of length f
unsigned long rx_byte_cnt; /* The number of bytes of good packet received. │ unsigned long rx_byte_cnt; /* The number of bytes of good packet received. FC
unsigned long rx_runt; /* The number of packets received that are less │ unsigned long rx_runt; /* The number of packets received that are less th
unsigned long rx_frag; /* The number of packets received that are less │ unsigned long rx_frag; /* The number of packets received that are less th
unsigned long rx_sz_64; /* The number of good and bad packets received t │ unsigned long rx_sz_64; /* The number of good and bad packets received tha
unsigned long rx_sz_65_127; /* The number of good and bad packets received t │ unsigned long rx_sz_65_127; /* The number of good and bad packets received tha
unsigned long rx_sz_128_255; /* The number of good and bad packets received t │ unsigned long rx_sz_128_255; /* The number of good and bad packets received tha
unsigned long rx_sz_256_511; /* The number of good and bad packets received t │ unsigned long rx_sz_256_511; /* The number of good and bad packets received tha
unsigned long rx_sz_512_1023; /* The number of good and bad packets received t │ unsigned long rx_sz_512_1023; /* The number of good and bad packets received tha
unsigned long rx_sz_1024_1518; /* The number of good and bad packets received t │ unsigned long rx_sz_1024_1518; /* The number of good and bad packets received
unsigned long rx_sz_1519_max; /* The number of good and bad packets received t │ unsigned long rx_sz_1519_max; /* The number of good and bad packets received tha
unsigned long rx_sz_ov; /* The number of good and bad packets received t │ unsigned long rx_sz_ov; /* The number of good and bad packets received tha
unsigned long rx_rxf_ov; /* The number of frame dropped due to occurrence │ unsigned long rx_rxf_ov; /* The number of frame dropped due to occurrence o
unsigned long rx_rrd_ov; /* The number of frame dropped due to occurrence │ unsigned long rx_rrd_ov; /* The number of frame dropped due to occurrence o
unsigned long rx_align_err; /* Alignment Error */ │ unsigned long rx_align_err; /* Alignment Error */
unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packet received, │ unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packet received
unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packet received, │ unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packet received
unsigned long rx_err_addr; /* The number of packets dropped due to address │ unsigned long rx_err_addr; /* The number of packets dropped due to address fi
│
/* tx */ │ /* tx */
unsigned long tx_ok; /* The number of good packet transmitted. */ │ unsigned long tx_ok; /* The number of good packet transmitted. */
unsigned long tx_bcast; /* The number of good broadcast packet transmitt │ unsigned long tx_bcast; /* The number of good broadcast packet transmitted
unsigned long tx_mcast; /* The number of good multicast packet transmitt │ unsigned long tx_mcast; /* The number of good multicast packet transmitted
unsigned long tx_pause; /* The number of Pause packet transmitted. */ │ unsigned long tx_pause; /* The number of Pause packet transmitted. */
unsigned long tx_exc_defer; /* The number of packets transmitted with excess │ unsigned long tx_exc_defer; /* The number of packets transmitted with excessiv
unsigned long tx_ctrl; /* The number of packets transmitted is a contro │ unsigned long tx_ctrl; /* The number of packets transmitted is a control
unsigned long tx_defer; /* The number of packets transmitted that is def │ unsigned long tx_defer; /* The number of packets transmitted that is defer
unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS │ unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is
unsigned long tx_sz_64; /* The number of good and bad packets transmitte │ unsigned long tx_sz_64; /* The number of good and bad packets transmitted
unsigned long tx_sz_65_127; /* The number of good and bad packets transmitte │ unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted
unsigned long tx_sz_128_255; /* The number of good and bad packets transmitte │ unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted
unsigned long tx_sz_256_511; /* The number of good and bad packets transmitte │ unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted
unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitte │ unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted
unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitte │ unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmit
unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitte │ unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted
unsigned long tx_1_col; /* The number of packets subsequently transmitte │ unsigned long tx_1_col; /* The number of packets subsequently transmitted
unsigned long tx_2_col; /* The number of packets subsequently transmitte │ unsigned long tx_2_col; /* The number of packets subsequently transmitted
unsigned long tx_late_col; /* The number of packets transmitted with late c │ unsigned long tx_late_col; /* The number of packets transmitted with late col
unsigned long tx_abort_col; /* The number of transmit packets aborted due to │ unsigned long tx_abort_col; /* The number of transmit packets aborted due to e
unsigned long tx_underrun; /* The number of transmit packets aborted due to │ unsigned long tx_underrun; /* The number of transmit packets aborted due to t
unsigned long tx_rd_eop; /* The number of times that read beyond the EOP │ unsigned long tx_rd_eop; /* The number of times that read beyond the EOP in
unsigned long tx_len_err; /* The number of transmit packets with length fi │ unsigned long tx_len_err; /* The number of transmit packets with length fiel
unsigned long tx_trunc; /* The number of transmit packets truncated due │ unsigned long tx_trunc; /* The number of transmit packets truncated due to
unsigned long tx_bcast_byte; /* The byte count of broadcast packet transmitte │ unsigned long tx_bcast_byte; /* The byte count of broadcast packet transmitted,
unsigned long tx_mcast_byte; /* The byte count of multicast packet transmitte │ unsigned long tx_mcast_byte; /* The byte count of multicast packet transmitted,
} │
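The two statistics blocks are field-for-field identical; only the enclosing struct differs between the atl1c and atl1e drivers. One property worth noting is that the rx_sz_* buckets partition all received frames, so their sum is the total frame count. A small illustration of that, assuming the surrounding struct is atl1c_hw_stats (the helper itself comes from neither driver):

    /* Every good or bad frame lands in exactly one size bucket, so the
     * histogram sums to the total number of frames seen on receive. */
    static unsigned long rx_total_frames(const struct atl1c_hw_stats *s)
    {
    	return s->rx_sz_64 + s->rx_sz_65_127 + s->rx_sz_128_255 +
    	       s->rx_sz_256_511 + s->rx_sz_512_1023 +
    	       s->rx_sz_1024_1518 + s->rx_sz_1519_max + s->rx_sz_ov;
    }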
linux/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c:636 │ linux/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c:340
│
sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt], │ sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt],
spx5_inst_rd(inst, ASM_RX_UNDERSIZE_CNT(portno))); │ spx5_inst_rd(inst,
│ DEV5G_RX_UNDERSIZE_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_RX_UNDERSIZE_CNT(portno))); │ DEV5G_PMAC_RX_UNDERSIZE_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt], │ sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
spx5_inst_rd(inst, ASM_RX_OVERSIZE_CNT(portno))); │ spx5_inst_rd(inst, DEV5G_RX_OVERSIZE_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_RX_OVERSIZE_CNT(portno))); │ DEV5G_PMAC_RX_OVERSIZE_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt], │ sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt],
spx5_inst_rd(inst, ASM_RX_FRAGMENTS_CNT(portno))); │ spx5_inst_rd(inst,
│ DEV5G_RX_FRAGMENTS_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_RX_FRAGMENTS_CNT(portno))); │ DEV5G_PMAC_RX_FRAGMENTS_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt], │ sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt],
spx5_inst_rd(inst, ASM_RX_JABBERS_CNT(portno))); │ spx5_inst_rd(inst, DEV5G_RX_JABBERS_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_RX_JABBERS_CNT(portno))); │ DEV5G_PMAC_RX_JABBERS_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt], │ sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt],
spx5_inst_rd(inst, ASM_RX_SIZE64_CNT(portno))); │ spx5_inst_rd(inst, DEV5G_RX_SIZE64_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_RX_SIZE64_CNT(portno))); │ DEV5G_PMAC_RX_SIZE64_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt], │ sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_RX_SIZE65TO127_CNT(portno))); │ DEV5G_RX_SIZE65TO127_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_RX_SIZE65TO127_CNT(portno))); │ DEV5G_PMAC_RX_SIZE65TO127_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt], │ sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_RX_SIZE128TO255_CNT(portno))); │ DEV5G_RX_SIZE128TO255_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_RX_SIZE128TO255_CNT(portno))); │ DEV5G_PMAC_RX_SIZE128TO255_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt], │ sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_RX_SIZE256TO511_CNT(portno))); │ DEV5G_RX_SIZE256TO511_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_RX_SIZE256TO511_CNT(portno))); │ DEV5G_PMAC_RX_SIZE256TO511_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt], │ sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_RX_SIZE512TO1023_CNT(portno))); │ DEV5G_RX_SIZE512TO1023_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_RX_SIZE512TO1023_CNT(portno))); │ DEV5G_PMAC_RX_SIZE512TO1023_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt], │ sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_RX_SIZE1024TO1518_CNT(portno))); │ DEV5G_RX_SIZE1024TO1518_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_RX_SIZE1024TO1518_CNT(portno))); │ DEV5G_PMAC_RX_SIZE1024TO1518_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt], │ sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_RX_SIZE1519TOMAX_CNT(portno))); │ DEV5G_RX_SIZE1519TOMAX_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_RX_SIZE1519TOMAX_CNT(portno))); │ DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt], │ sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt],
spx5_inst_rd(inst, ASM_TX_SIZE64_CNT(portno))); │ spx5_inst_rd(inst, DEV5G_TX_SIZE64_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_TX_SIZE64_CNT(portno))); │ DEV5G_PMAC_TX_SIZE64_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt], │ sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_TX_SIZE65TO127_CNT(portno))); │ DEV5G_TX_SIZE65TO127_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_TX_SIZE65TO127_CNT(portno))); │ DEV5G_PMAC_TX_SIZE65TO127_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt], │ sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_TX_SIZE128TO255_CNT(portno))); │ DEV5G_TX_SIZE128TO255_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_TX_SIZE128TO255_CNT(portno))); │ DEV5G_PMAC_TX_SIZE128TO255_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt], │ sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_TX_SIZE256TO511_CNT(portno))); │ DEV5G_TX_SIZE256TO511_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_TX_SIZE256TO511_CNT(portno))); │ DEV5G_PMAC_TX_SIZE256TO511_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt], │ sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_TX_SIZE512TO1023_CNT(portno))); │ DEV5G_TX_SIZE512TO1023_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_TX_SIZE512TO1023_CNT(portno))); │ DEV5G_PMAC_TX_SIZE512TO1023_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt], │ sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_TX_SIZE1024TO1518_CNT(portno))); │ DEV5G_TX_SIZE1024TO1518_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_TX_SIZE1024TO1518_CNT(portno))); │ DEV5G_PMAC_TX_SIZE1024TO1518_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt], │ sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_TX_SIZE1519TOMAX_CNT(portno))); │ DEV5G_TX_SIZE1519TOMAX_CNT(tinst)));
sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt], │ sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt],
spx5_inst_rd(inst, │ spx5_inst_rd(inst,
ASM_PMAC_TX_SIZE1519TOMAX_CNT(portno))); │ DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(tinst)));
} │
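Both call sites feed sparx5_update_counter() a freshly read 32-bit hardware register; the only difference is the register bank (ASM_* indexed by portno versus DEV5G_* indexed by tinst). A plausible sketch of the wrap-safe accumulation such a helper performs, assuming 32-bit hardware counters folded into 64-bit software counters (this is not the kernel implementation itself):

    #include <stdint.h>

    /* Accumulate a 32-bit hardware reading into a 64-bit counter whose
     * low word tracks the last reading. Tolerates one wrap per poll. */
    static void update_counter(uint64_t *cnt, uint32_t val)
    {
    	if (val < (uint32_t)*cnt)		/* hardware counter wrapped */
    		*cnt += (uint64_t)1 << 32;	/* carry the wrap into bits 63:32 */
    	*cnt = (*cnt & ~(uint64_t)0xffffffff) | val;
    }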
linux/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c:266 │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c:1115
│
struct ixgbevf_adapter *adapter = q_vector->adapter; │ struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbevf_tx_buffer *tx_buffer; │ struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc; │ union ixgbe_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; │ unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
unsigned int budget = tx_ring->count / 2; │ unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean; │ unsigned int i = tx_ring->next_to_clean;
│
if (test_bit(__IXGBEVF_DOWN, &adapter->state)) │ if (test_bit(__IXGBE_DOWN, &adapter->state))
return true; │ return true;
│
tx_buffer = &tx_ring->tx_buffer_info[i]; │ tx_buffer = &tx_ring->tx_buffer_info[i];
tx_desc = IXGBEVF_TX_DESC(tx_ring, i); │ tx_desc = IXGBE_TX_DESC(tx_ring, i);
i -= tx_ring->count; │ i -= tx_ring->count;
│
do { │ do {
union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; │ union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
│
/* if next_to_watch is not set then there is no work pending */ │ /* if next_to_watch is not set then there is no work pending */
if (!eop_desc) │ if (!eop_desc)
break; │ break;
│
/* prevent any other reads prior to eop_desc */ │ /* prevent any other reads prior to eop_desc */
smp_rmb(); │ smp_rmb();
│
/* if DD is not set pending work has not been completed */ │ /* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) │ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
break; │ break;
│
/* clear next_to_watch to prevent false hangs */ │ /* clear next_to_watch to prevent false hangs */
tx_buffer->next_to_watch = NULL; │ tx_buffer->next_to_watch = NULL;
│
/* update the statistics for this packet */ │ /* update the statistics for this packet */
total_bytes += tx_buffer->bytecount; │ total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs; │ total_packets += tx_buffer->gso_segs;
if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) │ if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
total_ipsec++; │ total_ipsec++;
│
/* free the skb */ │ /* free the skb */
if (ring_is_xdp(tx_ring)) │ if (ring_is_xdp(tx_ring))
page_frag_free(tx_buffer->data); │ xdp_return_frame(tx_buffer->xdpf);
else │ else
napi_consume_skb(tx_buffer->skb, napi_budget); │ napi_consume_skb(tx_buffer->skb, napi_budget);
│
/* unmap skb header data */ │ /* unmap skb header data */
dma_unmap_single(tx_ring->dev, │ dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma), │ dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len), │ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
│
/* clear tx_buffer data */ │ /* clear tx_buffer data */
dma_unmap_len_set(tx_buffer, len, 0); │ dma_unmap_len_set(tx_buffer, len, 0);
│
/* unmap remaining buffers */ │ /* unmap remaining buffers */
while (tx_desc != eop_desc) { │ while (tx_desc != eop_desc) {
tx_buffer++; │ tx_buffer++;
tx_desc++; │ tx_desc++;
i++; │ i++;
if (unlikely(!i)) { │ if (unlikely(!i)) {
i -= tx_ring->count; │ i -= tx_ring->count;
tx_buffer = tx_ring->tx_buffer_info; │ tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); │ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
} │ }
│
/* unmap any remaining paged data */ │ /* unmap any remaining paged data */
if (dma_unmap_len(tx_buffer, len)) { │ if (dma_unmap_len(tx_buffer, len)) {
dma_unmap_page(tx_ring->dev, │ dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma), │ dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len), │ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0); │ dma_unmap_len_set(tx_buffer, len, 0);
} │ }
} │ }
│
/* move us one more past the eop_desc for start of next pkt */ │ /* move us one more past the eop_desc for start of next pkt */
tx_buffer++; │ tx_buffer++;
tx_desc++; │ tx_desc++;
i++; │ i++;
if (unlikely(!i)) { │ if (unlikely(!i)) {
i -= tx_ring->count; │ i -= tx_ring->count;
tx_buffer = tx_ring->tx_buffer_info; │ tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); │ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
} │ }
│
/* issue prefetch for next Tx descriptor */ │ /* issue prefetch for next Tx descriptor */
prefetch(tx_desc); │ prefetch(tx_desc);
│
/* update budget accounting */ │ /* update budget accounting */
budget--; │ budget--;
} while (likely(budget)); │ } while (likely(budget));
│
i += tx_ring->count; │ i += tx_ring->count;
tx_ring->next_to_clean = i; │ tx_ring->next_to_clean = i;
u64_stats_update_begin(&tx_ring->syncp); │ u64_stats_update_begin(&tx_ring->syncp);
tx_ring->stats.bytes += total_bytes; │ tx_ring->stats.bytes += total_bytes;
tx_ring->stats.packets += total_packets; │ tx_ring->stats.packets += total_packets;
u64_stats_update_end(&tx_ring->syncp); │ u64_stats_update_end(&tx_ring->syncp);
q_vector->tx.total_bytes += total_bytes; │ q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets; │ q_vector->tx.total_packets += total_packets;
adapter->tx_ipsec += total_ipsec; │ adapter->tx_ipsec += total_ipsec;
│
if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { │ if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
│ /* schedule immediate reset if we believe we hung */
struct ixgbe_hw *hw = &adapter->hw; │ struct ixgbe_hw *hw = &adapter->hw;
union ixgbe_adv_tx_desc *eop_desc; │ e_err(drv, "Detected Tx Unit Hang %s\n"
│ " Tx Queue <%d>\n"
eop_desc = tx_ring->tx_buffer_info[i].next_to_watch; │ " TDH, TDT <%x>, <%x>\n"
│ " next_to_use <%x>\n"
pr_err("Detected Tx Unit Hang%s\n" │ " next_to_clean <%x>\n"
" Tx Queue <%d>\n" │ "tx_buffer_info[next_to_clean]\n"
" TDH, TDT <%x>, <%x>\n" │ " time_stamp <%lx>\n"
" next_to_use <%x>\n" │ " jiffies <%lx>\n",
" next_to_clean <%x>\n" │ ring_is_xdp(tx_ring) ? "(XDP)" : "",
"tx_buffer_info[next_to_clean]\n" │ tx_ring->queue_index,
" next_to_watch <%p>\n" │ IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
" eop_desc->wb.status <%x>\n" │ IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
" time_stamp <%lx>\n" │ tx_ring->next_to_use, i,
" jiffies <%lx>\n", │ tx_ring->tx_buffer_info[i].time_stamp, jiffies);
ring_is_xdp(tx_ring) ? " XDP" : "", │
tx_ring->queue_index, │
IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)), │
IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)), │
tx_ring->next_to_use, i, │
eop_desc, (eop_desc ? eop_desc->wb.status : 0), │
tx_ring->tx_buffer_info[i].time_stamp, jiffies); │
│
if (!ring_is_xdp(tx_ring)) │ if (!ring_is_xdp(tx_ring))
netif_stop_subqueue(tx_ring->netdev, │ netif_stop_subqueue(tx_ring->netdev,
tx_ring->queue_index); │ tx_ring->queue_index);
│
│ e_info(probe,
│ "tx hang %d detected on queue %d, resetting adapter\n",
│ adapter->tx_timeout_count + 1, tx_ring->queue_index);
│
/* schedule immediate reset if we believe we hung */ │ /* schedule immediate reset if we believe we hung */
ixgbevf_tx_timeout_reset(adapter); │ ixgbe_tx_timeout_reset(adapter);
│
│ /* the adapter is about to reset, no point in enabling stuff */
return true; │ return true;
} │ }
│
if (ring_is_xdp(tx_ring)) │ if (ring_is_xdp(tx_ring))
return !!budget; │ return !!budget;
│
│ netdev_tx_completed_queue(txring_txq(tx_ring),
│ total_packets, total_bytes);
│
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) │ #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && │ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { │ (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this │ /* Make sure that anybody stopping the queue after this
* sees the new next_to_clean. │ * sees the new next_to_clean.
*/ │ */
smp_mb(); │ smp_mb();
│
if (__netif_subqueue_stopped(tx_ring->netdev, │ if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) && │ tx_ring->queue_index)
!test_bit(__IXGBEVF_DOWN, &adapter->state)) { │ && !test_bit(__IXGBE_DOWN, &adapter->state)) {
netif_wake_subqueue(tx_ring->netdev, │ netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index); │ tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue; │ ++tx_ring->tx_stats.restart_queue;
} │ }
} │ }
│
return !!budget; │ return !!budget;
} │
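Both cleanup loops rely on the same biased-index idiom: i is offset by -tx_ring->count up front, so the wrap check in the hot loop is the cheap test if (unlikely(!i)) rather than a compare against the ring size, and the bias is removed once at the end before storing next_to_clean. Unsigned wraparound keeps the arithmetic well defined. A standalone sketch of just that idiom (all names hypothetical):

    /* Visit 'count' ring slots starting at 'start' using the biased
     * index: 'i' conceptually lives in [-count, 0), so reaching 0
     * means the walk wrapped past the ring end. */
    static void walk_ring(unsigned int count, unsigned int start,
    		      void (*visit)(unsigned int slot))
    {
    	unsigned int i = start;
    	unsigned int n;

    	i -= count;			/* bias: 0 now means one past the end */
    	for (n = 0; n < count; n++) {
    		visit(i + count);	/* un-bias to recover the real slot */
    		i++;
    		if (!i)			/* wrapped past the ring end */
    			i -= count;
    	}
    }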
linux/drivers/net/ethernet/intel/e1000e/mac.c:1175 │ linux/drivers/net/ethernet/intel/igb/e1000_mac.c:1099
│
/* Read the PCS_LSTS and check to see if AutoNeg │ /* Read the PCS_LSTS and check to see if AutoNeg
* has completed. │ * has completed.
*/ │ */
pcs_status_reg = er32(PCS_LSTAT); │ pcs_status_reg = rd32(E1000_PCS_LSTAT);
│
if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { │ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
e_dbg("PCS Auto Neg has not completed.\n"); │ hw_dbg("PCS Auto Neg has not completed.\n");
return ret_val; │ return ret_val;
} │ }
│
/* The AutoNeg process has completed, so we now need to │ /* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement │ * read both the Auto Negotiation Advertisement
* Register (PCS_ANADV) and the Auto_Negotiation Base │ * Register (PCS_ANADV) and the Auto_Negotiation Base
* Page Ability Register (PCS_LPAB) to determine how │ * Page Ability Register (PCS_LPAB) to determine how
* flow control was negotiated. │ * flow control was negotiated.
*/ │ */
pcs_adv_reg = er32(PCS_ANADV); │ pcs_adv_reg = rd32(E1000_PCS_ANADV);
pcs_lp_ability_reg = er32(PCS_LPAB); │ pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
│
/* Two bits in the Auto Negotiation Advertisement Register │ /* Two bits in the Auto Negotiation Advertisement Register
* (PCS_ANADV) and two bits in the Auto Negotiation Base │ * (PCS_ANADV) and two bits in the Auto Negotiation Base
* Page Ability Register (PCS_LPAB) determine flow control │ * Page Ability Register (PCS_LPAB) determine flow control
* for both the PHY and the link partner. The following │ * for both the PHY and the link partner. The following
* table, taken out of the IEEE 802.3ab/D6.0 dated March 25, │ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
* 1999, describes these PAUSE resolution bits and how flow │ * 1999, describes these PAUSE resolution bits and how flow
* control is determined based upon these settings. │ * control is determined based upon these settings.
* NOTE: DC = Don't Care │ * NOTE: DC = Don't Care
* │ *
* LOCAL DEVICE | LINK PARTNER │ * LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution │ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
*-------|---------|-------|---------|-------------------- │ *-------|---------|-------|---------|--------------------
* 0 | 0 | DC | DC | e1000_fc_none │ * 0 | 0 | DC | DC | e1000_fc_none
* 0 | 1 | 0 | DC | e1000_fc_none │ * 0 | 1 | 0 | DC | e1000_fc_none
* 0 | 1 | 1 | 0 | e1000_fc_none │ * 0 | 1 | 1 | 0 | e1000_fc_none
* 0 | 1 | 1 | 1 | e1000_fc_tx_pause │ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
* 1 | 0 | 0 | DC | e1000_fc_none │ * 1 | 0 | 0 | DC | e1000_fc_none
* 1 | DC | 1 | DC | e1000_fc_full │ * 1 | DC | 1 | DC | e1000_fc_full
* 1 | 1 | 0 | 0 | e1000_fc_none │ * 1 | 1 | 0 | 0 | e1000_fc_none
* 1 | 1 | 0 | 1 | e1000_fc_rx_pause │ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
* │ *
* Are both PAUSE bits set to 1? If so, this implies │ * Are both PAUSE bits set to 1? If so, this implies
* Symmetric Flow Control is enabled at both ends. The │ * Symmetric Flow Control is enabled at both ends. The
* ASM_DIR bits are irrelevant per the spec. │ * ASM_DIR bits are irrelevant per the spec.
* │ *
* For Symmetric Flow Control: │ * For Symmetric Flow Control:
* │ *
* LOCAL DEVICE | LINK PARTNER │ * LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result │ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|-------------------- │ *-------|---------|-------|---------|--------------------
* 1 | DC | 1 | DC | e1000_fc_full │ * 1 | DC | 1 | DC | e1000_fc_full
* │ *
*/ │ */
if ((pcs_adv_reg & E1000_TXCW_PAUSE) && │ if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
(pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { │ (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
/* Now we need to check if the user selected Rx ONLY │ /* Now we need to check if the user selected Rx ONLY
* of pause frames. In this case, we had to advertise │ * of pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise Rx │ * FULL flow control because we could not advertise Rx
* ONLY. Hence, we must now check to see if we need to │ * ONLY. Hence, we must now check to see if we need to
* turn OFF the TRANSMISSION of PAUSE frames. │ * turn OFF the TRANSMISSION of PAUSE frames.
*/ │ */
if (hw->fc.requested_mode == e1000_fc_full) { │ if (hw->fc.requested_mode == e1000_fc_full) {
hw->fc.current_mode = e1000_fc_full; │ hw->fc.current_mode = e1000_fc_full;
e_dbg("Flow Control = FULL.\n"); │ hw_dbg("Flow Control = FULL.\n");
} else { │ } else {
hw->fc.current_mode = e1000_fc_rx_pause; │ hw->fc.current_mode = e1000_fc_rx_pause;
e_dbg("Flow Control = Rx PAUSE frames only.\n"); │ hw_dbg("Flow Control = Rx PAUSE frames only.\n");
} │ }
} │ }
/* For receiving PAUSE frames ONLY. │ /* For receiving PAUSE frames ONLY.
* │ *
* LOCAL DEVICE | LINK PARTNER │ * LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result │ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|-------------------- │ *-------|---------|-------|---------|--------------------
* 0 | 1 | 1 | 1 | e1000_fc_tx_pause │ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
*/ │ */
else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && │ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
(pcs_adv_reg & E1000_TXCW_ASM_DIR) && │ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && │ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
(pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { │ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_tx_pause; │ hw->fc.current_mode = e1000_fc_tx_pause;
e_dbg("Flow Control = Tx PAUSE frames only.\n"); │ hw_dbg("Flow Control = Tx PAUSE frames only.\n");
} │ }
/* For transmitting PAUSE frames ONLY. │ /* For transmitting PAUSE frames ONLY.
* │ *
* LOCAL DEVICE | LINK PARTNER │ * LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result │ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|-------------------- │ *-------|---------|-------|---------|--------------------
* 1 | 1 | 0 | 1 | e1000_fc_rx_pause │ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
*/ │ */
else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && │ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
(pcs_adv_reg & E1000_TXCW_ASM_DIR) && │ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
!(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && │ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
(pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { │ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_rx_pause; │ hw->fc.current_mode = e1000_fc_rx_pause;
e_dbg("Flow Control = Rx PAUSE frames only.\n"); │ hw_dbg("Flow Control = Rx PAUSE frames only.\n");
} else { │ } else {
/* Per the IEEE spec, at this point flow control │ /* Per the IEEE spec, at this point flow control
* should be disabled. │ * should be disabled.
*/ │ */
hw->fc.current_mode = e1000_fc_none; │ hw->fc.current_mode = e1000_fc_none;
e_dbg("Flow Control = NONE.\n"); │ hw_dbg("Flow Control = NONE.\n");
} │ }
│
/* Now we call a subroutine to actually force the MAC │ /* Now we call a subroutine to actually force the MAC
* controller to use the correct flow control settings. │ * controller to use the correct flow control settings.
*/ │ */
pcs_ctrl_reg = er32(PCS_LCTL); │ pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; │ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
ew32(PCS_LCTL, pcs_ctrl_reg); │ wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
│
ret_val = e1000e_force_mac_fc(hw); │ ret_val = igb_force_mac_fc(hw);
if (ret_val) { │ if (ret_val) {
e_dbg("Error forcing flow control settings\n"); │ hw_dbg("Error forcing flow control settings\n");
return ret_val; │ return ret_val;
} │ }
} │
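The long comment tables above resolve to a four-way decision on the local and link-partner PAUSE/ASM_DIR bits. Condensed into one function for clarity (the enum mirrors e1000_fc_*, but this helper is illustrative; note the drivers additionally downgrade the symmetric case to Rx-only PAUSE when the user requested only Rx, as the code shows):

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    /* Resolution of the IEEE 802.3 PAUSE negotiation table quoted above. */
    static enum fc_mode resolve_fc(int loc_pause, int loc_asm,
    			       int lp_pause, int lp_asm)
    {
    	if (loc_pause && lp_pause)
    		return FC_FULL;		/* symmetric: both advertise PAUSE */
    	if (!loc_pause && loc_asm && lp_pause && lp_asm)
    		return FC_TX_PAUSE;	/* we send PAUSE, do not honor it */
    	if (loc_pause && loc_asm && !lp_pause && lp_asm)
    		return FC_RX_PAUSE;	/* we honor PAUSE, do not send it */
    	return FC_NONE;			/* per the spec, disable otherwise */
    }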
linux/drivers/net/ethernet/intel/igb/igb_ptp.c:663 │ linux/drivers/net/ethernet/intel/igc/igc_ptp.c:248
│
struct igb_adapter *igb = │ struct igc_adapter *igc =
container_of(ptp, struct igb_adapter, ptp_caps); │ container_of(ptp, struct igc_adapter, ptp_caps);
struct e1000_hw *hw = &igb->hw; │ struct igc_hw *hw = &igc->hw;
u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; │
unsigned long flags; │ unsigned long flags;
struct timespec64 ts; │ struct timespec64 ts;
int use_freq = 0, pin = -1; │ int use_freq = 0, pin = -1;
│ u32 tsim, tsauxc, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout;
s64 ns; │ s64 ns;
│
switch (rq->type) { │ switch (rq->type) {
case PTP_CLK_REQ_EXTTS: │ case PTP_CLK_REQ_EXTTS:
/* Reject requests with unsupported flags */ │ /* Reject requests with unsupported flags */
if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | │ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE | │ PTP_RISING_EDGE |
PTP_FALLING_EDGE | │ PTP_FALLING_EDGE |
PTP_STRICT_FLAGS)) │ PTP_STRICT_FLAGS))
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
│
/* Reject requests failing to enable both edges. */ │ /* Reject requests failing to enable both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) && │ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) && │ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
(rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) │ (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
│
if (on) { │ if (on) {
pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS, │ pin = ptp_find_pin(igc->ptp_clock, PTP_PF_EXTTS,
rq->extts.index); │ rq->extts.index);
if (pin < 0) │ if (pin < 0)
return -EBUSY; │ return -EBUSY;
} │ }
if (rq->extts.index == 1) { │ if (rq->extts.index == 1) {
tsauxc_mask = TSAUXC_EN_TS1; │ tsauxc_mask = IGC_TSAUXC_EN_TS1;
tsim_mask = TSINTR_AUTT1; │ tsim_mask = IGC_TSICR_AUTT1;
} else { │ } else {
tsauxc_mask = TSAUXC_EN_TS0; │ tsauxc_mask = IGC_TSAUXC_EN_TS0;
tsim_mask = TSINTR_AUTT0; │ tsim_mask = IGC_TSICR_AUTT0;
} │ }
spin_lock_irqsave(&igb->tmreg_lock, flags); │ spin_lock_irqsave(&igc->tmreg_lock, flags);
tsauxc = rd32(E1000_TSAUXC); │ tsauxc = rd32(IGC_TSAUXC);
tsim = rd32(E1000_TSIM); │ tsim = rd32(IGC_TSIM);
if (on) { │ if (on) {
igb_pin_extts(igb, rq->extts.index, pin); │ igc_pin_extts(igc, rq->extts.index, pin);
tsauxc |= tsauxc_mask; │ tsauxc |= tsauxc_mask;
tsim |= tsim_mask; │ tsim |= tsim_mask;
} else { │ } else {
tsauxc &= ~tsauxc_mask; │ tsauxc &= ~tsauxc_mask;
tsim &= ~tsim_mask; │ tsim &= ~tsim_mask;
} │ }
wr32(E1000_TSAUXC, tsauxc); │ wr32(IGC_TSAUXC, tsauxc);
wr32(E1000_TSIM, tsim); │ wr32(IGC_TSIM, tsim);
spin_unlock_irqrestore(&igb->tmreg_lock, flags); │ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
return 0; │ return 0;
│
case PTP_CLK_REQ_PEROUT: │ case PTP_CLK_REQ_PEROUT:
/* Reject requests with unsupported flags */ │ /* Reject requests with unsupported flags */
if (rq->perout.flags) │ if (rq->perout.flags)
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
│
if (on) { │ if (on) {
pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT, │ pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT,
rq->perout.index); │ rq->perout.index);
if (pin < 0) │ if (pin < 0)
return -EBUSY; │ return -EBUSY;
} │ }
ts.tv_sec = rq->perout.period.sec; │ ts.tv_sec = rq->perout.period.sec;
ts.tv_nsec = rq->perout.period.nsec; │ ts.tv_nsec = rq->perout.period.nsec;
ns = timespec64_to_ns(&ts); │ ns = timespec64_to_ns(&ts);
ns = ns >> 1; │ ns = ns >> 1;
if (on && ((ns <= 70000000LL) || (ns == 125000000LL) || │ if (on && (ns <= 70000000LL || ns == 125000000LL ||
(ns == 250000000LL) || (ns == 500000000LL))) { │ ns == 250000000LL || ns == 500000000LL)) {
if (ns < 8LL) │ if (ns < 8LL)
return -EINVAL; │ return -EINVAL;
use_freq = 1; │ use_freq = 1;
} │ }
ts = ns_to_timespec64(ns); │ ts = ns_to_timespec64(ns);
if (rq->perout.index == 1) { │ if (rq->perout.index == 1) {
if (use_freq) { │ if (use_freq) {
tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1; │ tsauxc_mask = IGC_TSAUXC_EN_CLK1;
tsim_mask = 0; │ tsim_mask = 0;
} else { │ } else {
tsauxc_mask = TSAUXC_EN_TT1; │ tsauxc_mask = IGC_TSAUXC_EN_TT1;
tsim_mask = TSINTR_TT1; │ tsim_mask = IGC_TSICR_TT1;
} │ }
trgttiml = E1000_TRGTTIML1; │ trgttiml = IGC_TRGTTIML1;
trgttimh = E1000_TRGTTIMH1; │ trgttimh = IGC_TRGTTIMH1;
freqout = E1000_FREQOUT1; │ freqout = IGC_FREQOUT1;
} else { │ } else {
if (use_freq) { │ if (use_freq) {
tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0; │ tsauxc_mask = IGC_TSAUXC_EN_CLK0;
tsim_mask = 0; │ tsim_mask = 0;
} else { │ } else {
tsauxc_mask = TSAUXC_EN_TT0; │ tsauxc_mask = IGC_TSAUXC_EN_TT0;
tsim_mask = TSINTR_TT0; │ tsim_mask = IGC_TSICR_TT0;
} │ }
trgttiml = E1000_TRGTTIML0; │ trgttiml = IGC_TRGTTIML0;
trgttimh = E1000_TRGTTIMH0; │ trgttimh = IGC_TRGTTIMH0;
freqout = E1000_FREQOUT0; │ freqout = IGC_FREQOUT0;
} │ }
spin_lock_irqsave(&igb->tmreg_lock, flags); │ spin_lock_irqsave(&igc->tmreg_lock, flags);
tsauxc = rd32(E1000_TSAUXC); │ tsauxc = rd32(IGC_TSAUXC);
tsim = rd32(E1000_TSIM); │ tsim = rd32(IGC_TSIM);
if (rq->perout.index == 1) { │ if (rq->perout.index == 1) {
tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1); │ tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1);
tsim &= ~TSINTR_TT1; │ tsim &= ~IGC_TSICR_TT1;
} else { │ } else {
tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0); │ tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0);
tsim &= ~TSINTR_TT0; │ tsim &= ~IGC_TSICR_TT0;
} │ }
if (on) { │ if (on) {
int i = rq->perout.index; │ int i = rq->perout.index;
igb_pin_perout(igb, i, pin, use_freq); │
igb->perout[i].start.tv_sec = rq->perout.start.sec; │ igc_pin_perout(igc, i, pin, use_freq);
igb->perout[i].start.tv_nsec = rq->perout.start.nsec; │ igc->perout[i].start.tv_sec = rq->perout.start.sec;
igb->perout[i].period.tv_sec = ts.tv_sec; │ igc->perout[i].start.tv_nsec = rq->perout.start.nsec;
igb->perout[i].period.tv_nsec = ts.tv_nsec; │ igc->perout[i].period.tv_sec = ts.tv_sec;
│ igc->perout[i].period.tv_nsec = ts.tv_nsec;
wr32(trgttimh, rq->perout.start.sec); │ wr32(trgttimh, rq->perout.start.sec);
wr32(trgttiml, rq->perout.start.nsec); │ /* For now, always select timer 0 as source. */
│ wr32(trgttiml, rq->perout.start.nsec | IGC_TT_IO_TIMER_SEL_SYSTI
if (use_freq) │ if (use_freq)
wr32(freqout, ns); │ wr32(freqout, ns);
tsauxc |= tsauxc_mask; │ tsauxc |= tsauxc_mask;
tsim |= tsim_mask; │ tsim |= tsim_mask;
} │ }
wr32(E1000_TSAUXC, tsauxc); │ wr32(IGC_TSAUXC, tsauxc);
wr32(E1000_TSIM, tsim); │ wr32(IGC_TSIM, tsim);
spin_unlock_irqrestore(&igb->tmreg_lock, flags); │ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
return 0; │ return 0;
│
case PTP_CLK_REQ_PPS: │ case PTP_CLK_REQ_PPS:
spin_lock_irqsave(&igb->tmreg_lock, flags); │ spin_lock_irqsave(&igc->tmreg_lock, flags);
tsim = rd32(E1000_TSIM); │ tsim = rd32(IGC_TSIM);
if (on) │ if (on)
tsim |= TSINTR_SYS_WRAP; │ tsim |= IGC_TSICR_SYS_WRAP;
else │ else
tsim &= ~TSINTR_SYS_WRAP; │ tsim &= ~IGC_TSICR_SYS_WRAP;
igb->pps_sys_wrap_on = !!on; │ igc->pps_sys_wrap_on = on;
wr32(E1000_TSIM, tsim); │ wr32(IGC_TSIM, tsim);
spin_unlock_irqrestore(&igb->tmreg_lock, flags); │ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
return 0; │ return 0;
│
│ default:
│ break;
} │ }
│
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
} │
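In both PEROUT paths the requested period is halved (ns = ns >> 1) before being compared against the fixed clock-out rates and written to FREQOUT. A reasonable reading, hedged because neither file states it here, is that the register programs the toggle interval, i.e. one half-cycle of the square wave:

    #include <stdint.h>

    /* Assumed semantics: FREQOUT holds the per-toggle interval, so a
     * full output period of period_ns needs period_ns / 2 written. */
    static uint64_t perout_toggle_ns(uint64_t period_ns)
    {
    	return period_ns >> 1;	/* one level toggle per half period */
    }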
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:3579 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:694
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 │ #define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 │ #define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 │ #define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 │ #define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 │ #define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 │ #define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 │ #define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 │ #define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1; │ u8 flags1;
#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 │ #define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 │ #define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 │ #define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 │ #define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 │ #define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 │ #define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 │ #define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 │ #define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2; │ u8 flags2;
#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 │ #define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 │ #define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 │ #define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 │ #define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 │ #define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 │ #define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 │ #define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 │ #define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3; │ u8 flags3;
#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 │ #define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 │ #define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 │ #define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 │ #define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 │ #define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 │ #define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 │ #define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 │ #define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4; │ u8 flags4;
#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 │ #define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 │ #define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 │ #define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 │ #define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 │ #define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 │ #define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 │ #define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 │ #define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5; │ u8 flags5;
#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 │ #define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 │ #define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 │ #define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 │ #define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 │ #define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 │ #define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 │ #define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 │ #define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 │ #define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
__le32 reg4; │ __le32 reg4;
__le32 reg5; │ __le32 reg5;
__le32 reg6; │ __le32 reg6;
__le32 reg7; │ __le32 reg7;
__le32 reg8; │ __le32 reg8;
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 rx_bd_cons; │ __le16 word0;
u8 byte4; │ u8 byte4;
u8 byte5; │ u8 byte5;
__le16 rx_bd_prod; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le32 reg9; │ __le32 ll2_rx_prod;
__le32 reg10; │ __le32 reg10;
} │
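
The *_MASK/*_SHIFT pairs in both panes pack several small fields into each flags byte; the header only declares the layout, and callers extract or insert fields by shifting and masking. A minimal userspace sketch of that pattern, assuming a hypothetical 2-bit DEMO_CF0 field at bits 7:6 of flags0 (mirroring the CF0 definitions above; the DEMO_* names are not from either file):

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_CF0_MASK	0x3
	#define DEMO_CF0_SHIFT	6

	/* Extract the 2-bit field from its byte. */
	static uint8_t demo_get_cf0(uint8_t flags0)
	{
		return (flags0 >> DEMO_CF0_SHIFT) & DEMO_CF0_MASK;
	}

	/* Clear the field, then insert the new value in its place. */
	static uint8_t demo_set_cf0(uint8_t flags0, uint8_t val)
	{
		flags0 &= ~(DEMO_CF0_MASK << DEMO_CF0_SHIFT);
		flags0 |= (val & DEMO_CF0_MASK) << DEMO_CF0_SHIFT;
		return flags0;
	}

	int main(void)
	{
		uint8_t f = demo_set_cf0(0, 0x2);
		printf("flags0=0x%02x cf0=%u\n", f, demo_get_cf0(f)); /* 0x80, 2 */
		return 0;
	}
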
linux/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c:194 │ linux/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c:345
│
struct mac_delay_struct *mac_delay = &plat->mac_delay; │ struct mac_delay_struct *mac_delay = &plat->mac_delay;
u32 delay_val = 0, fine_val = 0; │ u32 gtxc_delay_val = 0, delay_val = 0, rmii_delay_val = 0;
│
mt2712_delay_ps2stage(plat); │ mt8195_delay_ps2stage(plat);
│
switch (plat->phy_mode) { │ switch (plat->phy_mode) {
case PHY_INTERFACE_MODE_MII: │ case PHY_INTERFACE_MODE_MII:
delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->tx_delay); │ delay_val |= FIELD_PREP(MT8195_DLY_TXC_ENABLE, !!mac_delay->tx_delay);
delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->tx_delay); │ delay_val |= FIELD_PREP(MT8195_DLY_TXC_STAGES, mac_delay->tx_delay);
delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->tx_inv); │ delay_val |= FIELD_PREP(MT8195_DLY_TXC_INV, mac_delay->tx_inv);
│
delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay); │ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay); │ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES, mac_delay->rx_delay);
delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv); │ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV, mac_delay->rx_inv);
break; │ break;
case PHY_INTERFACE_MODE_RMII: │ case PHY_INTERFACE_MODE_RMII:
if (plat->rmii_clk_from_mac) { │ if (plat->rmii_clk_from_mac) {
/* case 1: mac provides the rmii reference clock, │ /* case 1: mac provides the rmii reference clock,
 * and the clock is output to the TXC pin.                              │                  * and the clock is output to the TXC pin.
 * The egress timing can be adjusted by GTXC delay macro circuit        │                  * The egress timing can be adjusted by RMII_TXC delay macro circuit
 * The ingress timing can be adjusted by TXC delay macro circuit        │                  * The ingress timing can be adjusted by RMII_RXC delay macro circuit
*/ │ */
delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->rx_delay);     │                 rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_ENABLE,
delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->rx_delay);       │                                              !!mac_delay->tx_delay);
delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->rx_inv);            │                 rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_STAGES,
                                                                        │                                              mac_delay->tx_delay);
delay_val |= FIELD_PREP(ETH_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);    │                 rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_INV,
delay_val |= FIELD_PREP(ETH_DLY_GTXC_STAGES, mac_delay->tx_delay);      │                                              mac_delay->tx_inv);
delay_val |= FIELD_PREP(ETH_DLY_GTXC_INV, mac_delay->tx_inv); │
│ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_ENABLE,
│ !!mac_delay->rx_delay);
│ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_STAGES,
│ mac_delay->rx_delay);
│ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_INV,
│ mac_delay->rx_inv);
} else { │ } else {
/* case 2: the rmii reference clock is from external phy, │ /* case 2: the rmii reference clock is from external phy,
 * and the property "rmii_rxc" indicates which pin (TXC/RXC)            │                  * and the property "rmii_rxc" indicates which pin (TXC/RXC)
* the reference clk is connected to. The reference clock is a │ * the reference clk is connected to. The reference clock is a
* received signal, so rx_delay/rx_inv are used to indicate │ * received signal, so rx_delay/rx_inv are used to indicate
* the reference clock timing adjustment │ * the reference clock timing adjustment
*/ │ */
if (plat->rmii_rxc) { │ if (plat->rmii_rxc) {
/* the rmii reference clock from outside is connected │ /* the rmii reference clock from outside is connected
* to RXC pin, the reference clock will be adjusted │ * to RXC pin, the reference clock will be adjusted
* by RXC delay macro circuit. │ * by RXC delay macro circuit.
*/ │ */
delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);     │                         delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE,
delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);       │                                                 !!mac_delay->rx_delay);
delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);            │                         delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES,
│ mac_delay->rx_delay);
│ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV,
│ mac_delay->rx_inv);
} else { │ } else {
/* the rmii reference clock from outside is connected │ /* the rmii reference clock from outside is connected
* to TXC pin, the reference clock will be adjusted │ * to TXC pin, the reference clock will be adjusted
* by TXC delay macro circuit. │ * by TXC delay macro circuit.
*/ │ */
delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->rx_delay);     │                         delay_val |= FIELD_PREP(MT8195_DLY_TXC_ENABLE,
delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->rx_delay);       │                                                 !!mac_delay->rx_delay);
delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->rx_inv);            │                         delay_val |= FIELD_PREP(MT8195_DLY_TXC_STAGES,
│ mac_delay->rx_delay);
│ delay_val |= FIELD_PREP(MT8195_DLY_TXC_INV,
│ mac_delay->rx_inv);
} │ }
/* tx_inv will invert the tx clock inside mac relative to               │
* reference clock from external phy, │
* and this bit is located in the same register with fine-tune │
*/ │
if (mac_delay->tx_inv) │
fine_val = ETH_RMII_DLY_TX_INV; │
} │ }
break; │ break;
case PHY_INTERFACE_MODE_RGMII: │ case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_TXID: │ case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID: │ case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID: │ case PHY_INTERFACE_MODE_RGMII_ID:
fine_val = ETH_FINE_DLY_GTXC | ETH_FINE_DLY_RXC;                        │                 gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);
                                                                        │                 gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_STAGES, mac_delay->tx_delay);
│ gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_INV, mac_delay->tx_inv);
│
│ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
│ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES, mac_delay->rx_delay);
│ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV, mac_delay->rx_inv);
│
delay_val |= FIELD_PREP(ETH_DLY_GTXC_ENABLE, !!mac_delay->tx_delay); │
delay_val |= FIELD_PREP(ETH_DLY_GTXC_STAGES, mac_delay->tx_delay); │
delay_val |= FIELD_PREP(ETH_DLY_GTXC_INV, mac_delay->tx_inv); │
│
delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay); │
delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay); │
delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv); │
break; │ break;
default: │ default:
dev_err(plat->dev, "phy interface not supported\n"); │ dev_err(plat->dev, "phy interface not supported\n");
return -EINVAL; │ return -EINVAL;
} │ }
regmap_write(plat->peri_regmap, PERI_ETH_DLY, delay_val); │
regmap_write(plat->peri_regmap, PERI_ETH_DLY_FINE, fine_val); │
│
mt2712_delay_stage2ps(plat); │ regmap_update_bits(plat->peri_regmap,
│ MT8195_PERI_ETH_CTRL0,
│ MT8195_RGMII_TXC_PHASE_CTRL |
│ MT8195_DLY_GTXC_INV |
│ MT8195_DLY_GTXC_ENABLE |
│ MT8195_DLY_GTXC_STAGES,
│ gtxc_delay_val);
│ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL1, delay_val);
│ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL2, rmii_delay_val);
│
│ mt8195_delay_stage2ps(plat);
│
return 0; │ return 0;
} │
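
Both variants lean on FIELD_PREP() from linux/bitfield.h, which shifts a value into position under a contiguous bitmask. A self-contained userspace stand-in, assuming hypothetical DLY_TXC_* masks shaped like the MT8195_DLY_* ones above (an enable bit, a 5-bit stage count, an invert bit); __builtin_ctz is the GCC/Clang builtin that finds the mask's lowest set bit, which the kernel macro does at compile time:

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace stand-in for FIELD_PREP(): shift val under mask. */
	#define DEMO_FIELD_PREP(mask, val) \
		(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

	#define DLY_TXC_ENABLE	0x00000001u	/* assumed layout */
	#define DLY_TXC_STAGES	0x0000003eu
	#define DLY_TXC_INV	0x00000040u

	int main(void)
	{
		uint32_t tx_delay = 17, tx_inv = 1, delay_val = 0;

		delay_val |= DEMO_FIELD_PREP(DLY_TXC_ENABLE, !!tx_delay);
		delay_val |= DEMO_FIELD_PREP(DLY_TXC_STAGES, tx_delay);
		delay_val |= DEMO_FIELD_PREP(DLY_TXC_INV, tx_inv);

		printf("delay_val = 0x%08x\n", delay_val); /* 0x00000063 */
		return 0;
	}
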
linux/drivers/net/ethernet/intel/igb/igb_main.c:6113 │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c:8226
│
struct sk_buff *skb = first->skb; │ struct sk_buff *skb = first->skb;
struct igb_tx_buffer *tx_buffer; │ struct ixgbe_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc; │ union ixgbe_adv_tx_desc *tx_desc;
skb_frag_t *frag; │ skb_frag_t *frag;
dma_addr_t dma; │ dma_addr_t dma;
unsigned int data_len, size; │ unsigned int data_len, size;
u32 tx_flags = first->tx_flags; │ u32 tx_flags = first->tx_flags;
u32 cmd_type = igb_tx_cmd_type(skb, tx_flags); │ u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
u16 i = tx_ring->next_to_use; │ u16 i = tx_ring->next_to_use;
│
tx_desc = IGB_TX_DESC(tx_ring, i); │ tx_desc = IXGBE_TX_DESC(tx_ring, i);
│
igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); │ ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
│
size = skb_headlen(skb); │ size = skb_headlen(skb);
data_len = skb->data_len; │ data_len = skb->data_len;
│
│ #ifdef IXGBE_FCOE
│ if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
│ if (data_len < sizeof(struct fcoe_crc_eof)) {
│ size -= sizeof(struct fcoe_crc_eof) - data_len;
│ data_len = 0;
│ } else {
│ data_len -= sizeof(struct fcoe_crc_eof);
│ }
│ }
│
│ #endif
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); │ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
│
tx_buffer = first; │ tx_buffer = first;
│
for (frag = &skb_shinfo(skb)->frags[0];; frag++) { │ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
if (dma_mapping_error(tx_ring->dev, dma)) │ if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error; │ goto dma_error;
│
/* record length, and DMA address */ │ /* record length, and DMA address */
dma_unmap_len_set(tx_buffer, len, size); │ dma_unmap_len_set(tx_buffer, len, size);
dma_unmap_addr_set(tx_buffer, dma, dma); │ dma_unmap_addr_set(tx_buffer, dma, dma);
│
tx_desc->read.buffer_addr = cpu_to_le64(dma); │ tx_desc->read.buffer_addr = cpu_to_le64(dma);
│
while (unlikely(size > IGB_MAX_DATA_PER_TXD)) { │ while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len = │ tx_desc->read.cmd_type_len =
cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD); │ cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
│
i++; │ i++;
tx_desc++; │ tx_desc++;
if (i == tx_ring->count) { │ if (i == tx_ring->count) {
tx_desc = IGB_TX_DESC(tx_ring, 0); │ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
i = 0; │ i = 0;
} │ }
tx_desc->read.olinfo_status = 0; │ tx_desc->read.olinfo_status = 0;
│
dma += IGB_MAX_DATA_PER_TXD; │ dma += IXGBE_MAX_DATA_PER_TXD;
size -= IGB_MAX_DATA_PER_TXD; │ size -= IXGBE_MAX_DATA_PER_TXD;
│
tx_desc->read.buffer_addr = cpu_to_le64(dma); │ tx_desc->read.buffer_addr = cpu_to_le64(dma);
} │ }
│
if (likely(!data_len)) │ if (likely(!data_len))
break; │ break;
│
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); │ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
│
i++; │ i++;
tx_desc++; │ tx_desc++;
if (i == tx_ring->count) { │ if (i == tx_ring->count) {
tx_desc = IGB_TX_DESC(tx_ring, 0); │ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
i = 0; │ i = 0;
} │ }
tx_desc->read.olinfo_status = 0; │ tx_desc->read.olinfo_status = 0;
│
│ #ifdef IXGBE_FCOE
│ size = min_t(unsigned int, data_len, skb_frag_size(frag));
│ #else
size = skb_frag_size(frag); │ size = skb_frag_size(frag);
│ #endif
data_len -= size; │ data_len -= size;
│
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, │ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
size, DMA_TO_DEVICE); │ DMA_TO_DEVICE);
│
tx_buffer = &tx_ring->tx_buffer_info[i]; │ tx_buffer = &tx_ring->tx_buffer_info[i];
} │ }
│
/* write last descriptor with RS and EOP bits */ │ /* write last descriptor with RS and EOP bits */
cmd_type |= size | IGB_TXD_DCMD; │ cmd_type |= size | IXGBE_TXD_CMD;
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); │ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
│
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); │ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
│
/* set the timestamp */ │ /* set the timestamp */
first->time_stamp = jiffies; │ first->time_stamp = jiffies;
│
skb_tx_timestamp(skb); │ skb_tx_timestamp(skb);
│
/* Force memory writes to complete before letting h/w know there │ /*
│ * Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered │ * are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64). │ * memory model archs, such as IA-64).
* │ *
* We also need this memory barrier to make certain all of the │ * We also need this memory barrier to make certain all of the
* status bits have been updated before next_to_watch is written. │ * status bits have been updated before next_to_watch is written.
*/ │ */
dma_wmb(); │ wmb();
│
/* set next_to_watch value indicating a packet is present */ │ /* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc; │ first->next_to_watch = tx_desc;
│
i++; │ i++;
if (i == tx_ring->count) │ if (i == tx_ring->count)
i = 0; │ i = 0;
│
tx_ring->next_to_use = i; │ tx_ring->next_to_use = i;
│
/* Make sure there is space in the ring for the next send. */ │ ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
igb_maybe_stop_tx(tx_ring, DESC_NEEDED); │
│
if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { │ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail); │ writel(i, tx_ring->tail);
} │ }
return 0; │
│
│ return 0;
dma_error: │ dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n"); │ dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_buffer = &tx_ring->tx_buffer_info[i]; │
│
/* clear dma mappings for failed tx_buffer_info map */ │ /* clear dma mappings for failed tx_buffer_info map */
while (tx_buffer != first) { │ for (;;) {
│ tx_buffer = &tx_ring->tx_buffer_info[i];
if (dma_unmap_len(tx_buffer, len)) │ if (dma_unmap_len(tx_buffer, len))
dma_unmap_page(tx_ring->dev, │ dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma), │ dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len), │ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0); │ dma_unmap_len_set(tx_buffer, len, 0);
│ if (tx_buffer == first)
if (i-- == 0) │ break;
│ if (i == 0)
i += tx_ring->count; │ i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i]; │ i--;
} │ }
│
if (dma_unmap_len(tx_buffer, len)) │ dev_kfree_skb_any(first->skb);
dma_unmap_single(tx_ring->dev, │ first->skb = NULL;
dma_unmap_addr(tx_buffer, dma), │
dma_unmap_len(tx_buffer, len), │
DMA_TO_DEVICE); │
dma_unmap_len_set(tx_buffer, len, 0); │
│
dev_kfree_skb_any(tx_buffer->skb); │
tx_buffer->skb = NULL; │
│
tx_ring->next_to_use = i; │ tx_ring->next_to_use = i;
│
return -1; │ return -1;
} │
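
Both tx_map() variants build cmd_type with the descriptor length bits clear and then fold the segment length in with ^ rather than |. Because the two operands never share set bits, XOR behaves exactly like OR here, and XOR-ing the same length a second time would strip it back out. A small sketch of that identity (the flag and length values are made up for illustration):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t cmd_type = 0x30000000u; /* flag bits only, length clear */
		uint32_t size = 0x4000u;	 /* segment length */

		assert((cmd_type ^ size) == (cmd_type | size)); /* disjoint: ^ == | */
		assert(((cmd_type ^ size) ^ size) == cmd_type); /* ^ strips it again */
		return 0;
	}
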
linux/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c:240 │ linux/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c:333
│
static const u32 max_weight = BIT(HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH) - 1; │ static const u32 max_weight = BIT(HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH) - 1;
/* Scale factor is based on the number of bits in fractional portion */ │ /* Scale factor is based on the number of bits in fractional portion */
static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH); │ static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >> │ static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
HW_ATL_TPS_DESC_RATE_Y_SHIFT; │ HW_ATL_TPS_DESC_RATE_Y_SHIFT;
const u32 link_speed = self->aq_link_status.mbps; │ const u32 link_speed = self->aq_link_status.mbps;
struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg; │ struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
unsigned long num_min_rated_tcs = 0; │ unsigned long num_min_rated_tcs = 0;
u32 tc_weight[AQ_CFG_TCS_MAX]; │ u32 tc_weight[AQ_CFG_TCS_MAX];
u32 fixed_max_credit_4b; │
u32 fixed_max_credit; │ u32 fixed_max_credit;
u8 min_rate_msk = 0; │ u8 min_rate_msk = 0;
u32 sum_weight = 0; │ u32 sum_weight = 0;
int tc; │ int tc;
│
/* By default max_credit is based upon MTU (in unit of 64b) */ │ /* By default max_credit is based upon MTU (in unit of 64b) */
fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64; │ fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64;
/* in unit of 4b */ │
fixed_max_credit_4b = nic_cfg->aq_hw_caps->mtu / 4; │
│
if (link_speed) { │ if (link_speed) {
min_rate_msk = nic_cfg->tc_min_rate_msk & │ min_rate_msk = nic_cfg->tc_min_rate_msk &
(BIT(nic_cfg->tcs) - 1); │ (BIT(nic_cfg->tcs) - 1);
num_min_rated_tcs = hweight8(min_rate_msk); │ num_min_rated_tcs = hweight8(min_rate_msk);
} │ }
│
/* First, calculate weights where min_rate is specified */ │ /* First, calculate weights where min_rate is specified */
if (num_min_rated_tcs) { │ if (num_min_rated_tcs) {
for (tc = 0; tc != nic_cfg->tcs; tc++) { │ for (tc = 0; tc != nic_cfg->tcs; tc++) {
if (!nic_cfg->tc_min_rate[tc]) { │ if (!nic_cfg->tc_min_rate[tc]) {
tc_weight[tc] = 0; │ tc_weight[tc] = 0;
continue; │ continue;
} │ }
│
tc_weight[tc] = (-1L + link_speed + │ tc_weight[tc] = (-1L + link_speed +
nic_cfg->tc_min_rate[tc] * │ nic_cfg->tc_min_rate[tc] *
max_weight) / │ max_weight) /
link_speed; │ link_speed;
tc_weight[tc] = min(tc_weight[tc], max_weight); │ tc_weight[tc] = min(tc_weight[tc], max_weight);
sum_weight += tc_weight[tc]; │ sum_weight += tc_weight[tc];
} │ }
} │ }
│
/* WSP, if min_rate is set for at least one TC. │ /* WSP, if min_rate is set for at least one TC.
* RR otherwise. │ * RR otherwise.
│ *
│ * NB! MAC FW sets arb mode itself if PTP is enabled. We shouldn't
│ * overwrite it here in that case.
*/ │ */
hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U); │ if (!nic_cfg->is_ptp)
│ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
│
/* Data TC Arbiter takes precedence over Descriptor TC Arbiter, │ /* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
* leave Descriptor TC Arbiter as RR. │ * leave Descriptor TC Arbiter as RR.
*/ │ */
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); │ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
│
hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U); │ hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U);
│
for (tc = 0; tc != nic_cfg->tcs; tc++) { │ for (tc = 0; tc != nic_cfg->tcs; tc++) {
const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U; │ const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U;
const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0); │ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
u32 weight, max_credit; │ u32 weight, max_credit;
│
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc, │ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc,
fixed_max_credit); │ fixed_max_credit);
hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E); │ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E);
│
if (num_min_rated_tcs) { │ if (num_min_rated_tcs) {
weight = tc_weight[tc]; │ weight = tc_weight[tc];
│
if (!weight && sum_weight < max_weight) │ if (!weight && sum_weight < max_weight)
weight = (max_weight - sum_weight) / │ weight = (max_weight - sum_weight) /
(nic_cfg->tcs - num_min_rated_tcs); │ (nic_cfg->tcs - num_min_rated_tcs);
else if (!weight) │ else if (!weight)
weight = 0x640; │ weight = 0x64;
│
max_credit = max(2 * weight, fixed_max_credit_4b); │ max_credit = max(8 * weight, fixed_max_credit);
} else { │ } else {
weight = 0x640; │ weight = 0x64;
max_credit = 0xFFF0; │ max_credit = 0xFFF;
} │ }
│
hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight); │ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight);
hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc, │ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc,
max_credit); │ max_credit);
│
hw_atl_tps_tx_desc_rate_en_set(self, desc, en); │ hw_atl_tps_tx_desc_rate_en_set(self, desc, en);
│
if (en) { │ if (en) {
/* Nominal rate is always 10G */ │ /* Nominal rate is always 10G */
const u32 rate = 10000U * scale / │ const u32 rate = 10000U * scale /
nic_cfg->tc_max_rate[tc]; │ nic_cfg->tc_max_rate[tc];
const u32 rate_int = rate >> │ const u32 rate_int = rate >>
HW_ATL_TPS_DESC_RATE_Y_WIDTH; │ HW_ATL_TPS_DESC_RATE_Y_WIDTH;
const u32 rate_frac = rate & frac_msk; │ const u32 rate_frac = rate & frac_msk;
│
hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int); │ hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac); │ hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
} else { │ } else {
/* A value of 1 indicates the queue is not │ /* A value of 1 indicates the queue is not
* rate controlled. │ * rate controlled.
*/ │ */
hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U); │ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U); │ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
} │ }
} │ }
for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) { │ for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) {
const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0); │ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
│
hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U); │ hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U);
hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U); │ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U); │ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
} │ }
│
return aq_hw_err_from_flags(self); │ return aq_hw_err_from_flags(self);
} │
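
The desc-rate programming at the end of both functions stores 10000U * scale / tc_max_rate as a fixed-point divider: the bits above HW_ATL_TPS_DESC_RATE_Y_WIDTH are the integer part of the quotient, the rest the fraction. A worked sketch, assuming a hypothetical 14-bit fractional width (the real width comes from the register definition, not from here):

	#include <stdint.h>
	#include <stdio.h>

	#define RATE_Y_WIDTH	14u			/* assumed fractional bits */
	#define RATE_SCALE	(1u << RATE_Y_WIDTH)
	#define RATE_FRAC_MSK	(RATE_SCALE - 1u)

	int main(void)
	{
		uint32_t tc_max_rate = 3000;	/* Mbit/s cap for this TC */

		/* Nominal rate is always 10G, as the comment above notes. */
		uint32_t rate = 10000u * RATE_SCALE / tc_max_rate;
		uint32_t rate_int = rate >> RATE_Y_WIDTH;	/* 3 */
		uint32_t rate_frac = rate & RATE_FRAC_MSK;	/* 5461 ~ 1/3 of 2^14 */

		/* 3 + 5461/16384 ~ 3.333, i.e. 10000/3000 as hoped. */
		printf("rate=%u -> %u + %u/%u\n", rate, rate_int, rate_frac, RATE_SCALE);
		return 0;
	}
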
linux/drivers/net/ethernet/neterion/vxge/vxge-config.c:4298 │ linux/drivers/net/ethernet/neterion/vxge/vxge-config.c:4405
│
│
val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); │ val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
│
if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( │ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
0x3ffffff); │ 0x3ffffff);
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( │ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
config->tti.btimer_val); │ config->rti.btimer_val);
} │ }
│
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN; │ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
│
if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
if (config->tti.timer_ac_en) │ if (config->rti.timer_ac_en)
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; │ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
else │ else
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; │ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
} │ }
│
if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
if (config->tti.timer_ci_en) │ if (config->rti.timer_ci_en)
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; │ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
else │ else
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; │ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
} │ }
│
if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f); │ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A( │ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
config->tti.urange_a); │ config->rti.urange_a);
} │ }
│
if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f); │ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B( │ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
config->tti.urange_b); │ config->rti.urange_b);
} │ }
│
if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f); │ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C( │ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
config->tti.urange_c); │ config->rti.urange_c);
} │ }
│
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); │ writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
vpath->tim_tti_cfg1_saved = val64; │ vpath->tim_rti_cfg1_saved = val64;
│
val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); │ val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
│
if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff); │ val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A( │ val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
config->tti.uec_a); │ config->rti.uec_a);
} │ }
│
if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff); │ val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B( │ val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
config->tti.uec_b); │ config->rti.uec_b);
} │ }
│
if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff); │ val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C( │ val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
config->tti.uec_c); │ config->rti.uec_c);
} │ }
│
if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff); │ val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D( │ val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
config->tti.uec_d); │ config->rti.uec_d);
} │ }
│
writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); │ writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); │ val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
│
if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
if (config->tti.timer_ri_en) │ if (config->rti.timer_ri_en)
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; │ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
else │ else
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; │ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
} │ }
│
if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( │ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
0x3ffffff); │ 0x3ffffff);
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( │ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
config->tti.rtimer_val); │ config->rti.rtimer_val);
} │ }
│
if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); │ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id); │ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
} │ }
│
if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { │ if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( │ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
0x3ffffff); │ 0x3ffffff);
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( │ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
config->tti.ltimer_val); │ config->rti.ltimer_val);
} │ }
│
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); │ writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
vpath->tim_tti_cfg3_saved = val64; │ vpath->tim_rti_cfg3_saved = val64;
} │
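
Every field update above follows the same read-modify-write shape: the VXGE_HW_*_VAL() macro is applied once to an all-ones argument to build the clear mask, and once to the new value to build the bits to OR in. A minimal sketch with a hypothetical 26-bit timer field at bits 31:6 of a 64-bit register image (DEMO_BTIMER_VAL is illustrative, not the driver's macro):

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical 26-bit timer value living at bits 31:6. */
	#define DEMO_BTIMER_VAL(v)	(((uint64_t)(v) & 0x3ffffffULL) << 6)

	int main(void)
	{
		uint64_t val64 = 0xffffffffffffffffULL;	/* stale register image */

		val64 &= ~DEMO_BTIMER_VAL(0x3ffffff);	/* macro(all-ones) = clear mask */
		val64 |= DEMO_BTIMER_VAL(1234);		/* macro(value) = new field */

		printf("val64 = 0x%016llx\n", (unsigned long long)val64);
		return 0;
	}
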
linux/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c:3960 │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c:8226
│
struct sk_buff *skb = first->skb; │ struct sk_buff *skb = first->skb;
struct ixgbevf_tx_buffer *tx_buffer; │ struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc; │ union ixgbe_adv_tx_desc *tx_desc;
skb_frag_t *frag; │ skb_frag_t *frag;
dma_addr_t dma; │ dma_addr_t dma;
unsigned int data_len, size; │ unsigned int data_len, size;
u32 tx_flags = first->tx_flags; │ u32 tx_flags = first->tx_flags;
__le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags); │ u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
u16 i = tx_ring->next_to_use; │ u16 i = tx_ring->next_to_use;
│
tx_desc = IXGBEVF_TX_DESC(tx_ring, i); │ tx_desc = IXGBE_TX_DESC(tx_ring, i);
│
ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); │ ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
│
size = skb_headlen(skb); │ size = skb_headlen(skb);
data_len = skb->data_len; │ data_len = skb->data_len;
│
│ #ifdef IXGBE_FCOE
│ if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
│ if (data_len < sizeof(struct fcoe_crc_eof)) {
│ size -= sizeof(struct fcoe_crc_eof) - data_len;
│ data_len = 0;
│ } else {
│ data_len -= sizeof(struct fcoe_crc_eof);
│ }
│ }
│
│ #endif
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); │ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
│
tx_buffer = first; │ tx_buffer = first;
│
for (frag = &skb_shinfo(skb)->frags[0];; frag++) { │ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
if (dma_mapping_error(tx_ring->dev, dma)) │ if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error; │ goto dma_error;
│
/* record length, and DMA address */ │ /* record length, and DMA address */
dma_unmap_len_set(tx_buffer, len, size); │ dma_unmap_len_set(tx_buffer, len, size);
dma_unmap_addr_set(tx_buffer, dma, dma); │ dma_unmap_addr_set(tx_buffer, dma, dma);
│
tx_desc->read.buffer_addr = cpu_to_le64(dma); │ tx_desc->read.buffer_addr = cpu_to_le64(dma);
│
while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { │ while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len = │ tx_desc->read.cmd_type_len =
cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); │ cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
│
i++; │ i++;
tx_desc++; │ tx_desc++;
if (i == tx_ring->count) { │ if (i == tx_ring->count) {
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); │ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
i = 0; │ i = 0;
} │ }
tx_desc->read.olinfo_status = 0; │ tx_desc->read.olinfo_status = 0;
│
dma += IXGBE_MAX_DATA_PER_TXD; │ dma += IXGBE_MAX_DATA_PER_TXD;
size -= IXGBE_MAX_DATA_PER_TXD; │ size -= IXGBE_MAX_DATA_PER_TXD;
│
tx_desc->read.buffer_addr = cpu_to_le64(dma); │ tx_desc->read.buffer_addr = cpu_to_le64(dma);
} │ }
│
if (likely(!data_len)) │ if (likely(!data_len))
break; │ break;
│
tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); │ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
│
i++; │ i++;
tx_desc++; │ tx_desc++;
if (i == tx_ring->count) { │ if (i == tx_ring->count) {
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); │ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
i = 0; │ i = 0;
} │ }
tx_desc->read.olinfo_status = 0; │ tx_desc->read.olinfo_status = 0;
│
│ #ifdef IXGBE_FCOE
│ size = min_t(unsigned int, data_len, skb_frag_size(frag));
│ #else
size = skb_frag_size(frag); │ size = skb_frag_size(frag);
│ #endif
data_len -= size; │ data_len -= size;
│
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, │ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
│
tx_buffer = &tx_ring->tx_buffer_info[i]; │ tx_buffer = &tx_ring->tx_buffer_info[i];
} │ }
│
/* write last descriptor with RS and EOP bits */ │ /* write last descriptor with RS and EOP bits */
cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); │ cmd_type |= size | IXGBE_TXD_CMD;
tx_desc->read.cmd_type_len = cmd_type; │ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
│
│ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
│
/* set the timestamp */ │ /* set the timestamp */
first->time_stamp = jiffies; │ first->time_stamp = jiffies;
│
skb_tx_timestamp(skb); │ skb_tx_timestamp(skb);
│
/* Force memory writes to complete before letting h/w know there │ /*
│ * Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered │ * are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64). │ * memory model archs, such as IA-64).
* │ *
* We also need this memory barrier (wmb) to make certain all of the │ * We also need this memory barrier to make certain all of the
* status bits have been updated before next_to_watch is written. │ * status bits have been updated before next_to_watch is written.
*/ │ */
wmb(); │ wmb();
│
/* set next_to_watch value indicating a packet is present */ │ /* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc; │ first->next_to_watch = tx_desc;
│
i++; │ i++;
if (i == tx_ring->count) │ if (i == tx_ring->count)
i = 0; │ i = 0;
│
tx_ring->next_to_use = i; │ tx_ring->next_to_use = i;
│
/* notify HW of packet */ │ ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
ixgbevf_write_tail(tx_ring, i); │
│ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
│ writel(i, tx_ring->tail);
│ }
│
return; │ return 0;
dma_error: │ dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n"); │ dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_buffer = &tx_ring->tx_buffer_info[i]; │
│
/* clear dma mappings for failed tx_buffer_info map */ │ /* clear dma mappings for failed tx_buffer_info map */
while (tx_buffer != first) { │ for (;;) {
│ tx_buffer = &tx_ring->tx_buffer_info[i];
if (dma_unmap_len(tx_buffer, len)) │ if (dma_unmap_len(tx_buffer, len))
dma_unmap_page(tx_ring->dev, │ dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma), │ dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len), │ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0); │ dma_unmap_len_set(tx_buffer, len, 0);
│ if (tx_buffer == first)
if (i-- == 0) │ break;
│ if (i == 0)
i += tx_ring->count; │ i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i]; │ i--;
} │ }
│
if (dma_unmap_len(tx_buffer, len)) │ dev_kfree_skb_any(first->skb);
dma_unmap_single(tx_ring->dev, │ first->skb = NULL;
dma_unmap_addr(tx_buffer, dma), │
dma_unmap_len(tx_buffer, len), │
DMA_TO_DEVICE); │
dma_unmap_len_set(tx_buffer, len, 0); │
│
dev_kfree_skb_any(tx_buffer->skb); │
tx_buffer->skb = NULL; │
│
tx_ring->next_to_use = i; │ tx_ring->next_to_use = i;
│
│ return -1;
} │
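
The interesting delta between these two panes is where the CPU-to-little-endian conversion happens: the VF driver keeps cmd_type already converted (__le32) and ORs in converted lengths, while the PF driver keeps host order (u32) and converts once at each store. Byte swapping distributes over bitwise OR, so both produce the same descriptor bytes; a sketch using glibc's htole32() (the values are made up):

	#include <endian.h>
	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t flags = 0x30000000u;	/* hypothetical command bits */
		uint32_t size  = 0x4000u;	/* buffer length, disjoint bits */

		uint32_t vf_style = htole32(flags) | htole32(size); /* swap early */
		uint32_t pf_style = htole32(flags ^ size);	    /* swap late  */

		assert(vf_style == pf_style);
		return 0;
	}
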
linux/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c:3960 │ linux/drivers/net/ethernet/intel/igb/igb_main.c:6113
│
struct sk_buff *skb = first->skb; │ struct sk_buff *skb = first->skb;
struct ixgbevf_tx_buffer *tx_buffer; │ struct igb_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc; │ union e1000_adv_tx_desc *tx_desc;
skb_frag_t *frag; │ skb_frag_t *frag;
dma_addr_t dma; │ dma_addr_t dma;
unsigned int data_len, size; │ unsigned int data_len, size;
u32 tx_flags = first->tx_flags; │ u32 tx_flags = first->tx_flags;
__le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags); │ u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
u16 i = tx_ring->next_to_use; │ u16 i = tx_ring->next_to_use;
│
tx_desc = IXGBEVF_TX_DESC(tx_ring, i); │ tx_desc = IGB_TX_DESC(tx_ring, i);
│
ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); │ igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
│
size = skb_headlen(skb); │ size = skb_headlen(skb);
data_len = skb->data_len; │ data_len = skb->data_len;
│
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); │ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
│
tx_buffer = first; │ tx_buffer = first;
│
for (frag = &skb_shinfo(skb)->frags[0];; frag++) { │ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
if (dma_mapping_error(tx_ring->dev, dma)) │ if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error; │ goto dma_error;
│
/* record length, and DMA address */ │ /* record length, and DMA address */
dma_unmap_len_set(tx_buffer, len, size); │ dma_unmap_len_set(tx_buffer, len, size);
dma_unmap_addr_set(tx_buffer, dma, dma); │ dma_unmap_addr_set(tx_buffer, dma, dma);
│
tx_desc->read.buffer_addr = cpu_to_le64(dma); │ tx_desc->read.buffer_addr = cpu_to_le64(dma);
│
while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { │ while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len = │ tx_desc->read.cmd_type_len =
cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); │ cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
│
i++; │ i++;
tx_desc++; │ tx_desc++;
if (i == tx_ring->count) { │ if (i == tx_ring->count) {
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); │ tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0; │ i = 0;
} │ }
tx_desc->read.olinfo_status = 0; │ tx_desc->read.olinfo_status = 0;
│
dma += IXGBE_MAX_DATA_PER_TXD; │ dma += IGB_MAX_DATA_PER_TXD;
size -= IXGBE_MAX_DATA_PER_TXD; │ size -= IGB_MAX_DATA_PER_TXD;
│
tx_desc->read.buffer_addr = cpu_to_le64(dma); │ tx_desc->read.buffer_addr = cpu_to_le64(dma);
} │ }
│
if (likely(!data_len)) │ if (likely(!data_len))
break; │ break;
│
tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); │ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
│
i++; │ i++;
tx_desc++; │ tx_desc++;
if (i == tx_ring->count) { │ if (i == tx_ring->count) {
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); │ tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0; │ i = 0;
} │ }
tx_desc->read.olinfo_status = 0; │ tx_desc->read.olinfo_status = 0;
│
size = skb_frag_size(frag); │ size = skb_frag_size(frag);
data_len -= size; │ data_len -= size;
│
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, │ dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
DMA_TO_DEVICE); │ size, DMA_TO_DEVICE);
│
tx_buffer = &tx_ring->tx_buffer_info[i]; │ tx_buffer = &tx_ring->tx_buffer_info[i];
} │ }
│
/* write last descriptor with RS and EOP bits */ │ /* write last descriptor with RS and EOP bits */
cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); │ cmd_type |= size | IGB_TXD_DCMD;
tx_desc->read.cmd_type_len = cmd_type; │ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
│
│ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
│
/* set the timestamp */ │ /* set the timestamp */
first->time_stamp = jiffies; │ first->time_stamp = jiffies;
│
skb_tx_timestamp(skb); │ skb_tx_timestamp(skb);
│
/* Force memory writes to complete before letting h/w know there │ /* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered │ * are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64). │ * memory model archs, such as IA-64).
* │ *
* We also need this memory barrier (wmb) to make certain all of the │ * We also need this memory barrier to make certain all of the
* status bits have been updated before next_to_watch is written. │ * status bits have been updated before next_to_watch is written.
*/ │ */
wmb(); │ dma_wmb();
│
/* set next_to_watch value indicating a packet is present */ │ /* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc; │ first->next_to_watch = tx_desc;
│
i++; │ i++;
if (i == tx_ring->count) │ if (i == tx_ring->count)
i = 0; │ i = 0;
│
tx_ring->next_to_use = i; │ tx_ring->next_to_use = i;
│
/* notify HW of packet */ │ /* Make sure there is space in the ring for the next send. */
ixgbevf_write_tail(tx_ring, i); │ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
│
│ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
│ writel(i, tx_ring->tail);
│ }
│ return 0;
│
return; │
dma_error: │ dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n"); │ dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_buffer = &tx_ring->tx_buffer_info[i]; │ tx_buffer = &tx_ring->tx_buffer_info[i];
│
/* clear dma mappings for failed tx_buffer_info map */ │ /* clear dma mappings for failed tx_buffer_info map */
while (tx_buffer != first) { │ while (tx_buffer != first) {
if (dma_unmap_len(tx_buffer, len)) │ if (dma_unmap_len(tx_buffer, len))
dma_unmap_page(tx_ring->dev, │ dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma), │ dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len), │ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0); │ dma_unmap_len_set(tx_buffer, len, 0);
│
if (i-- == 0) │ if (i-- == 0)
i += tx_ring->count; │ i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i]; │ tx_buffer = &tx_ring->tx_buffer_info[i];
} │ }
│
if (dma_unmap_len(tx_buffer, len)) │ if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(tx_ring->dev, │ dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma), │ dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len), │ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0); │ dma_unmap_len_set(tx_buffer, len, 0);
│
dev_kfree_skb_any(tx_buffer->skb); │ dev_kfree_skb_any(tx_buffer->skb);
tx_buffer->skb = NULL; │ tx_buffer->skb = NULL;
│
tx_ring->next_to_use = i; │ tx_ring->next_to_use = i;
│
│ return -1;
} │
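
The dma_error unwind in the left pane (and its for(;;) counterpart on the right) walks the ring backwards from the failing slot to the packet's first descriptor, wrapping below index 0. A tiny sketch of that wrap-around decrement over a hypothetical 8-entry ring where the packet crossed the end of the ring:

	#include <stdio.h>

	int main(void)
	{
		unsigned int count = 8, first = 6, i = 2;

		while (i != first) {
			printf("unmap fragment in slot %u\n", i);
			if (i-- == 0)		/* post-decrement: test, then step back */
				i += count;	/* wrap from below zero to count - 1 */
		}
		printf("unmap head in slot %u\n", i);	/* visits 2, 1, 0, 7, then 6 */
		return 0;
	}
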
linux/drivers/net/ethernet/broadcom/cnic_defs.h:877 │ linux/drivers/net/ethernet/broadcom/cnic_defs.h:700
│
u32 __agg_val1; │ u32 __agg_val1;
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u8 __tcp_agg_vars2; │ u8 __tcp_agg_vars2;
u8 __agg_val3; │ u8 __agg_val3;
u16 __agg_val2; │ u16 __agg_val2;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u16 __agg_val2; │ u16 __agg_val2;
u8 __agg_val3; │ u8 __agg_val3;
u8 __tcp_agg_vars2; │ u8 __tcp_agg_vars2;
#endif │ #endif
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u16 __agg_val5; │ u16 __agg_val5;
u8 __agg_val6; │ u8 __agg_val6;
u8 __tcp_agg_vars3; │ u8 __tcp_agg_vars3;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u8 __tcp_agg_vars3; │ u8 __tcp_agg_vars3;
u8 __agg_val6; │ u8 __agg_val6;
u16 __agg_val5; │ u16 __agg_val5;
#endif │ #endif
u32 snd_nxt; │ u32 __lcq_prod;
u32 rtt_seq; │ u32 rtt_seq;
u32 rtt_time; │ u32 rtt_time;
u32 wnd_right_edge_local; │ u32 __reserved66;
u32 wnd_right_edge; │ u32 wnd_right_edge;
u32 tcp_agg_vars1; │ u32 tcp_agg_vars1;
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9) │ #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9 │ #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19) │ #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19 │ #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20) │ #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20 │ #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21) │ #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21 │ #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22) │ #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22 │ #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28) │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28 │ #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
u32 snd_max; │ u32 snd_max;
u32 snd_una; │ u32 __lcq_cons;
u32 __reserved2; │ u32 __reserved2;
} │
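
The #if defined(__BIG_ENDIAN) blocks mirror each u16/u8/u8 group so that every field keeps the same byte lane within its 32-bit word on either host, which is what the device-visible layout requires. A sketch of the same trick using the GCC/Clang __BYTE_ORDER__ predefine in place of the kernel's endian macros (demo_word and its fields are illustrative):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct demo_word {
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		uint8_t  vars2;		/* mirrored order keeps offsets fixed */
		uint8_t  val3;
		uint16_t val2;
	#else
		uint16_t val2;
		uint8_t  val3;
		uint8_t  vars2;
	#endif
	};

	int main(void)
	{
		struct demo_word w = { .val2 = 0x1122, .val3 = 0x33, .vars2 = 0x44 };
		uint32_t raw;

		memcpy(&raw, &w, sizeof(raw));
		/* Reads 0x44331122 on both big- and little-endian hosts. */
		printf("word = 0x%08x\n", raw);
		return 0;
	}
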
linux/drivers/net/ethernet/intel/igc/igc_main.c:1186 │ linux/drivers/net/ethernet/intel/igb/igb_main.c:6113
│
struct sk_buff *skb = first->skb; │ struct sk_buff *skb = first->skb;
struct igc_tx_buffer *tx_buffer; │ struct igb_tx_buffer *tx_buffer;
union igc_adv_tx_desc *tx_desc; │ union e1000_adv_tx_desc *tx_desc;
u32 tx_flags = first->tx_flags; │
skb_frag_t *frag; │ skb_frag_t *frag;
u16 i = tx_ring->next_to_use; │
unsigned int data_len, size; │
dma_addr_t dma; │ dma_addr_t dma;
u32 cmd_type; │ unsigned int data_len, size;
│ u32 tx_flags = first->tx_flags;
│ u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
│ u16 i = tx_ring->next_to_use;
│
cmd_type = igc_tx_cmd_type(skb, tx_flags); │ tx_desc = IGB_TX_DESC(tx_ring, i);
tx_desc = IGC_TX_DESC(tx_ring, i); │
│
igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); │ igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
│
size = skb_headlen(skb); │ size = skb_headlen(skb);
data_len = skb->data_len; │ data_len = skb->data_len;
│
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); │ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
│
tx_buffer = first; │ tx_buffer = first;
│
for (frag = &skb_shinfo(skb)->frags[0];; frag++) { │ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
if (dma_mapping_error(tx_ring->dev, dma)) │ if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error; │ goto dma_error;
│
/* record length, and DMA address */ │ /* record length, and DMA address */
dma_unmap_len_set(tx_buffer, len, size); │ dma_unmap_len_set(tx_buffer, len, size);
dma_unmap_addr_set(tx_buffer, dma, dma); │ dma_unmap_addr_set(tx_buffer, dma, dma);
│
tx_desc->read.buffer_addr = cpu_to_le64(dma); │ tx_desc->read.buffer_addr = cpu_to_le64(dma);
│
while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { │ while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len = │ tx_desc->read.cmd_type_len =
cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); │ cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
│
i++; │ i++;
tx_desc++; │ tx_desc++;
if (i == tx_ring->count) { │ if (i == tx_ring->count) {
tx_desc = IGC_TX_DESC(tx_ring, 0); │ tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0; │ i = 0;
} │ }
tx_desc->read.olinfo_status = 0; │ tx_desc->read.olinfo_status = 0;
│
dma += IGC_MAX_DATA_PER_TXD; │ dma += IGB_MAX_DATA_PER_TXD;
size -= IGC_MAX_DATA_PER_TXD; │ size -= IGB_MAX_DATA_PER_TXD;
│
tx_desc->read.buffer_addr = cpu_to_le64(dma); │ tx_desc->read.buffer_addr = cpu_to_le64(dma);
} │ }
│
if (likely(!data_len)) │ if (likely(!data_len))
break; │ break;
│
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); │ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
│
i++; │ i++;
tx_desc++; │ tx_desc++;
if (i == tx_ring->count) { │ if (i == tx_ring->count) {
tx_desc = IGC_TX_DESC(tx_ring, 0); │ tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0; │ i = 0;
} │ }
tx_desc->read.olinfo_status = 0; │ tx_desc->read.olinfo_status = 0;
│
size = skb_frag_size(frag); │ size = skb_frag_size(frag);
data_len -= size; │ data_len -= size;
│
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, │ dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
size, DMA_TO_DEVICE); │ size, DMA_TO_DEVICE);
│
tx_buffer = &tx_ring->tx_buffer_info[i]; │ tx_buffer = &tx_ring->tx_buffer_info[i];
} │ }
│
/* write last descriptor with RS and EOP bits */ │ /* write last descriptor with RS and EOP bits */
cmd_type |= size | IGC_TXD_DCMD; │ cmd_type |= size | IGB_TXD_DCMD;
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); │ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
│
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); │ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
│
/* set the timestamp */ │ /* set the timestamp */
first->time_stamp = jiffies; │ first->time_stamp = jiffies;
│
skb_tx_timestamp(skb); │ skb_tx_timestamp(skb);
│
/* Force memory writes to complete before letting h/w know there │ /* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered │ * are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64). │ * memory model archs, such as IA-64).
* │ *
* We also need this memory barrier to make certain all of the │ * We also need this memory barrier to make certain all of the
* status bits have been updated before next_to_watch is written. │ * status bits have been updated before next_to_watch is written.
*/ │ */
wmb(); │ dma_wmb();
│
/* set next_to_watch value indicating a packet is present */ │ /* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc; │ first->next_to_watch = tx_desc;
│
i++; │ i++;
if (i == tx_ring->count) │ if (i == tx_ring->count)
i = 0; │ i = 0;
│
tx_ring->next_to_use = i; │ tx_ring->next_to_use = i;
│
/* Make sure there is space in the ring for the next send. */ │ /* Make sure there is space in the ring for the next send. */
igc_maybe_stop_tx(tx_ring, DESC_NEEDED); │ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
│
if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { │ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail); │ writel(i, tx_ring->tail);
} │ }
│
return 0; │ return 0;
│
dma_error: │ dma_error:
netdev_err(tx_ring->netdev, "TX DMA map failed\n"); │ dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_buffer = &tx_ring->tx_buffer_info[i]; │ tx_buffer = &tx_ring->tx_buffer_info[i];
│
/* clear dma mappings for failed tx_buffer_info map */ │ /* clear dma mappings for failed tx_buffer_info map */
while (tx_buffer != first) { │ while (tx_buffer != first) {
if (dma_unmap_len(tx_buffer, len)) │ if (dma_unmap_len(tx_buffer, len))
igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); │ dma_unmap_page(tx_ring->dev,
│ dma_unmap_addr(tx_buffer, dma),
│ dma_unmap_len(tx_buffer, len),
│ DMA_TO_DEVICE);
│ dma_unmap_len_set(tx_buffer, len, 0);
│
if (i-- == 0) │ if (i-- == 0)
i += tx_ring->count; │ i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i]; │ tx_buffer = &tx_ring->tx_buffer_info[i];
} │ }
│
if (dma_unmap_len(tx_buffer, len)) │ if (dma_unmap_len(tx_buffer, len))
igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); │ dma_unmap_single(tx_ring->dev,
│ dma_unmap_addr(tx_buffer, dma),
│ dma_unmap_len(tx_buffer, len),
│ DMA_TO_DEVICE);
│ dma_unmap_len_set(tx_buffer, len, 0);
│
dev_kfree_skb_any(tx_buffer->skb); │ dev_kfree_skb_any(tx_buffer->skb);
tx_buffer->skb = NULL; │ tx_buffer->skb = NULL;
│
tx_ring->next_to_use = i; │ tx_ring->next_to_use = i;
│
return -1; │ return -1;
} │
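
Both columns end with the same producer handshake: fill the descriptors, issue a barrier, publish next_to_watch, then ring the doorbell. The only divergence is wmb() (left, igc) versus the lighter dma_wmb() (right, igb), which suffices to order coherent-memory descriptor writes against the tail update. A minimal kernel-context sketch of the pattern; struct my_ring, my_desc and post_descriptor() are hypothetical stand-ins for the driver types, not driver API:

    /* assumes linux/io.h and asm/barrier.h in a kernel build */
    struct my_desc { unsigned long long addr; unsigned int cmd_type_len; };
    struct my_ring {
            struct my_desc *descs;      /* coherent DMA descriptor ring */
            unsigned int next_to_use;
            unsigned int count;
            void __iomem *tail;         /* MMIO doorbell register */
    };

    static void post_descriptor(struct my_ring *ring, const struct my_desc *d)
    {
            ring->descs[ring->next_to_use] = *d;   /* descriptor write to DMA memory */

            dma_wmb();      /* order the descriptor store before the doorbell;
                             * wmb() also works but is heavier than needed here */

            if (++ring->next_to_use == ring->count)
                    ring->next_to_use = 0;
            writel(ring->next_to_use, ring->tail); /* device may fetch from here on */
    }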
next prev up linux/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c:226 │ linux/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c:496
│
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); │ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key; │ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct mlx5_pkt_reformat_params reformat_params; │ struct mlx5_pkt_reformat_params reformat_params;
struct mlx5e_neigh m_neigh = {}; │ struct mlx5e_neigh m_neigh = {};
TC_TUN_ROUTE_ATTR_INIT(attr); │ TC_TUN_ROUTE_ATTR_INIT(attr);
int ipv4_encap_size; │ struct ipv6hdr *ip6h;
│ int ipv6_encap_size;
char *encap_header; │ char *encap_header;
struct iphdr *ip; │
u8 nud_state; │ u8 nud_state;
int err; │ int err;
│
/* add the IP fields */ │
attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK; │
attr.fl.fl4.daddr = tun_key->u.ipv4.dst; │
attr.fl.fl4.saddr = tun_key->u.ipv4.src; │
attr.ttl = tun_key->ttl; │ attr.ttl = tun_key->ttl;
│ attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
│ attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
│ attr.fl.fl6.saddr = tun_key->u.ipv6.src;
│
err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &attr); │ err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &attr);
if (err) │ if (err)
return err; │ return err;
│
ipv4_encap_size = │ ipv6_encap_size =
(is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) + │ (is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
sizeof(struct iphdr) + │ sizeof(struct ipv6hdr) +
e->tunnel->calc_hlen(e); │ e->tunnel->calc_hlen(e);
│
if (max_encap_size < ipv4_encap_size) { │ if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n │ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n
ipv4_encap_size, max_encap_size); │ ipv6_encap_size, max_encap_size);
err = -EOPNOTSUPP; │ err = -EOPNOTSUPP;
goto release_neigh; │ goto release_neigh;
} │ }
│
encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL); │ encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
if (!encap_header) { │ if (!encap_header) {
err = -ENOMEM; │ err = -ENOMEM;
goto release_neigh; │ goto release_neigh;
} │ }
│
m_neigh.family = attr.n->ops->family; │ m_neigh.family = attr.n->ops->family;
memcpy(&m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len); │ memcpy(&m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len);
e->out_dev = attr.out_dev; │ e->out_dev = attr.out_dev;
e->route_dev_ifindex = attr.route_dev->ifindex; │ e->route_dev_ifindex = attr.route_dev->ifindex;
│
/* It's important to add the neigh to the hash table before checking │ /* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we get a notification when the       │	 * the neigh validity state. So if we get a notification when the
	 * neigh changes its validity state, we will find the relevant neigh    │	 * neigh changes its validity state, we will find the relevant neigh
* in the hash. │ * in the hash.
*/ │ */
err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e, &m_neigh, attr. │ err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e, &m_neigh, attr.
if (err) │ if (err)
goto free_encap; │ goto free_encap;
│
read_lock_bh(&attr.n->lock); │ read_lock_bh(&attr.n->lock);
nud_state = attr.n->nud_state; │ nud_state = attr.n->nud_state;
ether_addr_copy(e->h_dest, attr.n->ha); │ ether_addr_copy(e->h_dest, attr.n->ha);
read_unlock_bh(&attr.n->lock); │ read_unlock_bh(&attr.n->lock);
│
/* add ethernet header */ │ /* add ethernet header */
ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e, │ ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e,
ETH_P_IP); │ ETH_P_IPV6);
│
/* add ip header */ │ /* add ip header */
ip->tos = tun_key->tos; │ ip6_flow_hdr(ip6h, tun_key->tos, 0);
ip->version = 0x4; │ /* the HW fills up ipv6 payload len */
ip->ihl = 0x5; │ ip6h->hop_limit = attr.ttl;
ip->ttl = attr.ttl; │ ip6h->daddr = attr.fl.fl6.daddr;
ip->daddr = attr.fl.fl4.daddr; │ ip6h->saddr = attr.fl.fl6.saddr;
ip->saddr = attr.fl.fl4.saddr; │
│
/* add tunneling protocol header */ │ /* add tunneling protocol header */
err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr), │ err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
&ip->protocol, e); │ &ip6h->nexthdr, e);
if (err) │ if (err)
goto destroy_neigh_entry; │ goto destroy_neigh_entry;
│
e->encap_size = ipv4_encap_size; │ e->encap_size = ipv6_encap_size;
e->encap_header = encap_header; │ e->encap_header = encap_header;
│
if (!(nud_state & NUD_VALID)) { │ if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL); │ neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event │ /* the encap entry will be made valid on neigh update event
* and not used before that. │ * and not used before that.
*/ │ */
goto release_neigh; │ goto release_neigh;
} │ }
│
memset(&reformat_params, 0, sizeof(reformat_params)); │ memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type; │ reformat_params.type = e->reformat_type;
reformat_params.size = ipv4_encap_size; │ reformat_params.size = ipv6_encap_size;
reformat_params.data = encap_header; │ reformat_params.data = encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, │ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB); │ MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) { │ if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat); │ err = PTR_ERR(e->pkt_reformat);
goto destroy_neigh_entry; │ goto destroy_neigh_entry;
} │ }
│
e->flags |= MLX5_ENCAP_ENTRY_VALID; │ e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); │ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv4_put(&attr); │ mlx5e_route_lookup_ipv6_put(&attr);
return err; │ return err;
│
destroy_neigh_entry: │ destroy_neigh_entry:
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); │ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap: │ free_encap:
kfree(encap_header); │ kfree(encap_header);
release_neigh: │ release_neigh:
mlx5e_route_lookup_ipv4_put(&attr); │ mlx5e_route_lookup_ipv6_put(&attr);
return err; │ return err;
} │
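
The two encap builders differ only in the L3 header they prepend, so the size check is easy to verify by hand. A standalone worked example of both computations, assuming a VXLAN tunnel whose calc_hlen() contribution is 16 bytes (8 UDP + 8 VXLAN) and an untagged route device (ETH_HLEN = 14):

    #include <stdio.h>

    int main(void)
    {
            const int eth_hlen = 14, iphdr = 20, ipv6hdr = 40, tun_hlen = 8 + 8;

            printf("ipv4 encap: %d bytes\n", eth_hlen + iphdr + tun_hlen);   /* 50 */
            printf("ipv6 encap: %d bytes\n", eth_hlen + ipv6hdr + tun_hlen); /* 70 */
            return 0;
    }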
next prev up linux/drivers/net/ethernet/sun/niu.c:755 │ linux/drivers/net/ethernet/sun/niu.c:2351
│
struct niu_link_config *lp = &np->link_config; │ struct niu_link_config *lp = &np->link_config;
unsigned long ctrl_reg, test_cfg_reg, i; │ unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
u64 ctrl_val, test_cfg_val, sig, mask, val; │ u64 ctrl_val, test_cfg_val, sig, mask, val;
int err; │
│
switch (np->port) { │ switch (np->port) {
case 0: │ case 0:
ctrl_reg = ENET_SERDES_0_CTRL_CFG; │ ctrl_reg = ENET_SERDES_0_CTRL_CFG;
test_cfg_reg = ENET_SERDES_0_TEST_CFG; │ test_cfg_reg = ENET_SERDES_0_TEST_CFG;
│ pll_cfg = ENET_SERDES_0_PLL_CFG;
break; │ break;
case 1: │ case 1:
ctrl_reg = ENET_SERDES_1_CTRL_CFG; │ ctrl_reg = ENET_SERDES_1_CTRL_CFG;
test_cfg_reg = ENET_SERDES_1_TEST_CFG; │ test_cfg_reg = ENET_SERDES_1_TEST_CFG;
│ pll_cfg = ENET_SERDES_1_PLL_CFG;
break; │ break;
│
default: │ default:
return -EINVAL; │ return -EINVAL;
} │ }
ctrl_val = (ENET_SERDES_CTRL_SDET_0 | │ ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
ENET_SERDES_CTRL_SDET_1 | │ ENET_SERDES_CTRL_SDET_1 |
ENET_SERDES_CTRL_SDET_2 | │ ENET_SERDES_CTRL_SDET_2 |
ENET_SERDES_CTRL_SDET_3 | │ ENET_SERDES_CTRL_SDET_3 |
(0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | │ (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
(0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | │ (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
(0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | │ (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
(0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | │ (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
(0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | │ (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
(0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | │ (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
(0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | │ (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
(0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); │ (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
test_cfg_val = 0; │ test_cfg_val = 0;
│
if (lp->loopback_mode == LOOPBACK_PHY) { │ if (lp->loopback_mode == LOOPBACK_PHY) {
test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << │ test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
ENET_SERDES_TEST_MD_0_SHIFT) | │ ENET_SERDES_TEST_MD_0_SHIFT) |
(ENET_TEST_MD_PAD_LOOPBACK << │ (ENET_TEST_MD_PAD_LOOPBACK <<
ENET_SERDES_TEST_MD_1_SHIFT) | │ ENET_SERDES_TEST_MD_1_SHIFT) |
(ENET_TEST_MD_PAD_LOOPBACK << │ (ENET_TEST_MD_PAD_LOOPBACK <<
ENET_SERDES_TEST_MD_2_SHIFT) | │ ENET_SERDES_TEST_MD_2_SHIFT) |
(ENET_TEST_MD_PAD_LOOPBACK << │ (ENET_TEST_MD_PAD_LOOPBACK <<
ENET_SERDES_TEST_MD_3_SHIFT)); │ ENET_SERDES_TEST_MD_3_SHIFT));
} │ }
│
│ esr_reset(np);
│ nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
nw64(ctrl_reg, ctrl_val); │ nw64(ctrl_reg, ctrl_val);
nw64(test_cfg_reg, test_cfg_val); │ nw64(test_cfg_reg, test_cfg_val);
│
/* Initialize all 4 lanes of the SERDES. */ │ /* Initialize all 4 lanes of the SERDES. */
for (i = 0; i < 4; i++) { │ for (i = 0; i < 4; i++) {
u32 rxtx_ctrl, glue0; │ u32 rxtx_ctrl, glue0;
│ int err;
│
err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); │ err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
if (err) │ if (err)
return err; │ return err;
err = esr_read_glue0(np, i, &glue0); │ err = esr_read_glue0(np, i, &glue0);
if (err) │ if (err)
return err; │ return err;
│
rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); │ rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | │ rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
(2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); │ (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
│
glue0 &= ~(ESR_GLUE_CTRL0_SRATE | │ glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
ESR_GLUE_CTRL0_THCNT | │ ESR_GLUE_CTRL0_THCNT |
ESR_GLUE_CTRL0_BLTIME); │ ESR_GLUE_CTRL0_BLTIME);
glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | │ glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
(0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | │ (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
(0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | │ (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
(BLTIME_300_CYCLES << │ (BLTIME_300_CYCLES <<
ESR_GLUE_CTRL0_BLTIME_SHIFT)); │ ESR_GLUE_CTRL0_BLTIME_SHIFT));
│
err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); │ err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
if (err) │ if (err)
return err; │ return err;
err = esr_write_glue0(np, i, glue0); │ err = esr_write_glue0(np, i, glue0);
if (err) │ if (err)
return err; │ return err;
} │ }
│
err = esr_reset(np); │
if (err) │
return err; │
│
sig = nr64(ESR_INT_SIGNALS); │ sig = nr64(ESR_INT_SIGNALS);
switch (np->port) { │ switch (np->port) {
case 0: │ case 0:
mask = ESR_INT_SIGNALS_P0_BITS; │ mask = ESR_INT_SIGNALS_P0_BITS;
val = (ESR_INT_SRDY0_P0 | │ val = (ESR_INT_SRDY0_P0 |
ESR_INT_DET0_P0 | │ ESR_INT_DET0_P0 |
ESR_INT_XSRDY_P0 | │ ESR_INT_XSRDY_P0 |
ESR_INT_XDP_P0_CH3 | │ ESR_INT_XDP_P0_CH3 |
ESR_INT_XDP_P0_CH2 | │ ESR_INT_XDP_P0_CH2 |
ESR_INT_XDP_P0_CH1 | │ ESR_INT_XDP_P0_CH1 |
ESR_INT_XDP_P0_CH0); │ ESR_INT_XDP_P0_CH0);
break; │ break;
│
case 1: │ case 1:
mask = ESR_INT_SIGNALS_P1_BITS; │ mask = ESR_INT_SIGNALS_P1_BITS;
val = (ESR_INT_SRDY0_P1 | │ val = (ESR_INT_SRDY0_P1 |
ESR_INT_DET0_P1 | │ ESR_INT_DET0_P1 |
ESR_INT_XSRDY_P1 | │ ESR_INT_XSRDY_P1 |
ESR_INT_XDP_P1_CH3 | │ ESR_INT_XDP_P1_CH3 |
ESR_INT_XDP_P1_CH2 | │ ESR_INT_XDP_P1_CH2 |
ESR_INT_XDP_P1_CH1 | │ ESR_INT_XDP_P1_CH1 |
ESR_INT_XDP_P1_CH0); │ ESR_INT_XDP_P1_CH0);
break; │ break;
│
default: │ default:
return -EINVAL; │ return -EINVAL;
} │ }
│
if ((sig & mask) != val) { │ if ((sig & mask) != val) {
if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { │ int err;
np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; │ err = serdes_init_1g_serdes(np);
return 0; │ if (!err) {
│ np->flags &= ~NIU_FLAGS_10G;
│ np->mac_xcvr = MAC_XCVR_PCS;
│ } else {
│ netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
│ np->port);
│ return -ENODEV;
} │ }
netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", │
np->port, (int)(sig & mask), (int)val); │
return -ENODEV; │
} │ }
if (np->flags & NIU_FLAGS_HOTPLUG_PHY) │
np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; │
return 0; │ return 0;
} │
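
The post-init link check in both columns is a masked register compare: read ESR_INT_SIGNALS, isolate the port's bits, and require every expected SRDY/DET bit to be set. A tiny standalone model of that test; the mask and value here are made up for illustration, not the ESR_INT_* constants:

    #include <stdint.h>
    #include <stdio.h>

    static int link_ok(uint64_t sig, uint64_t mask, uint64_t val)
    {
            return (sig & mask) == val;   /* all expected bits set, no stray ones */
    }

    int main(void)
    {
            uint64_t mask = 0xff, val = 0x8f;          /* hypothetical per-port bits */

            printf("%d\n", link_ok(0x8f, mask, val));  /* 1: link up */
            printf("%d\n", link_ok(0x0f, mask, val));  /* 0: a ready bit is missing */
            return 0;
    }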
next prev up linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c:167 │ linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c:2054
│
/* Builders */ │ /* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init, │ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
.build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init, │ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
.build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init, │ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
.build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init, │ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
.build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init, │ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
.build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init, │ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
.build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init, │ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
.build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init, │ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
.build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init, │ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
.build_mpls_init = &dr_ste_v1_build_mpls_init, │ .build_mpls_init = &dr_ste_v1_build_mpls_init,
.build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init, │ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
.build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init, │ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
.build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init, │ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
.build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init, │ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
.build_icmp_init = &dr_ste_v1_build_icmp_init, │ .build_icmp_init = &dr_ste_v1_build_icmp_init,
.build_general_purpose_init = &dr_ste_v1_build_general_purpose_init, │ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
.build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init, │ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
.build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_ini │ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_ini
.build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init, │ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
.build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_op │ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_op
.build_tnl_geneve_tlv_opt_exist_init = │ .build_tnl_geneve_tlv_opt_exist_init = &dr_ste_v1_build_flex_parser_tnl_geneve_t
&dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_ │
.build_register_0_init = &dr_ste_v1_build_register_0_init, │ .build_register_0_init = &dr_ste_v1_build_register_0_init,
.build_register_1_init = &dr_ste_v1_build_register_1_init, │ .build_register_1_init = &dr_ste_v1_build_register_1_init,
.build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init, │ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
.build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init, │ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
.build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init, │ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
.build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init, │ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
.build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init, │ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_ini │ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_ini
.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_ini │ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_ini
│
/* Getters and Setters */ │ /* Getters and Setters */
.ste_init = &dr_ste_v1_init, │ .ste_init = &dr_ste_v1_init,
.set_next_lu_type = &dr_ste_v1_set_next_lu_type, │ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
.get_next_lu_type = &dr_ste_v1_get_next_lu_type, │ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
.set_miss_addr = &dr_ste_v1_set_miss_addr, │ .set_miss_addr = &dr_ste_v1_set_miss_addr,
.get_miss_addr = &dr_ste_v1_get_miss_addr, │ .get_miss_addr = &dr_ste_v1_get_miss_addr,
.set_hit_addr = &dr_ste_v1_set_hit_addr, │ .set_hit_addr = &dr_ste_v1_set_hit_addr,
.set_byte_mask = &dr_ste_v1_set_byte_mask, │ .set_byte_mask = &dr_ste_v1_set_byte_mask,
.get_byte_mask = &dr_ste_v1_get_byte_mask, │ .get_byte_mask = &dr_ste_v1_get_byte_mask,
│
/* Actions */ │ /* Actions */
.actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP | │ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
DR_STE_CTX_ACTION_CAP_RX_PUSH | │ DR_STE_CTX_ACTION_CAP_RX_PUSH |
DR_STE_CTX_ACTION_CAP_RX_ENCAP, │ DR_STE_CTX_ACTION_CAP_RX_ENCAP |
│ DR_STE_CTX_ACTION_CAP_POP_MDFY,
.set_actions_rx = &dr_ste_v1_set_actions_rx, │ .set_actions_rx = &dr_ste_v1_set_actions_rx,
.set_actions_tx = &dr_ste_v1_set_actions_tx, │ .set_actions_tx = &dr_ste_v1_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr), │ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
.modify_field_arr = dr_ste_v2_action_modify_field_arr, │ .modify_field_arr = dr_ste_v1_action_modify_field_arr,
.set_action_set = &dr_ste_v1_set_action_set, │ .set_action_set = &dr_ste_v1_set_action_set,
.set_action_add = &dr_ste_v1_set_action_add, │ .set_action_add = &dr_ste_v1_set_action_add,
.set_action_copy = &dr_ste_v1_set_action_copy, │ .set_action_copy = &dr_ste_v1_set_action_copy,
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list, │ .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
│
/* Send */ │ /* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, │ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
} │
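
Both columns populate the same ops table (struct mlx5dr_ste_ctx in the kernel tree), so the v1 and v2 STE formats hide behind one set of function pointers and callers never branch on the hardware generation. A minimal standalone model of that dispatch pattern, with all names hypothetical:

    #include <stdio.h>

    struct ste_ctx {
            void (*set_miss_addr)(unsigned char *ste, unsigned long addr);
    };

    static void v1_set_miss_addr(unsigned char *ste, unsigned long addr)
    {
            printf("v1 miss addr <- %#lx\n", addr);   /* stand-in for bitfield writes */
    }

    static const struct ste_ctx ste_ctx_v1 = {
            .set_miss_addr = v1_set_miss_addr,
    };

    int main(void)
    {
            unsigned char ste[64] = { 0 };

            ste_ctx_v1.set_miss_addr(ste, 0x1000);    /* dispatch through the table */
            return 0;
    }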
next prev up linux/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c:636 │ linux/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c:762
│
│ struct iavf_vlan_filter *f, *ftmp;
int len, i = 0, count = 0; │ int len, i = 0, count = 0;
struct iavf_vlan_filter *f; │
bool more = false; │ bool more = false;
│
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { │ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ │ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n", │ dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n"
adapter->current_op); │ adapter->current_op);
return; │ return;
} │ }
│
spin_lock_bh(&adapter->mac_vlan_list_lock); │ spin_lock_bh(&adapter->mac_vlan_list_lock);
│
list_for_each_entry(f, &adapter->vlan_filter_list, list) { │ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		if (f->add)                                                      │		/* since VLAN capabilities are not allowed, we don't want to send
│ * a VLAN delete request because it will most likely fail and
│ * create unnecessary errors/noise, so just free the VLAN
│ * filters marked for removal to enable bailing out before
│ * sending a virtchnl message
│ */
│ if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
│ list_del(&f->list);
│ kfree(f);
│ } else if (f->remove) {
count++; │ count++;
│ }
} │ }
if (!count || !VLAN_FILTERING_ALLOWED(adapter)) { │ if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; │ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock); │ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return; │ return;
} │ }
│
if (VLAN_ALLOWED(adapter)) { │ if (VLAN_ALLOWED(adapter)) {
struct virtchnl_vlan_filter_list *vvfl; │ struct virtchnl_vlan_filter_list *vvfl;
│
adapter->current_op = VIRTCHNL_OP_ADD_VLAN; │ adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
│
len = sizeof(*vvfl) + (count * sizeof(u16)); │ len = sizeof(*vvfl) + (count * sizeof(u16));
if (len > IAVF_MAX_AQ_BUF_SIZE) { │ if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one │ dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in o
count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) / │ count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
sizeof(u16); │ sizeof(u16);
len = sizeof(*vvfl) + (count * sizeof(u16)); │ len = sizeof(*vvfl) + (count * sizeof(u16));
more = true; │ more = true;
} │ }
vvfl = kzalloc(len, GFP_ATOMIC); │ vvfl = kzalloc(len, GFP_ATOMIC);
if (!vvfl) { │ if (!vvfl) {
spin_unlock_bh(&adapter->mac_vlan_list_lock); │ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return; │ return;
} │ }
│
vvfl->vsi_id = adapter->vsi_res->vsi_id; │ vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count; │ vvfl->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) { │ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
if (f->add) { │ if (f->remove) {
vvfl->vlan_id[i] = f->vlan.vid; │ vvfl->vlan_id[i] = f->vlan.vid;
i++; │ i++;
f->add = false; │ list_del(&f->list);
│ kfree(f);
if (i == count) │ if (i == count)
break; │ break;
} │ }
} │ }
│
if (!more) │ if (!more)
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; │ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
│
spin_unlock_bh(&adapter->mac_vlan_list_lock); │ spin_unlock_bh(&adapter->mac_vlan_list_lock);
│
iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); │ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl); │ kfree(vvfl);
} else { │ } else {
struct virtchnl_vlan_filter_list_v2 *vvfl_v2; │ struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
│
adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2; │ adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;
│
len = sizeof(*vvfl_v2) + ((count - 1) * │ len = sizeof(*vvfl_v2) +
sizeof(struct virtchnl_vlan_filter)); │ ((count - 1) * sizeof(struct virtchnl_vlan_filter));
if (len > IAVF_MAX_AQ_BUF_SIZE) { │ if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one │			dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one
count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) / │ count = (IAVF_MAX_AQ_BUF_SIZE -
│ sizeof(*vvfl_v2)) /
sizeof(struct virtchnl_vlan_filter); │ sizeof(struct virtchnl_vlan_filter);
len = sizeof(*vvfl_v2) + │ len = sizeof(*vvfl_v2) +
((count - 1) * │ ((count - 1) *
sizeof(struct virtchnl_vlan_filter)); │ sizeof(struct virtchnl_vlan_filter));
more = true; │ more = true;
} │ }
│
vvfl_v2 = kzalloc(len, GFP_ATOMIC); │ vvfl_v2 = kzalloc(len, GFP_ATOMIC);
if (!vvfl_v2) { │ if (!vvfl_v2) {
spin_unlock_bh(&adapter->mac_vlan_list_lock); │ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return; │ return;
} │ }
│
vvfl_v2->vport_id = adapter->vsi_res->vsi_id; │ vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
vvfl_v2->num_elements = count; │ vvfl_v2->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) { │ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
if (f->add) { │ if (f->remove) {
struct virtchnl_vlan_supported_caps *filtering_support = │ struct virtchnl_vlan_supported_caps *filtering_support =
&adapter->vlan_v2_caps.filtering.filtering_suppo │ &adapter->vlan_v2_caps.filtering.filtering_suppo
struct virtchnl_vlan *vlan; │ struct virtchnl_vlan *vlan;
│
/* give priority over outer if it's enabled */ │ /* give priority over outer if it's enabled */
if (filtering_support->outer) │ if (filtering_support->outer)
vlan = &vvfl_v2->filters[i].outer; │ vlan = &vvfl_v2->filters[i].outer;
else │ else
vlan = &vvfl_v2->filters[i].inner; │ vlan = &vvfl_v2->filters[i].inner;
│
vlan->tci = f->vlan.vid; │ vlan->tci = f->vlan.vid;
vlan->tpid = f->vlan.tpid; │ vlan->tpid = f->vlan.tpid;
│
│ list_del(&f->list);
│ kfree(f);
i++; │ i++;
f->add = false; │
if (i == count) │ if (i == count)
break; │ break;
} │ }
} │ }
│
if (!more) │ if (!more)
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; │ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
│
spin_unlock_bh(&adapter->mac_vlan_list_lock); │ spin_unlock_bh(&adapter->mac_vlan_list_lock);
│
iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2, │ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
(u8 *)vvfl_v2, len); │ (u8 *)vvfl_v2, len);
kfree(vvfl_v2); │ kfree(vvfl_v2);
} │ }
} │
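
In both the add and delete paths the message length is clamped so the virtchnl payload never exceeds IAVF_MAX_AQ_BUF_SIZE, and the "more" flag is left set so the remainder goes in a follow-up message. A standalone model of the v1 clamp, using a hypothetical 4096-byte limit and an 8-byte header in place of the real struct sizes:

    #include <stdio.h>

    #define MAX_AQ_BUF  4096
    #define HDR_SIZE    8     /* stand-in for sizeof(struct virtchnl_vlan_filter_list) */

    int main(void)
    {
            int count = 3000;        /* filters marked for removal */
            int len = HDR_SIZE + count * (int)sizeof(unsigned short);

            if (len > MAX_AQ_BUF) {  /* too big: shrink count to what fits */
                    count = (MAX_AQ_BUF - HDR_SIZE) / (int)sizeof(unsigned short);
                    len = HDR_SIZE + count * (int)sizeof(unsigned short);
                    /* 'more' stays set so leftovers go in a later message */
            }
            printf("count=%d len=%d\n", count, len);  /* count=2044 len=4096 */
            return 0;
    }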
next prev up linux/drivers/net/ethernet/sfc/falcon/farch.c:1664 │ linux/drivers/net/ethernet/sfc/farch.c:1730
│
ef4_oword_t temp; │ efx_oword_t temp;
│
/* Set positions of descriptor caches in SRAM. */ │ /* Set positions of descriptor caches in SRAM. */
EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); │ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
ef4_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); │ efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); │ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
ef4_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); │ efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
│
/* Set TX descriptor cache size. */ │ /* Set TX descriptor cache size. */
BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); │ BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
EF4_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); │ EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
ef4_writeo(efx, &temp, FR_AZ_TX_DC_CFG); │ efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
│
/* Set RX descriptor cache size. Set low watermark to size-8, as │ /* Set RX descriptor cache size. Set low watermark to size-8, as
* this allows most efficient prefetching. │ * this allows most efficient prefetching.
*/ │ */
BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); │ BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); │ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
ef4_writeo(efx, &temp, FR_AZ_RX_DC_CFG); │ efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); │ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
ef4_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); │ efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
│
/* Program INT_KER address */ │ /* Program INT_KER address */
EF4_POPULATE_OWORD_2(temp, │ EFX_POPULATE_OWORD_2(temp,
FRF_AZ_NORM_INT_VEC_DIS_KER, │ FRF_AZ_NORM_INT_VEC_DIS_KER,
EF4_INT_MODE_USE_MSI(efx), │ EFX_INT_MODE_USE_MSI(efx),
FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); │ FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
ef4_writeo(efx, &temp, FR_AZ_INT_ADR_KER); │ efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
│
/* Use a valid MSI-X vector */ │ if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
efx->irq_level = 0; │ /* Use an interrupt level unused by event queues */
│ efx->irq_level = 0x1f;
│ else
│ /* Use a valid MSI-X vector */
│ efx->irq_level = 0;
│
/* Enable all the genuinely fatal interrupts. (They are still │ /* Enable all the genuinely fatal interrupts. (They are still
* masked by the overall interrupt mask, controlled by │ * masked by the overall interrupt mask, controlled by
* falcon_interrupts()). │ * falcon_interrupts()).
* │ *
* Note: All other fatal interrupts are enabled │ * Note: All other fatal interrupts are enabled
*/ │ */
EF4_POPULATE_OWORD_3(temp, │ EFX_POPULATE_OWORD_3(temp,
FRF_AZ_ILL_ADR_INT_KER_EN, 1, │ FRF_AZ_ILL_ADR_INT_KER_EN, 1,
FRF_AZ_RBUF_OWN_INT_KER_EN, 1, │ FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
FRF_AZ_TBUF_OWN_INT_KER_EN, 1); │ FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
EF4_INVERT_OWORD(temp); │ EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
ef4_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); │ EFX_INVERT_OWORD(temp);
│ efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
│
/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be │ /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
* controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. │ * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
*/ │ */
ef4_reado(efx, &temp, FR_AZ_TX_RESERVED); │ efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); │ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); │ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); │ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); │ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); │ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
/* Enable SW_EV to inherit in char driver - assume harmless here */ │ /* Enable SW_EV to inherit in char driver - assume harmless here */
EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); │ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
/* Prefetch threshold 2 => fetch when descriptor cache half empty */ │ /* Prefetch threshold 2 => fetch when descriptor cache half empty */
EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); │ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
/* Disable hardware watchdog which can misfire */ │ /* Disable hardware watchdog which can misfire */
EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); │ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
/* Squash TX of packets of 16 bytes or less */ │ /* Squash TX of packets of 16 bytes or less */
if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) │ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
EF4_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); │ efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
ef4_writeo(efx, &temp, FR_AZ_TX_RESERVED); │
│ EFX_POPULATE_OWORD_4(temp,
if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { │ /* Default values */
EF4_POPULATE_OWORD_4(temp, │ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
/* Default values */ │ FRF_BZ_TX_PACE_SB_AF, 0xb,
FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, │ FRF_BZ_TX_PACE_FB_BASE, 0,
FRF_BZ_TX_PACE_SB_AF, 0xb, │ /* Allow large pace values in the fast bin. */
FRF_BZ_TX_PACE_FB_BASE, 0, │ FRF_BZ_TX_PACE_BIN_TH,
/* Allow large pace values in the │ FFE_BZ_TX_PACE_RESERVED);
* fast bin. */ │ efx_writeo(efx, &temp, FR_BZ_TX_PACE);
FRF_BZ_TX_PACE_BIN_TH, │
FFE_BZ_TX_PACE_RESERVED); │
ef4_writeo(efx, &temp, FR_BZ_TX_PACE); │
} │
} │
next prev up linux/drivers/net/ethernet/sfc/farch.c:971 │ linux/drivers/net/ethernet/sfc/falcon/farch.c:980
│
unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; │ unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; │ unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned expected_ptr; │ unsigned expected_ptr;
bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; │ bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
u16 flags; │ u16 flags;
struct efx_rx_queue *rx_queue; │ struct ef4_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx; │ struct ef4_nic *efx = channel->efx;
│
if (unlikely(READ_ONCE(efx->reset_pending))) │ if (unlikely(READ_ONCE(efx->reset_pending)))
return; │ return;
│
rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); │ rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); │ rx_ev_sop = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != │ WARN_ON(EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
channel->channel); │ channel->channel);
│
rx_queue = efx_channel_get_rx_queue(channel); │ rx_queue = ef4_channel_get_rx_queue(channel);
│
rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); │ rx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & │ expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
rx_queue->ptr_mask); │ rx_queue->ptr_mask);
│
/* Check for partial drops and other errors */ │ /* Check for partial drops and other errors */
if (unlikely(rx_ev_desc_ptr != expected_ptr) || │ if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { │ unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
if (rx_ev_desc_ptr != expected_ptr && │ if (rx_ev_desc_ptr != expected_ptr &&
!efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) │ !ef4_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
return; │ return;
│
/* Discard all pending fragments */ │ /* Discard all pending fragments */
if (rx_queue->scatter_n) { │ if (rx_queue->scatter_n) {
efx_rx_packet( │ ef4_rx_packet(
rx_queue, │ rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask, │ rx_queue->removed_count & rx_queue->ptr_mask,
rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); │ rx_queue->scatter_n, 0, EF4_RX_PKT_DISCARD);
rx_queue->removed_count += rx_queue->scatter_n; │ rx_queue->removed_count += rx_queue->scatter_n;
rx_queue->scatter_n = 0; │ rx_queue->scatter_n = 0;
} │ }
│
/* Return if there is no new fragment */ │ /* Return if there is no new fragment */
if (rx_ev_desc_ptr != expected_ptr) │ if (rx_ev_desc_ptr != expected_ptr)
return; │ return;
│
/* Discard new fragment if not SOP */ │ /* Discard new fragment if not SOP */
if (!rx_ev_sop) { │ if (!rx_ev_sop) {
efx_rx_packet( │ ef4_rx_packet(
rx_queue, │ rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask, │ rx_queue->removed_count & rx_queue->ptr_mask,
1, 0, EFX_RX_PKT_DISCARD); │ 1, 0, EF4_RX_PKT_DISCARD);
++rx_queue->removed_count; │ ++rx_queue->removed_count;
return; │ return;
} │ }
} │ }
│
++rx_queue->scatter_n; │ ++rx_queue->scatter_n;
if (rx_ev_cont) │ if (rx_ev_cont)
return; │ return;
│
rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); │ rx_ev_byte_cnt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); │ rx_ev_pkt_ok = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); │ rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
│
if (likely(rx_ev_pkt_ok)) { │ if (likely(rx_ev_pkt_ok)) {
/* If packet is marked as OK then we can rely on the │ /* If packet is marked as OK then we can rely on the
* hardware checksum and classification. │ * hardware checksum and classification.
*/ │ */
flags = 0; │ flags = 0;
switch (rx_ev_hdr_type) { │ switch (rx_ev_hdr_type) {
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: │ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
flags |= EFX_RX_PKT_TCP; │ flags |= EF4_RX_PKT_TCP;
fallthrough; │ fallthrough;
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: │ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
flags |= EFX_RX_PKT_CSUMMED; │ flags |= EF4_RX_PKT_CSUMMED;
fallthrough; │ fallthrough;
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: │ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
case FSE_AZ_RX_EV_HDR_TYPE_OTHER: │ case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
break; │ break;
} │ }
} else { │ } else {
flags = efx_farch_handle_rx_not_ok(rx_queue, event); │ flags = ef4_farch_handle_rx_not_ok(rx_queue, event);
} │ }
│
/* Detect multicast packets that didn't match the filter */ │ /* Detect multicast packets that didn't match the filter */
rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); │ rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
if (rx_ev_mcast_pkt) { │ if (rx_ev_mcast_pkt) {
unsigned int rx_ev_mcast_hash_match = │ unsigned int rx_ev_mcast_hash_match =
EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); │ EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
│
if (unlikely(!rx_ev_mcast_hash_match)) { │ if (unlikely(!rx_ev_mcast_hash_match)) {
++channel->n_rx_mcast_mismatch; │ ++channel->n_rx_mcast_mismatch;
flags |= EFX_RX_PKT_DISCARD; │ flags |= EF4_RX_PKT_DISCARD;
} │ }
} │ }
│
channel->irq_mod_score += 2; │ channel->irq_mod_score += 2;
│
/* Handle received packet */ │ /* Handle received packet */
efx_rx_packet(rx_queue, │ ef4_rx_packet(rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask, │ rx_queue->removed_count & rx_queue->ptr_mask,
rx_queue->scatter_n, rx_ev_byte_cnt, flags); │ rx_queue->scatter_n, rx_ev_byte_cnt, flags);
rx_queue->removed_count += rx_queue->scatter_n; │ rx_queue->removed_count += rx_queue->scatter_n;
rx_queue->scatter_n = 0; │ rx_queue->scatter_n = 0;
} │
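
The partial-drop detection above hinges on one piece of ring arithmetic: the descriptor index carried in the event must equal the running removed_count plus the fragments already gathered, folded into the ring with ptr_mask. A standalone model of the expected_ptr computation:

    #include <stdio.h>

    int main(void)
    {
            unsigned int ptr_mask = 511;           /* 512-entry ring */
            unsigned int removed_count = 1030;     /* running (unmasked) counter */
            unsigned int scatter_n = 2;            /* fragments collected so far */

            unsigned int expected = (removed_count + scatter_n) & ptr_mask;

            printf("expected descriptor index: %u\n", expected);  /* 1032 % 512 = 8 */
            return 0;
    }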
next prev up linux/drivers/net/ethernet/intel/igc/igc_ethtool.c:589 │ linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:883
│
struct igc_adapter *adapter = netdev_priv(netdev); │ struct igb_adapter *adapter = netdev_priv(netdev);
struct igc_ring *temp_ring; │ struct igb_ring *temp_ring;
u16 new_rx_count, new_tx_count; │
int i, err = 0; │ int i, err = 0;
│ u16 new_rx_count, new_tx_count;
│
if (ring->rx_mini_pending || ring->rx_jumbo_pending) │ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL; │ return -EINVAL;
│
new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD); │ new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD); │ new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); │ new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
│
new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD); │ new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD); │ new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); │ new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
│
if (new_tx_count == adapter->tx_ring_count && │ if ((new_tx_count == adapter->tx_ring_count) &&
new_rx_count == adapter->rx_ring_count) { │ (new_rx_count == adapter->rx_ring_count)) {
/* nothing to do */ │ /* nothing to do */
return 0; │ return 0;
} │ }
│
while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) │ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
usleep_range(1000, 2000); │ usleep_range(1000, 2000);
│
if (!netif_running(adapter->netdev)) { │ if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++) │ for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->count = new_tx_count; │ adapter->tx_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++) │ for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->count = new_rx_count; │ adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count; │ adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count; │ adapter->rx_ring_count = new_rx_count;
goto clear_reset; │ goto clear_reset;
} │ }
│
if (adapter->num_tx_queues > adapter->num_rx_queues) │ if (adapter->num_tx_queues > adapter->num_rx_queues)
temp_ring = vmalloc(array_size(sizeof(struct igc_ring), │ temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
adapter->num_tx_queues)); │ adapter->num_tx_queues));
else │ else
temp_ring = vmalloc(array_size(sizeof(struct igc_ring), │ temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
adapter->num_rx_queues)); │ adapter->num_rx_queues));
│
if (!temp_ring) { │ if (!temp_ring) {
err = -ENOMEM; │ err = -ENOMEM;
goto clear_reset; │ goto clear_reset;
} │ }
│
igc_down(adapter); │ igb_down(adapter);
│
/* We can't just free everything and then setup again, │ /* We can't just free everything and then setup again,
* because the ISRs in MSI-X mode get passed pointers │ * because the ISRs in MSI-X mode get passed pointers
* to the Tx and Rx ring structs. │ * to the Tx and Rx ring structs.
*/ │ */
if (new_tx_count != adapter->tx_ring_count) { │ if (new_tx_count != adapter->tx_ring_count) {
for (i = 0; i < adapter->num_tx_queues; i++) { │ for (i = 0; i < adapter->num_tx_queues; i++) {
memcpy(&temp_ring[i], adapter->tx_ring[i], │ memcpy(&temp_ring[i], adapter->tx_ring[i],
sizeof(struct igc_ring)); │ sizeof(struct igb_ring));
│
temp_ring[i].count = new_tx_count; │ temp_ring[i].count = new_tx_count;
err = igc_setup_tx_resources(&temp_ring[i]); │ err = igb_setup_tx_resources(&temp_ring[i]);
if (err) { │ if (err) {
while (i) { │ while (i) {
i--; │ i--;
igc_free_tx_resources(&temp_ring[i]); │ igb_free_tx_resources(&temp_ring[i]);
} │ }
goto err_setup; │ goto err_setup;
} │ }
} │ }
│
for (i = 0; i < adapter->num_tx_queues; i++) { │ for (i = 0; i < adapter->num_tx_queues; i++) {
igc_free_tx_resources(adapter->tx_ring[i]); │ igb_free_tx_resources(adapter->tx_ring[i]);
│
memcpy(adapter->tx_ring[i], &temp_ring[i], │ memcpy(adapter->tx_ring[i], &temp_ring[i],
sizeof(struct igc_ring)); │ sizeof(struct igb_ring));
} │ }
│
adapter->tx_ring_count = new_tx_count; │ adapter->tx_ring_count = new_tx_count;
} │ }
│
if (new_rx_count != adapter->rx_ring_count) { │ if (new_rx_count != adapter->rx_ring_count) {
for (i = 0; i < adapter->num_rx_queues; i++) { │ for (i = 0; i < adapter->num_rx_queues; i++) {
memcpy(&temp_ring[i], adapter->rx_ring[i], │ memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct igc_ring)); │ sizeof(struct igb_ring));
│
temp_ring[i].count = new_rx_count; │ temp_ring[i].count = new_rx_count;
err = igc_setup_rx_resources(&temp_ring[i]); │ err = igb_setup_rx_resources(&temp_ring[i]);
if (err) { │ if (err) {
while (i) { │ while (i) {
i--; │ i--;
igc_free_rx_resources(&temp_ring[i]); │ igb_free_rx_resources(&temp_ring[i]);
} │ }
goto err_setup; │ goto err_setup;
} │ }
│
} │ }
│
for (i = 0; i < adapter->num_rx_queues; i++) { │ for (i = 0; i < adapter->num_rx_queues; i++) {
igc_free_rx_resources(adapter->rx_ring[i]); │ igb_free_rx_resources(adapter->rx_ring[i]);
│
memcpy(adapter->rx_ring[i], &temp_ring[i], │ memcpy(adapter->rx_ring[i], &temp_ring[i],
sizeof(struct igc_ring)); │ sizeof(struct igb_ring));
} │ }
│
adapter->rx_ring_count = new_rx_count; │ adapter->rx_ring_count = new_rx_count;
} │ }
err_setup: │ err_setup:
igc_up(adapter); │ igb_up(adapter);
vfree(temp_ring); │ vfree(temp_ring);
clear_reset: │ clear_reset:
clear_bit(__IGC_RESETTING, &adapter->state); │ clear_bit(__IGB_RESETTING, &adapter->state);
return err; │ return err;
} │
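
The comment in both columns states the constraint: MSI-X ISRs hold pointers to the live ring structs, so the rings are rebuilt in a scratch array and copied over the originals rather than reallocated. A minimal standalone sketch of that copy-swap strategy; struct ring and setup_ring() are illustrative stand-ins, not the driver types:

    #include <stdlib.h>

    struct ring { int count; void *desc; };

    static int setup_ring(struct ring *r)   /* stand-in for igb_setup_tx_resources() */
    {
            r->desc = calloc(r->count, 16);
            return r->desc ? 0 : -1;
    }

    static int resize_rings(struct ring **live, int n, int new_count)
    {
            struct ring *temp = calloc(n, sizeof(*temp));
            int i;

            if (!temp)
                    return -1;
            for (i = 0; i < n; i++) {
                    temp[i] = *live[i];          /* copy config, not resources */
                    temp[i].count = new_count;
                    if (setup_ring(&temp[i]) < 0)
                            goto unwind;
            }
            for (i = 0; i < n; i++) {
                    free(live[i]->desc);         /* release old resources */
                    *live[i] = temp[i];          /* swap in place: ISR pointers stay valid */
            }
            free(temp);
            return 0;
    unwind:
            while (i--)
                    free(temp[i].desc);
            free(temp);
            return -1;
    }

    int main(void)
    {
            struct ring a = { .count = 256 }, b = { .count = 256 };
            struct ring *live[2] = { &a, &b };

            setup_ring(&a);
            setup_ring(&b);
            return resize_rings(live, 2, 512);
    }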
next prev up linux/drivers/net/ethernet/sfc/falcon/nic.c:192 │ linux/drivers/net/ethernet/sfc/nic.c:200
│
REGISTER_AZ(ADR_REGION), │ REGISTER_AZ(ADR_REGION),
REGISTER_AZ(INT_EN_KER), │ REGISTER_AZ(INT_EN_KER),
REGISTER_BZ(INT_EN_CHAR), │ REGISTER_BZ(INT_EN_CHAR),
REGISTER_AZ(INT_ADR_KER), │ REGISTER_AZ(INT_ADR_KER),
REGISTER_BZ(INT_ADR_CHAR), │ REGISTER_BZ(INT_ADR_CHAR),
/* INT_ACK_KER is WO */ │ /* INT_ACK_KER is WO */
/* INT_ISR0 is RC */ │ /* INT_ISR0 is RC */
REGISTER_AZ(HW_INIT), │ REGISTER_AZ(HW_INIT),
REGISTER_CZ(USR_EV_CFG), │ REGISTER_CZ(USR_EV_CFG),
REGISTER_AB(EE_SPI_HCMD), │ REGISTER_AB(EE_SPI_HCMD),
REGISTER_AB(EE_SPI_HADR), │ REGISTER_AB(EE_SPI_HADR),
REGISTER_AB(EE_SPI_HDATA), │ REGISTER_AB(EE_SPI_HDATA),
REGISTER_AB(EE_BASE_PAGE), │ REGISTER_AB(EE_BASE_PAGE),
REGISTER_AB(EE_VPD_CFG0), │ REGISTER_AB(EE_VPD_CFG0),
/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ │ /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ │ /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
/* PCIE_CORE_INDIRECT is indirect */ │ /* PCIE_CORE_INDIRECT is indirect */
REGISTER_AB(NIC_STAT), │ REGISTER_AB(NIC_STAT),
REGISTER_AB(GPIO_CTL), │ REGISTER_AB(GPIO_CTL),
REGISTER_AB(GLB_CTL), │ REGISTER_AB(GLB_CTL),
/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ │ /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
REGISTER_BZ(DP_CTRL), │ REGISTER_BZ(DP_CTRL),
REGISTER_AZ(MEM_STAT), │ REGISTER_AZ(MEM_STAT),
REGISTER_AZ(CS_DEBUG), │ REGISTER_AZ(CS_DEBUG),
REGISTER_AZ(ALTERA_BUILD), │ REGISTER_AZ(ALTERA_BUILD),
REGISTER_AZ(CSR_SPARE), │ REGISTER_AZ(CSR_SPARE),
REGISTER_AB(PCIE_SD_CTL0123), │ REGISTER_AB(PCIE_SD_CTL0123),
REGISTER_AB(PCIE_SD_CTL45), │ REGISTER_AB(PCIE_SD_CTL45),
REGISTER_AB(PCIE_PCS_CTL_STAT), │ REGISTER_AB(PCIE_PCS_CTL_STAT),
/* DEBUG_DATA_OUT is not used */ │ /* DEBUG_DATA_OUT is not used */
/* DRV_EV is WO */ │ /* DRV_EV is WO */
REGISTER_AZ(EVQ_CTL), │ REGISTER_AZ(EVQ_CTL),
REGISTER_AZ(EVQ_CNT1), │ REGISTER_AZ(EVQ_CNT1),
REGISTER_AZ(EVQ_CNT2), │ REGISTER_AZ(EVQ_CNT2),
REGISTER_AZ(BUF_TBL_CFG), │ REGISTER_AZ(BUF_TBL_CFG),
REGISTER_AZ(SRM_RX_DC_CFG), │ REGISTER_AZ(SRM_RX_DC_CFG),
REGISTER_AZ(SRM_TX_DC_CFG), │ REGISTER_AZ(SRM_TX_DC_CFG),
REGISTER_AZ(SRM_CFG), │ REGISTER_AZ(SRM_CFG),
/* BUF_TBL_UPD is WO */ │ /* BUF_TBL_UPD is WO */
REGISTER_AZ(SRM_UPD_EVQ), │ REGISTER_AZ(SRM_UPD_EVQ),
REGISTER_AZ(SRAM_PARITY), │ REGISTER_AZ(SRAM_PARITY),
REGISTER_AZ(RX_CFG), │ REGISTER_AZ(RX_CFG),
REGISTER_BZ(RX_FILTER_CTL), │ REGISTER_BZ(RX_FILTER_CTL),
/* RX_FLUSH_DESCQ is WO */ │ /* RX_FLUSH_DESCQ is WO */
REGISTER_AZ(RX_DC_CFG), │ REGISTER_AZ(RX_DC_CFG),
REGISTER_AZ(RX_DC_PF_WM), │ REGISTER_AZ(RX_DC_PF_WM),
REGISTER_BZ(RX_RSS_TKEY), │ REGISTER_BZ(RX_RSS_TKEY),
/* RX_NODESC_DROP is RC */ │ /* RX_NODESC_DROP is RC */
REGISTER_AA(RX_SELF_RST), │ REGISTER_AA(RX_SELF_RST),
/* RX_DEBUG, RX_PUSH_DROP are not used */ │ /* RX_DEBUG, RX_PUSH_DROP are not used */
REGISTER_CZ(RX_RSS_IPV6_REG1), │ REGISTER_CZ(RX_RSS_IPV6_REG1),
REGISTER_CZ(RX_RSS_IPV6_REG2), │ REGISTER_CZ(RX_RSS_IPV6_REG2),
REGISTER_CZ(RX_RSS_IPV6_REG3), │ REGISTER_CZ(RX_RSS_IPV6_REG3),
/* TX_FLUSH_DESCQ is WO */ │ /* TX_FLUSH_DESCQ is WO */
REGISTER_AZ(TX_DC_CFG), │ REGISTER_AZ(TX_DC_CFG),
REGISTER_AA(TX_CHKSM_CFG), │ REGISTER_AA(TX_CHKSM_CFG),
REGISTER_AZ(TX_CFG), │ REGISTER_AZ(TX_CFG),
/* TX_PUSH_DROP is not used */ │ /* TX_PUSH_DROP is not used */
REGISTER_AZ(TX_RESERVED), │ REGISTER_AZ(TX_RESERVED),
REGISTER_BZ(TX_PACE), │ REGISTER_BZ(TX_PACE),
/* TX_PACE_DROP_QID is RC */ │ /* TX_PACE_DROP_QID is RC */
REGISTER_BB(TX_VLAN), │ REGISTER_BB(TX_VLAN),
REGISTER_BZ(TX_IPFIL_PORTEN), │ REGISTER_BZ(TX_IPFIL_PORTEN),
REGISTER_AB(MD_TXD), │ REGISTER_AB(MD_TXD),
REGISTER_AB(MD_RXD), │ REGISTER_AB(MD_RXD),
REGISTER_AB(MD_CS), │ REGISTER_AB(MD_CS),
REGISTER_AB(MD_PHY_ADR), │ REGISTER_AB(MD_PHY_ADR),
REGISTER_AB(MD_ID), │ REGISTER_AB(MD_ID),
/* MD_STAT is RC */ │ /* MD_STAT is RC */
REGISTER_AB(MAC_STAT_DMA), │ REGISTER_AB(MAC_STAT_DMA),
REGISTER_AB(MAC_CTRL), │ REGISTER_AB(MAC_CTRL),
REGISTER_BB(GEN_MODE), │ REGISTER_BB(GEN_MODE),
REGISTER_AB(MAC_MC_HASH_REG0), │ REGISTER_AB(MAC_MC_HASH_REG0),
REGISTER_AB(MAC_MC_HASH_REG1), │ REGISTER_AB(MAC_MC_HASH_REG1),
REGISTER_AB(GM_CFG1), │ REGISTER_AB(GM_CFG1),
REGISTER_AB(GM_CFG2), │ REGISTER_AB(GM_CFG2),
/* GM_IPG and GM_HD are not used */ │ /* GM_IPG and GM_HD are not used */
REGISTER_AB(GM_MAX_FLEN), │ REGISTER_AB(GM_MAX_FLEN),
/* GM_TEST is not used */ │ /* GM_TEST is not used */
REGISTER_AB(GM_ADR1), │ REGISTER_AB(GM_ADR1),
REGISTER_AB(GM_ADR2), │ REGISTER_AB(GM_ADR2),
REGISTER_AB(GMF_CFG0), │ REGISTER_AB(GMF_CFG0),
REGISTER_AB(GMF_CFG1), │ REGISTER_AB(GMF_CFG1),
REGISTER_AB(GMF_CFG2), │ REGISTER_AB(GMF_CFG2),
REGISTER_AB(GMF_CFG3), │ REGISTER_AB(GMF_CFG3),
REGISTER_AB(GMF_CFG4), │ REGISTER_AB(GMF_CFG4),
REGISTER_AB(GMF_CFG5), │ REGISTER_AB(GMF_CFG5),
REGISTER_BB(TX_SRC_MAC_CTL), │ REGISTER_BB(TX_SRC_MAC_CTL),
REGISTER_AB(XM_ADR_LO), │ REGISTER_AB(XM_ADR_LO),
REGISTER_AB(XM_ADR_HI), │ REGISTER_AB(XM_ADR_HI),
REGISTER_AB(XM_GLB_CFG), │ REGISTER_AB(XM_GLB_CFG),
REGISTER_AB(XM_TX_CFG), │ REGISTER_AB(XM_TX_CFG),
REGISTER_AB(XM_RX_CFG), │ REGISTER_AB(XM_RX_CFG),
REGISTER_AB(XM_MGT_INT_MASK), │ REGISTER_AB(XM_MGT_INT_MASK),
REGISTER_AB(XM_FC), │ REGISTER_AB(XM_FC),
REGISTER_AB(XM_PAUSE_TIME), │ REGISTER_AB(XM_PAUSE_TIME),
REGISTER_AB(XM_TX_PARAM), │ REGISTER_AB(XM_TX_PARAM),
REGISTER_AB(XM_RX_PARAM), │ REGISTER_AB(XM_RX_PARAM),
/* XM_MGT_INT_MSK (note no 'A') is RC */ │ /* XM_MGT_INT_MSK (note no 'A') is RC */
REGISTER_AB(XX_PWR_RST), │ REGISTER_AB(XX_PWR_RST),
REGISTER_AB(XX_SD_CTL), │ REGISTER_AB(XX_SD_CTL),
REGISTER_AB(XX_TXDRV_CTL), │ REGISTER_AB(XX_TXDRV_CTL),
/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ │ /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
/* XX_CORE_STAT is partly RC */ │ /* XX_CORE_STAT is partly RC */
│ REGISTER_DZ(BIU_HW_REV_ID),
│ REGISTER_DZ(MC_DB_LWRD),
│ REGISTER_DZ(MC_DB_HWRD),
} │
next prev up linux/drivers/net/ethernet/ti/cpsw_new.c:283 │ linux/drivers/net/ethernet/ti/cpsw.c:345
│
struct page *new_page, *page = token; │ struct page *new_page, *page = token;
void *pa = page_address(page); │ void *pa = page_address(page);
int headroom = CPSW_HEADROOM_NA; │ struct cpsw_meta_xdp *xmeta = pa + CPSW_XMETA_OFFSET;
struct cpsw_meta_xdp *xmeta; │ struct cpsw_common *cpsw = ndev_to_cpsw(xmeta->ndev);
struct cpsw_common *cpsw; │ int pkt_size = cpsw->rx_packet_max;
struct net_device *ndev; │ int ret = 0, port, ch = xmeta->ch;
int port, ch, pkt_size; │ int headroom = CPSW_HEADROOM_NA;
struct cpsw_priv *priv; │ struct net_device *ndev = xmeta->ndev;
struct page_pool *pool; │ struct cpsw_priv *priv;
struct sk_buff *skb; │ struct page_pool *pool;
struct xdp_buff xdp; │ struct sk_buff *skb;
int ret = 0; │ struct xdp_buff xdp;
dma_addr_t dma; │ dma_addr_t dma;
│
xmeta = pa + CPSW_XMETA_OFFSET; │
cpsw = ndev_to_cpsw(xmeta->ndev); │
ndev = xmeta->ndev; │
pkt_size = cpsw->rx_packet_max; │
ch = xmeta->ch; │
│
if (status >= 0) { │ if (cpsw->data.dual_emac && status >= 0) {
port = CPDMA_RX_SOURCE_PORT(status); │ port = CPDMA_RX_SOURCE_PORT(status);
if (port) │ if (port)
ndev = cpsw->slaves[--port].ndev; │ ndev = cpsw->slaves[--port].ndev;
} │ }
│
priv = netdev_priv(ndev); │ priv = netdev_priv(ndev);
pool = cpsw->page_pool[ch]; │ pool = cpsw->page_pool[ch];
│
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { │ if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
/* In dual emac mode check for all interfaces */ │ /* In dual emac mode check for all interfaces */
if (cpsw->usage_count && status >= 0) { │ if (cpsw->data.dual_emac && cpsw->usage_count &&
│ (status >= 0)) {
			/* The packet received is for the interface which               │			/* The packet received is for the interface which
			 * is already down while the other interface is up              │			 * is already down while the other interface is up
			 * and running. Instead of freeing the page, which              │			 * and running. Instead of freeing the page, which
			 * would reduce the number of rx descriptors in the             │			 * would reduce the number of rx descriptors in the
			 * DMA engine, requeue it back to cpdma.                        │			 * DMA engine, requeue it back to cpdma.
			 */                                                             │			 */
new_page = page; │ new_page = page;
goto requeue; │ goto requeue;
} │ }
│
/* the interface is going down, pages are purged */ │ /* the interface is going down, pages are purged */
page_pool_recycle_direct(pool, page); │ page_pool_recycle_direct(pool, page);
return; │ return;
} │ }
│
new_page = page_pool_dev_alloc_pages(pool); │ new_page = page_pool_dev_alloc_pages(pool);
if (unlikely(!new_page)) { │ if (unlikely(!new_page)) {
new_page = page; │ new_page = page;
ndev->stats.rx_dropped++; │ ndev->stats.rx_dropped++;
goto requeue; │ goto requeue;
} │ }
│
if (priv->xdp_prog) { │ if (priv->xdp_prog) {
int size = len; │ int size = len;
│
xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]); │ xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) { │ if (status & CPDMA_RX_VLAN_ENCAP) {
headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE; │ headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE; │ size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
} │ }
│
xdp_prepare_buff(&xdp, pa, headroom, size, false); │ xdp_prepare_buff(&xdp, pa, headroom, size, false);
│
ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len); │ port = priv->emac_port + cpsw->data.dual_emac;
│ ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
if (ret != CPSW_XDP_PASS) │ if (ret != CPSW_XDP_PASS)
goto requeue; │ goto requeue;
│
headroom = xdp.data - xdp.data_hard_start; │ headroom = xdp.data - xdp.data_hard_start;
│
/* XDP prog can modify vlan tag, so can't use encap header */ │ /* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP; │ status &= ~CPDMA_RX_VLAN_ENCAP;
} │ }
│
/* pass skb to netstack if no XDP prog or returned XDP_PASS */ │ /* pass skb to netstack if no XDP prog or returned XDP_PASS */
skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size)); │ skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
if (!skb) { │ if (!skb) {
ndev->stats.rx_dropped++; │ ndev->stats.rx_dropped++;
page_pool_recycle_direct(pool, page); │ page_pool_recycle_direct(pool, page);
goto requeue; │ goto requeue;
} │ }
│
skb->offload_fwd_mark = priv->offload_fwd_mark; │
skb_reserve(skb, headroom); │ skb_reserve(skb, headroom);
skb_put(skb, len); │ skb_put(skb, len);
skb->dev = ndev; │ skb->dev = ndev;
if (status & CPDMA_RX_VLAN_ENCAP) │ if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb); │ cpsw_rx_vlan_encap(skb);
if (priv->rx_ts_enabled) │ if (priv->rx_ts_enabled)
cpts_rx_timestamp(cpsw->cpts, skb); │ cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev); │ skb->protocol = eth_type_trans(skb, ndev);
│
/* mark skb for recycling */ │ /* mark skb for recycling */
skb_mark_for_recycle(skb); │ skb_mark_for_recycle(skb);
netif_receive_skb(skb); │ netif_receive_skb(skb);
│
ndev->stats.rx_bytes += len; │ ndev->stats.rx_bytes += len;
ndev->stats.rx_packets++; │ ndev->stats.rx_packets++;
│
requeue: │ requeue:
xmeta = page_address(new_page) + CPSW_XMETA_OFFSET; │ xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
xmeta->ndev = ndev; │ xmeta->ndev = ndev;
xmeta->ch = ch; │ xmeta->ch = ch;
│
dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA; │ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma, │ ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
pkt_size, 0); │ pkt_size, 0);
if (ret < 0) { │ if (ret < 0) {
WARN_ON(ret == -ENOMEM); │ WARN_ON(ret == -ENOMEM);
page_pool_recycle_direct(pool, new_page); │ page_pool_recycle_direct(pool, new_page);
} │ }
} │
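
After the XDP program runs, both columns recompute the skb headroom from the buffer geometry (headroom = xdp.data - xdp.data_hard_start), since the program may have moved the packet start. A standalone model of that arithmetic with made-up offsets:

    #include <stdio.h>

    int main(void)
    {
            char buf[2048];
            char *data_hard_start = buf;      /* start of the page-pool buffer */
            char *data = buf + 64 + 8;        /* pad-style reserve + VLAN encap header */

            data += 14;                        /* hypothetical XDP prog pulled 14 bytes */

            printf("new headroom: %ld\n", (long)(data - data_hard_start));  /* 86 */
            return 0;
    }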
next prev up linux/drivers/net/ethernet/intel/fm10k/fm10k_pf.c:1194 │ linux/drivers/net/ethernet/intel/fm10k/fm10k_iov.c:35
│
struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; │ struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
│ struct fm10k_intfc *interface = hw->back;
u8 mac[ETH_ALEN]; │ u8 mac[ETH_ALEN];
u32 *result; │ u32 *result;
int err = 0; │ int err = 0;
bool set; │ bool set;
u16 vlan; │ u16 vlan;
u32 vid; │ u32 vid;
│
/* we shouldn't be updating rules on a disabled interface */ │ /* we shouldn't be updating rules on a disabled interface */
if (!FM10K_VF_FLAG_ENABLED(vf_info)) │ if (!FM10K_VF_FLAG_ENABLED(vf_info))
err = FM10K_ERR_PARAM; │ err = FM10K_ERR_PARAM;
│
if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) { │ if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
result = results[FM10K_MAC_VLAN_MSG_VLAN]; │ result = results[FM10K_MAC_VLAN_MSG_VLAN];
│
/* record VLAN id requested */ │ /* record VLAN id requested */
err = fm10k_tlv_attr_get_u32(result, &vid); │ err = fm10k_tlv_attr_get_u32(result, &vid);
if (err) │ if (err)
return err; │ return err;
│
set = !(vid & FM10K_VLAN_CLEAR); │ set = !(vid & FM10K_VLAN_CLEAR);
vid &= ~FM10K_VLAN_CLEAR; │ vid &= ~FM10K_VLAN_CLEAR;
│
/* if the length field has been set, this is a multi-bit │ /* if the length field has been set, this is a multi-bit
* update request. For multi-bit requests, simply disallow │ * update request. For multi-bit requests, simply disallow
* them when the pf_vid has been set. In this case, the PF │ * them when the pf_vid has been set. In this case, the PF
* should have already cleared the VLAN_TABLE, and if we │ * should have already cleared the VLAN_TABLE, and if we
* allowed them, it could allow a rogue VF to receive traffic │ * allowed them, it could allow a rogue VF to receive traffic
* on a VLAN it was not assigned. In the single-bit case, we │ * on a VLAN it was not assigned. In the single-bit case, we
* need to modify requests for VLAN 0 to use the default PF or │ * need to modify requests for VLAN 0 to use the default PF or
* SW vid when assigned. │ * SW vid when assigned.
*/ │ */
│
if (vid >> 16) { │ if (vid >> 16) {
/* prevent multi-bit requests when PF has │ /* prevent multi-bit requests when PF has
* administratively set the VLAN for this VF │ * administratively set the VLAN for this VF
*/ │ */
if (vf_info->pf_vid) │ if (vf_info->pf_vid)
return FM10K_ERR_PARAM; │ return FM10K_ERR_PARAM;
} else { │ } else {
err = fm10k_iov_select_vid(vf_info, (u16)vid); │ err = fm10k_iov_select_vid(vf_info, (u16)vid);
if (err < 0) │ if (err < 0)
return err; │ return err;
│
vid = err; │ vid = err;
} │ }
│
/* update VSI info for VF in regards to VLAN table */ │ /* update VSI info for VF in regards to VLAN table */
err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); │ err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
} │ }
│
if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) { │ if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
result = results[FM10K_MAC_VLAN_MSG_MAC]; │ result = results[FM10K_MAC_VLAN_MSG_MAC];
│
/* record unicast MAC address requested */ │ /* record unicast MAC address requested */
err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); │ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
if (err) │ if (err)
return err; │ return err;
│
/* block attempts to set MAC for a locked device */ │ /* block attempts to set MAC for a locked device */
if (is_valid_ether_addr(vf_info->mac) && │ if (is_valid_ether_addr(vf_info->mac) &&
!ether_addr_equal(mac, vf_info->mac)) │ !ether_addr_equal(mac, vf_info->mac))
return FM10K_ERR_PARAM; │ return FM10K_ERR_PARAM;
│
set = !(vlan & FM10K_VLAN_CLEAR); │ set = !(vlan & FM10K_VLAN_CLEAR);
vlan &= ~FM10K_VLAN_CLEAR; │ vlan &= ~FM10K_VLAN_CLEAR;
│
err = fm10k_iov_select_vid(vf_info, vlan); │ err = fm10k_iov_select_vid(vf_info, vlan);
if (err < 0) │ if (err < 0)
return err; │ return err;
│
vlan = (u16)err; │ vlan = (u16)err;
│
/* notify switch of request for new unicast address */ │ /* Add this request to the MAC/VLAN queue */
err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, │ err = fm10k_queue_mac_request(interface, vf_info->glort,
mac, vlan, set, 0); │ mac, vlan, set);
} │ }
│
if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) { │ if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
result = results[FM10K_MAC_VLAN_MSG_MULTICAST]; │ result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
│
/* record multicast MAC address requested */ │ /* record multicast MAC address requested */
err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); │ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
if (err) │ if (err)
return err; │ return err;
│
/* verify that the VF is allowed to request multicast */ │ /* verify that the VF is allowed to request multicast */
if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED)) │ if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
return FM10K_ERR_PARAM; │ return FM10K_ERR_PARAM;
│
set = !(vlan & FM10K_VLAN_CLEAR); │ set = !(vlan & FM10K_VLAN_CLEAR);
vlan &= ~FM10K_VLAN_CLEAR; │ vlan &= ~FM10K_VLAN_CLEAR;
│
err = fm10k_iov_select_vid(vf_info, vlan); │ err = fm10k_iov_select_vid(vf_info, vlan);
if (err < 0) │ if (err < 0)
return err; │ return err;
│
vlan = (u16)err; │ vlan = (u16)err;
│
/* notify switch of request for new multicast address */ │ /* Add this request to the MAC/VLAN queue */
err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, │ err = fm10k_queue_mac_request(interface, vf_info->glort,
mac, vlan, set); │ mac, vlan, set);
} │ }
│
return err; │ return err;
} │
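
Both the VLAN and MAC branches above decode the same convention: bit 15 of the VLAN half-word is the clear flag, and a nonzero upper half of the 32-bit VLAN word is the length field that marks a multi-bit update request. A standalone decode sketch (the 0x8000 flag value is assumed here for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_CLEAR 0x8000u	/* assumed FM10K_VLAN_CLEAR value */

int main(void)
{
	uint32_t vid = 0x00048005;		/* length 4, clear flag, vid 5 */
	bool set = !(vid & VLAN_CLEAR);		/* flag present => clear request */
	bool multi;

	vid &= ~VLAN_CLEAR;			/* strip the flag bit */
	multi = (vid >> 16) != 0;		/* length field => multi-bit */

	printf("vid=%u set=%d multi-bit=%d\n", vid & 0xffffu, set, multi);
	return 0;
}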
linux/drivers/net/ethernet/marvell/sky2.h:1677 │ linux/drivers/net/ethernet/marvell/skge.h:1664
│
GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */ │ GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */
GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */ │ GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */
GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */ │ GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */
GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */ │ GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */
GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */ │ GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */
│ /* GM_MIB_CNT_BASE + 40: reserved */
GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */ │ GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */
GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */ │ GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */
GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */ │ GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */
GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */ │ GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */
GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */ │ GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */
GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */ │ GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */
GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */ │ GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */
GM_RXF_127B = GM_MIB_CNT_BASE + 104,/* 65-127 Byte Rx Frame */ │ GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */
GM_RXF_255B = GM_MIB_CNT_BASE + 112,/* 128-255 Byte Rx Frame */ │ GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */
GM_RXF_511B = GM_MIB_CNT_BASE + 120,/* 256-511 Byte Rx Frame */ │ GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */
GM_RXF_1023B = GM_MIB_CNT_BASE + 128,/* 512-1023 Byte Rx Frame */ │ GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */
GM_RXF_1518B = GM_MIB_CNT_BASE + 136,/* 1024-1518 Byte Rx Frame */ │ GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */
GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144,/* 1519-MaxSize Byte Rx Frame */ │ GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */
GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152,/* Rx Frame too Long Error */ │ GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */
GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160,/* Rx Jabber Packet Frame */ │ GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */
│ /* GM_MIB_CNT_BASE + 168: reserved */
GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176,/* Rx FIFO overflow Event */ │ GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */
GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192,/* Unicast Frames Xmitted OK */ │ /* GM_MIB_CNT_BASE + 184: reserved */
GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200,/* Broadcast Frames Xmitted OK */ │ GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */
GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208,/* Pause MAC Ctrl Frames Xmitted */ │ GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */
GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216,/* Multicast Frames Xmitted OK */ │ GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */
GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224,/* Octets Transmitted OK Low */ │ GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */
GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232,/* Octets Transmitted OK High */ │ GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */
GM_TXF_64B = GM_MIB_CNT_BASE + 240,/* 64 Byte Tx Frame */ │ GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */
GM_TXF_127B = GM_MIB_CNT_BASE + 248,/* 65-127 Byte Tx Frame */ │ GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */
GM_TXF_255B = GM_MIB_CNT_BASE + 256,/* 128-255 Byte Tx Frame */ │ GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */
GM_TXF_511B = GM_MIB_CNT_BASE + 264,/* 256-511 Byte Tx Frame */ │ GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */
GM_TXF_1023B = GM_MIB_CNT_BASE + 272,/* 512-1023 Byte Tx Frame */ │ GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */
GM_TXF_1518B = GM_MIB_CNT_BASE + 280,/* 1024-1518 Byte Tx Frame */ │ GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */
GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288,/* 1519-MaxSize Byte Tx Frame */ │ GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */
│ GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */
│
GM_TXF_COL = GM_MIB_CNT_BASE + 304,/* Tx Collision */ │ GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */
GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312,/* Tx Late Collision */ │ GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */
GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320,/* Tx aborted due to Exces. Col. */ │ GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. */
GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328,/* Tx Multiple Collision */ │ GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */
GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336,/* Tx Single Collision */ │ GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */
GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344,/* Tx FIFO Underrun Event */ │ GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */
} │
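
The offsets in both enums advance by 8 per counter even though each MIB register is read 32 bits at a time, and the 64-bit octet counts are exposed as LO/HI pairs (+48/+56, +224/+232). A sketch of reassembling such a pair; read32() and the base value are placeholders for the driver's GMA register accessor:

#include <stdint.h>
#include <stdio.h>

#define MIB_BASE	0x0100			/* assumed GM_MIB_CNT_BASE */
#define RXO_OK_LO	(MIB_BASE + 48)		/* Octets Received OK Low */
#define RXO_OK_HI	(MIB_BASE + 56)		/* Octets Received OK High */

static uint32_t read32(unsigned int reg)
{
	return reg;		/* stand-in for a 32-bit MMIO read */
}

int main(void)
{
	uint64_t octets = (uint64_t)read32(RXO_OK_HI) << 32 | read32(RXO_OK_LO);

	printf("octets received OK: %llu\n", (unsigned long long)octets);
	return 0;
}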
linux/drivers/net/ethernet/sfc/ef100_nic.c:799 │ linux/drivers/net/ethernet/sfc/ef100_nic.c:713
│
.revision = EFX_REV_EF100, │ .revision = EFX_REV_EF100,
.is_vf = true, │ .is_vf = false,
.probe = ef100_probe_vf, │ .probe = ef100_probe_pf,
.offload_features = EF100_OFFLOAD_FEATURES, │ .offload_features = EF100_OFFLOAD_FEATURES,
.mcdi_max_ver = 2, │ .mcdi_max_ver = 2,
.mcdi_request = ef100_mcdi_request, │ .mcdi_request = ef100_mcdi_request,
.mcdi_poll_response = ef100_mcdi_poll_response, │ .mcdi_poll_response = ef100_mcdi_poll_response,
.mcdi_read_response = ef100_mcdi_read_response, │ .mcdi_read_response = ef100_mcdi_read_response,
.mcdi_poll_reboot = ef100_mcdi_poll_reboot, │ .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
.mcdi_reboot_detected = ef100_mcdi_reboot_detected, │ .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
.irq_enable_master = efx_port_dummy_op_void, │ .irq_enable_master = efx_port_dummy_op_void,
.irq_test_generate = efx_ef100_irq_test_generate, │ .irq_test_generate = efx_ef100_irq_test_generate,
.irq_disable_non_ev = efx_port_dummy_op_void, │ .irq_disable_non_ev = efx_port_dummy_op_void,
.push_irq_moderation = efx_channel_dummy_op_void, │ .push_irq_moderation = efx_channel_dummy_op_void,
.min_interrupt_mode = EFX_INT_MODE_MSIX, │ .min_interrupt_mode = EFX_INT_MODE_MSIX,
.map_reset_reason = ef100_map_reset_reason, │ .map_reset_reason = ef100_map_reset_reason,
.map_reset_flags = ef100_map_reset_flags, │ .map_reset_flags = ef100_map_reset_flags,
.reset = ef100_reset, │ .reset = ef100_reset,
│
.check_caps = ef100_check_caps, │ .check_caps = ef100_check_caps,
│
.ev_probe = ef100_ev_probe, │ .ev_probe = ef100_ev_probe,
.ev_init = ef100_ev_init, │ .ev_init = ef100_ev_init,
.ev_fini = efx_mcdi_ev_fini, │ .ev_fini = efx_mcdi_ev_fini,
.ev_remove = efx_mcdi_ev_remove, │ .ev_remove = efx_mcdi_ev_remove,
.irq_handle_msi = ef100_msi_interrupt, │ .irq_handle_msi = ef100_msi_interrupt,
.ev_process = ef100_ev_process, │ .ev_process = ef100_ev_process,
.ev_read_ack = ef100_ev_read_ack, │ .ev_read_ack = ef100_ev_read_ack,
.ev_test_generate = efx_ef100_ev_test_generate, │ .ev_test_generate = efx_ef100_ev_test_generate,
.tx_probe = ef100_tx_probe, │ .tx_probe = ef100_tx_probe,
.tx_init = ef100_tx_init, │ .tx_init = ef100_tx_init,
.tx_write = ef100_tx_write, │ .tx_write = ef100_tx_write,
.tx_enqueue = ef100_enqueue_skb, │ .tx_enqueue = ef100_enqueue_skb,
.rx_probe = efx_mcdi_rx_probe, │ .rx_probe = efx_mcdi_rx_probe,
.rx_init = efx_mcdi_rx_init, │ .rx_init = efx_mcdi_rx_init,
.rx_remove = efx_mcdi_rx_remove, │ .rx_remove = efx_mcdi_rx_remove,
.rx_write = ef100_rx_write, │ .rx_write = ef100_rx_write,
.rx_packet = __ef100_rx_packet, │ .rx_packet = __ef100_rx_packet,
.rx_buf_hash_valid = ef100_rx_buf_hash_valid, │ .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
.fini_dmaq = efx_fini_dmaq, │ .fini_dmaq = efx_fini_dmaq,
.max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, │ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
.filter_table_probe = ef100_filter_table_up, │ .filter_table_probe = ef100_filter_table_up,
.filter_table_restore = efx_mcdi_filter_table_restore, │ .filter_table_restore = efx_mcdi_filter_table_restore,
.filter_table_remove = ef100_filter_table_down, │ .filter_table_remove = ef100_filter_table_down,
.filter_insert = efx_mcdi_filter_insert, │ .filter_insert = efx_mcdi_filter_insert,
.filter_remove_safe = efx_mcdi_filter_remove_safe, │ .filter_remove_safe = efx_mcdi_filter_remove_safe,
.filter_get_safe = efx_mcdi_filter_get_safe, │ .filter_get_safe = efx_mcdi_filter_get_safe,
.filter_clear_rx = efx_mcdi_filter_clear_rx, │ .filter_clear_rx = efx_mcdi_filter_clear_rx,
.filter_count_rx_used = efx_mcdi_filter_count_rx_used, │ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
.filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, │ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
.filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, │ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL │ #ifdef CONFIG_RFS_ACCEL
.filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, │ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif │ #endif
│
│ .get_phys_port_id = efx_ef100_get_phys_port_id,
│
.rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN, │ .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
.rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8, │ .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
.rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8, │ .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
.rx_hash_key_size = 40, │ .rx_hash_key_size = 40,
.rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, │ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
.rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config, │ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
│ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
│ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
.rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, │ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
.rx_recycle_ring_size = efx_ef100_recycle_ring_size, │ .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
│
.reconfigure_mac = ef100_reconfigure_mac, │ .reconfigure_mac = ef100_reconfigure_mac,
│ .reconfigure_port = efx_mcdi_port_reconfigure,
.test_nvram = efx_new_mcdi_nvram_test_all, │ .test_nvram = efx_new_mcdi_nvram_test_all,
.describe_stats = ef100_describe_stats, │ .describe_stats = ef100_describe_stats,
.start_stats = efx_mcdi_mac_start_stats, │ .start_stats = efx_mcdi_mac_start_stats,
.update_stats = ef100_update_stats, │ .update_stats = ef100_update_stats,
.pull_stats = efx_mcdi_mac_pull_stats, │ .pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats, │ .stop_stats = efx_mcdi_mac_stop_stats,
│
│ /* Per-type bar/size configuration not used on ef100. Location of
│ * registers is defined by extended capabilities.
│ */
.mem_bar = NULL, │ .mem_bar = NULL,
.mem_map_size = NULL, │ .mem_map_size = NULL,
│
} │
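
Both listings fill in the same efx_nic_type ops table; the substantive PF/VF differences are the is_vf flag, the probe hook, and the handful of PF-only entries such as get_phys_port_id. Core code then calls through the table without caring which variant it holds. A minimal standalone sketch of that dispatch pattern (the names here are illustrative, not the sfc API):

#include <stdio.h>

struct nic;

struct nic_type {				/* per-variant ops table */
	int is_vf;
	int (*probe)(struct nic *n);
	int (*reset)(struct nic *n, int method);
};

struct nic {
	const struct nic_type *type;
};

static int pf_probe(struct nic *n) { (void)n; return 0; }
static int pf_reset(struct nic *n, int method) { (void)n; (void)method; return 0; }

static const struct nic_type pf_type = {
	.is_vf = 0,
	.probe = pf_probe,
	.reset = pf_reset,
};

int main(void)
{
	struct nic n = { .type = &pf_type };

	/* generic code dispatches through the table */
	printf("probe=%d reset=%d is_vf=%d\n",
	       n.type->probe(&n), n.type->reset(&n, 0), n.type->is_vf);
	return 0;
}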
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7996 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8065
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2; │ u8 flags2;
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3; │ u8 flags3;
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
} │
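
The REQ and RESP context layouts above are byte-for-byte identical; only the macro prefixes differ. Each MASK/SHIFT pair is meant to be consumed by token-pasting field accessors, in the style of the GET_FIELD/SET_FIELD helpers qed keeps in its common headers. A standalone sketch of that accessor pattern:

#include <stdint.h>
#include <stdio.h>

#define CF0_MASK	0x3	/* 2-bit field */
#define CF0_SHIFT	2

/* token-pasting accessors in the style of qed's helpers */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, v) \
	((value) = ((value) & ~(name##_MASK << name##_SHIFT)) | \
		   (((v) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint8_t flags0 = 0;

	SET_FIELD(flags0, CF0, 0x2);	/* place 0b10 in bits 3:2 */
	printf("flags0=0x%02x CF0=%u\n", flags0, GET_FIELD(flags0, CF0));
	return 0;
}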
linux/drivers/net/ethernet/sfc/farch.c:2033 │ linux/drivers/net/ethernet/sfc/falcon/farch.c:1965
│
bool is_full = false; │ bool is_full = false;
│
if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context) │ if ((gen_spec->flags & EF4_FILTER_FLAG_RX_RSS) &&
│ gen_spec->rss_context != EF4_FILTER_RSS_CONTEXT_DEFAULT)
return -EINVAL; │ return -EINVAL;
│
spec->priority = gen_spec->priority; │ spec->priority = gen_spec->priority;
spec->flags = gen_spec->flags; │ spec->flags = gen_spec->flags;
spec->dmaq_id = gen_spec->dmaq_id; │ spec->dmaq_id = gen_spec->dmaq_id;
│
switch (gen_spec->match_flags) { │ switch (gen_spec->match_flags) {
case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | │ case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | │ EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): │ EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT):
is_full = true; │ is_full = true;
fallthrough; │ fallthrough;
case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | │ case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { │ EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): {
__be32 rhost, host1, host2; │ __be32 rhost, host1, host2;
__be16 rport, port1, port2; │ __be16 rport, port1, port2;
│
EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX)); │ EF4_BUG_ON_PARANOID(!(gen_spec->flags & EF4_FILTER_FLAG_RX));
│
if (gen_spec->ether_type != htons(ETH_P_IP)) │ if (gen_spec->ether_type != htons(ETH_P_IP))
return -EPROTONOSUPPORT; │ return -EPROTONOSUPPORT;
if (gen_spec->loc_port == 0 || │ if (gen_spec->loc_port == 0 ||
(is_full && gen_spec->rem_port == 0)) │ (is_full && gen_spec->rem_port == 0))
return -EADDRNOTAVAIL; │ return -EADDRNOTAVAIL;
switch (gen_spec->ip_proto) { │ switch (gen_spec->ip_proto) {
case IPPROTO_TCP: │ case IPPROTO_TCP:
spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL : │ spec->type = (is_full ? EF4_FARCH_FILTER_TCP_FULL :
EFX_FARCH_FILTER_TCP_WILD); │ EF4_FARCH_FILTER_TCP_WILD);
break; │ break;
case IPPROTO_UDP: │ case IPPROTO_UDP:
spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL : │ spec->type = (is_full ? EF4_FARCH_FILTER_UDP_FULL :
EFX_FARCH_FILTER_UDP_WILD); │ EF4_FARCH_FILTER_UDP_WILD);
break; │ break;
default: │ default:
return -EPROTONOSUPPORT; │ return -EPROTONOSUPPORT;
} │ }
│
/* Filter is constructed in terms of source and destination, │ /* Filter is constructed in terms of source and destination,
* with the odd wrinkle that the ports are swapped in a UDP │ * with the odd wrinkle that the ports are swapped in a UDP
* wildcard filter. We need to convert from local and remote │ * wildcard filter. We need to convert from local and remote
* (= zero for wildcard) addresses. │ * (= zero for wildcard) addresses.
*/ │ */
rhost = is_full ? gen_spec->rem_host[0] : 0; │ rhost = is_full ? gen_spec->rem_host[0] : 0;
rport = is_full ? gen_spec->rem_port : 0; │ rport = is_full ? gen_spec->rem_port : 0;
host1 = rhost; │ host1 = rhost;
host2 = gen_spec->loc_host[0]; │ host2 = gen_spec->loc_host[0];
if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { │ if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
port1 = gen_spec->loc_port; │ port1 = gen_spec->loc_port;
port2 = rport; │ port2 = rport;
} else { │ } else {
port1 = rport; │ port1 = rport;
port2 = gen_spec->loc_port; │ port2 = gen_spec->loc_port;
} │ }
spec->data[0] = ntohl(host1) << 16 | ntohs(port1); │ spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; │ spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
spec->data[2] = ntohl(host2); │ spec->data[2] = ntohl(host2);
│
break; │ break;
} │ }
│
case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: │ case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID:
is_full = true; │ is_full = true;
fallthrough; │ fallthrough;
case EFX_FILTER_MATCH_LOC_MAC: │ case EF4_FILTER_MATCH_LOC_MAC:
spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : │ spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL :
EFX_FARCH_FILTER_MAC_WILD); │ EF4_FARCH_FILTER_MAC_WILD);
spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0; │ spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
spec->data[1] = (gen_spec->loc_mac[2] << 24 | │ spec->data[1] = (gen_spec->loc_mac[2] << 24 |
gen_spec->loc_mac[3] << 16 | │ gen_spec->loc_mac[3] << 16 |
gen_spec->loc_mac[4] << 8 | │ gen_spec->loc_mac[4] << 8 |
gen_spec->loc_mac[5]); │ gen_spec->loc_mac[5]);
spec->data[2] = (gen_spec->loc_mac[0] << 8 | │ spec->data[2] = (gen_spec->loc_mac[0] << 8 |
gen_spec->loc_mac[1]); │ gen_spec->loc_mac[1]);
break; │ break;
│
case EFX_FILTER_MATCH_LOC_MAC_IG: │ case EF4_FILTER_MATCH_LOC_MAC_IG:
spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ? │ spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
EFX_FARCH_FILTER_MC_DEF : │ EF4_FARCH_FILTER_MC_DEF :
EFX_FARCH_FILTER_UC_DEF); │ EF4_FARCH_FILTER_UC_DEF);
memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ │ memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
break; │ break;
│
default: │ default:
return -EPROTONOSUPPORT; │ return -EPROTONOSUPPORT;
} │ }
│
return 0; │ return 0;
} │
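
The TCP/UDP case above packs two IPv4 addresses and two ports (96 bits) into the three 32-bit data[] words, with host1 straddling data[0] and data[1]. A standalone round-trip of that layout, using host-order values in place of the ntohl()/ntohs() conversions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t host1 = 0xc0a80001;	/* 192.168.0.1 */
	uint32_t host2 = 0xc0a80002;	/* 192.168.0.2 */
	uint16_t port1 = 80, port2 = 443;
	uint32_t data[3];

	data[0] = host1 << 16 | port1;			/* host1 lo, port1 */
	data[1] = (uint32_t)port2 << 16 | host1 >> 16;	/* port2, host1 hi */
	data[2] = host2;

	/* recover the fields to show the layout round-trips */
	printf("host1=0x%08x port1=%u port2=%u host2=0x%08x\n",
	       data[1] << 16 | data[0] >> 16, data[0] & 0xffff,
	       data[1] >> 16, data[2]);
	return 0;
}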
linux/drivers/net/ethernet/cavium/liquidio/octeon_device.c:310 │ linux/drivers/net/ethernet/cavium/liquidio/octeon_device.c:36
│
.card_type = LIO_210NV, │ .card_type = LIO_210SV,
.card_name = LIO_210NV_NAME, │ .card_name = LIO_210SV_NAME,
│
/** IQ attributes */ │ /** IQ attributes */
│
.iq = { │ .iq = {
.max_iqs = CN6XXX_CFG_IO_QUEUES, │ .max_iqs = CN6XXX_CFG_IO_QUEUES,
.pending_list_size = │ .pending_list_size =
(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES), │ (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
.instr_type = OCTEON_64BYTE_INSTR, │ .instr_type = OCTEON_64BYTE_INSTR,
.db_min = CN6XXX_DB_MIN, │ .db_min = CN6XXX_DB_MIN,
.db_timeout = CN6XXX_DB_TIMEOUT, │ .db_timeout = CN6XXX_DB_TIMEOUT,
} │ }
, │ ,
│
/** OQ attributes */ │ /** OQ attributes */
.oq = { │ .oq = {
.max_oqs = CN6XXX_CFG_IO_QUEUES, │ .max_oqs = CN6XXX_CFG_IO_QUEUES,
.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD, │ .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
.oq_intr_pkt = CN6XXX_OQ_INTR_PKT, │ .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
.oq_intr_time = CN6XXX_OQ_INTR_TIME, │ .oq_intr_time = CN6XXX_OQ_INTR_TIME,
.pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR, │ .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
} │ }
, │ ,
│
.num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX_210NV, │ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_66XX,
.num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, │ .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
.num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, │ .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
.def_rx_buf_size = CN6XXX_OQ_BUF_SIZE, │ .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
│
│ /* For ethernet interface 0: Port cfg Attributes */
.nic_if_cfg[0] = { │ .nic_if_cfg[0] = {
/* Max Txqs: Half for each of the two ports :max_iq/2 */ │ /* Max Txqs: Half for each of the two ports :max_iq/2 */
.max_txqs = MAX_TXQS_PER_INTF, │ .max_txqs = MAX_TXQS_PER_INTF,
│
/* Actual configured value. Range could be: 1...max_txqs */ │ /* Actual configured value. Range could be: 1...max_txqs */
.num_txqs = DEF_TXQS_PER_INTF, │ .num_txqs = DEF_TXQS_PER_INTF,
│
/* Max Rxqs: Half for each of the two ports :max_oq/2 */ │ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
.max_rxqs = MAX_RXQS_PER_INTF, │ .max_rxqs = MAX_RXQS_PER_INTF,
│
/* Actual configured value. Range could be: 1...max_rxqs */ │ /* Actual configured value. Range could be: 1...max_rxqs */
.num_rxqs = DEF_RXQS_PER_INTF, │ .num_rxqs = DEF_RXQS_PER_INTF,
│
/* Num of desc for rx rings */ │ /* Num of desc for rx rings */
.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, │ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
│
/* Num of desc for tx rings */ │ /* Num of desc for tx rings */
.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, │ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
│
/* SKB size, We need not change buf size even for Jumbo frames. │ /* SKB size, We need not change buf size even for Jumbo frames.
* Octeon can send jumbo frames in 4 consecutive descriptors, │ * Octeon can send jumbo frames in 4 consecutive descriptors,
*/ │ */
.rx_buf_size = CN6XXX_OQ_BUF_SIZE, │ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
│
.base_queue = BASE_QUEUE_NOT_REQUESTED, │ .base_queue = BASE_QUEUE_NOT_REQUESTED,
│
.gmx_port_id = 0, │ .gmx_port_id = 0,
}, │ },
│
.nic_if_cfg[1] = { │ .nic_if_cfg[1] = {
/* Max Txqs: Half for each of the two ports :max_iq/2 */ │ /* Max Txqs: Half for each of the two ports :max_iq/2 */
.max_txqs = MAX_TXQS_PER_INTF, │ .max_txqs = MAX_TXQS_PER_INTF,
│
/* Actual configured value. Range could be: 1...max_txqs */ │ /* Actual configured value. Range could be: 1...max_txqs */
.num_txqs = DEF_TXQS_PER_INTF, │ .num_txqs = DEF_TXQS_PER_INTF,
│
/* Max Rxqs: Half for each of the two ports :max_oq/2 */ │ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
.max_rxqs = MAX_RXQS_PER_INTF, │ .max_rxqs = MAX_RXQS_PER_INTF,
│
/* Actual configured value. Range could be: 1...max_rxqs */ │ /* Actual configured value. Range could be: 1...max_rxqs */
.num_rxqs = DEF_RXQS_PER_INTF, │ .num_rxqs = DEF_RXQS_PER_INTF,
│
/* Num of desc for rx rings */ │ /* Num of desc for rx rings */
.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, │ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
│
/* Num of desc for tx rings */ │ /* Num of desc for tx rings */
.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, │ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
│
/* SKB size, We need not change buf size even for Jumbo frames. │ /* SKB size, We need not change buf size even for Jumbo frames.
* Octeon can send jumbo frames in 4 consecutive descriptors, │ * Octeon can send jumbo frames in 4 consecutive descriptors,
*/ │ */
.rx_buf_size = CN6XXX_OQ_BUF_SIZE, │ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
│
.base_queue = BASE_QUEUE_NOT_REQUESTED, │ .base_queue = BASE_QUEUE_NOT_REQUESTED,
│
.gmx_port_id = 1, │ .gmx_port_id = 1,
}, │ },
│
/** Miscellaneous attributes */ │ /** Miscellaneous attributes */
.misc = { │ .misc = {
/* Host driver link query interval */ │ /* Host driver link query interval */
.oct_link_query_interval = 100, │ .oct_link_query_interval = 100,
│
/* Octeon link query interval */ │ /* Octeon link query interval */
.host_link_query_interval = 500, │ .host_link_query_interval = 500,
│
.enable_sli_oq_bp = 0, │ .enable_sli_oq_bp = 0,
│
/* Control queue group */ │ /* Control queue group */
.ctrlq_grp = 1, │ .ctrlq_grp = 1,
} │ }
, │ ,
} │
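
The per-interface limits in this table encode the comments' rule directly: the global IQ/OQ budget is split evenly across the two GMX ports, so each interface may claim at most max_iq/2 transmit queues and max_oq/2 receive queues. A trivial check of that arithmetic (the 32-queue budget is an assumed example, not necessarily CN6XXX_CFG_IO_QUEUES):

#include <stdio.h>

int main(void)
{
	int max_iqs = 32;	/* assumed global queue budget */
	int nports = 2;		/* two GMX ports on this card */

	printf("max txqs per interface: %d\n", max_iqs / nports);
	return 0;
}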
linux/drivers/net/ethernet/sfc/farch.c:1934 │ linux/drivers/net/ethernet/sfc/falcon/farch.c:1866
│
struct efx_farch_filter_state *state = efx->filter_state; │ struct ef4_farch_filter_state *state = efx->filter_state;
struct efx_farch_filter_table *table; │ struct ef4_farch_filter_table *table;
efx_oword_t filter_ctl; │ ef4_oword_t filter_ctl;
│
efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); │ ef4_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
│
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; │ table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, │ EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + │ table->search_limit[EF4_FARCH_FILTER_TCP_FULL] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); │ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, │ EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + │ table->search_limit[EF4_FARCH_FILTER_TCP_WILD] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); │ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, │ EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + │ table->search_limit[EF4_FARCH_FILTER_UDP_FULL] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); │ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, │ EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + │ table->search_limit[EF4_FARCH_FILTER_UDP_WILD] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); │ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
│
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; │ table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC];
if (table->size) { │ if (table->size) {
EFX_SET_OWORD_FIELD( │ EF4_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, │ filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + │ table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); │ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD( │ EF4_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, │ filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + │ table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); │ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
} │ }
│
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; │ table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
if (table->size) { │ if (table->size) {
EFX_SET_OWORD_FIELD( │ EF4_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, │ filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); │ table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
EFX_SET_OWORD_FIELD( │ EF4_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, │ filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & │ !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
EFX_FILTER_FLAG_RX_RSS)); │ EF4_FILTER_FLAG_RX_RSS));
EFX_SET_OWORD_FIELD( │ EF4_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, │ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); │ table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
EFX_SET_OWORD_FIELD( │ EF4_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, │ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
!!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & │ !!(table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_RSS)); │ EF4_FILTER_FLAG_RX_RSS));
│
/* There is a single bit to enable RX scatter for all │ /* There is a single bit to enable RX scatter for all
* unmatched packets. Only set it if scatter is │ * unmatched packets. Only set it if scatter is
* enabled in both filter specs. │ * enabled in both filter specs.
*/ │ */
EFX_SET_OWORD_FIELD( │ EF4_SET_OWORD_FIELD(
filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, │ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & │ !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & │ table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_SCATTER)); │ EF4_FILTER_FLAG_RX_SCATTER));
} else { │ } else if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
/* We don't expose 'default' filters because unmatched │ /* We don't expose 'default' filters because unmatched
* packets always go to the queue number found in the │ * packets always go to the queue number found in the
* RSS table. But we still need to set the RX scatter │ * RSS table. But we still need to set the RX scatter
* bit here. │ * bit here.
*/ │ */
EFX_SET_OWORD_FIELD( │ EF4_SET_OWORD_FIELD(
filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, │ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
efx->rx_scatter); │ efx->rx_scatter);
} │ }
│
efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); │ ef4_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
} │
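
The scatter-enable logic above leans on a small idiom: ANDing the unicast and multicast default-filter flag words before masking, so the single hardware bit is set only when both specs carry RX_SCATTER. Isolated, the idiom looks like this (the flag value is illustrative):

#include <stdio.h>

#define FLAG_RX_SCATTER 0x4	/* illustrative flag bit */

int main(void)
{
	unsigned int uc_flags = FLAG_RX_SCATTER;	/* unicast default spec */
	unsigned int mc_flags = 0;			/* multicast default spec */

	/* nonzero only if the bit is present in both flag words */
	printf("scatter=%d\n", !!(uc_flags & mc_flags & FLAG_RX_SCATTER));
	return 0;
}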
linux/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c:1104 │ linux/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c:667
│
unsigned int i = 0U; │ unsigned int i = 0U;
u32 itr_tx = 2U; │ u32 itr_tx = 2U;
u32 itr_rx = 2U; │ u32 itr_rx = 2U;
│
switch (self->aq_nic_cfg->itr) { │ switch (self->aq_nic_cfg->itr) {
case AQ_CFG_INTERRUPT_MODERATION_ON: │ case AQ_CFG_INTERRUPT_MODERATION_ON:
case AQ_CFG_INTERRUPT_MODERATION_AUTO: │ case AQ_CFG_INTERRUPT_MODERATION_AUTO:
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U); │ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
hw_atl_tdm_tdm_intr_moder_en_set(self, 1U); │ hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U); │ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
hw_atl_rdm_rdm_intr_moder_en_set(self, 1U); │ hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
│
if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) { │ if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
/* HW timers are in 2us units */ │ /* HW timers are in 2us units */
int tx_max_timer = self->aq_nic_cfg->tx_itr / 2; │ int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
int tx_min_timer = tx_max_timer / 2; │ int tx_min_timer = tx_max_timer / 2;
│
int rx_max_timer = self->aq_nic_cfg->rx_itr / 2; │ int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
int rx_min_timer = rx_max_timer / 2; │ int rx_min_timer = rx_max_timer / 2;
│
tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer); │ tx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer); │ tx_max_timer);
rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer); │ tx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer); │ tx_min_timer);
│ rx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
│ rx_max_timer);
│ rx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
│ rx_min_timer);
│
itr_tx |= tx_min_timer << 0x8U; │ itr_tx |= tx_min_timer << 0x8U;
itr_tx |= tx_max_timer << 0x10U; │ itr_tx |= tx_max_timer << 0x10U;
itr_rx |= rx_min_timer << 0x8U; │ itr_rx |= rx_min_timer << 0x8U;
itr_rx |= rx_max_timer << 0x10U; │ itr_rx |= rx_max_timer << 0x10U;
} else { │ } else {
static unsigned int hw_atl_b0_timers_table_tx_[][2] = { │ static unsigned int hw_atl2_timers_table_tx_[][2] = {
{0xfU, 0xffU}, /* 10Gbit */ │ {0xfU, 0xffU}, /* 10Gbit */
{0xfU, 0x1ffU}, /* 5Gbit */ │ {0xfU, 0x1ffU}, /* 5Gbit */
{0xfU, 0x1ffU}, /* 5Gbit 5GS */ │ {0xfU, 0x1ffU}, /* 5Gbit 5GS */
{0xfU, 0x1ffU}, /* 2.5Gbit */ │ {0xfU, 0x1ffU}, /* 2.5Gbit */
{0xfU, 0x1ffU}, /* 1Gbit */ │ {0xfU, 0x1ffU}, /* 1Gbit */
{0xfU, 0x1ffU}, /* 100Mbit */ │ {0xfU, 0x1ffU}, /* 100Mbit */
}; │ };
│ static unsigned int hw_atl2_timers_table_rx_[][2] = {
static unsigned int hw_atl_b0_timers_table_rx_[][2] = { │
{0x6U, 0x38U},/* 10Gbit */ │ {0x6U, 0x38U},/* 10Gbit */
{0xCU, 0x70U},/* 5Gbit */ │ {0xCU, 0x70U},/* 5Gbit */
{0xCU, 0x70U},/* 5Gbit 5GS */ │ {0xCU, 0x70U},/* 5Gbit 5GS */
{0x18U, 0xE0U},/* 2.5Gbit */ │ {0x18U, 0xE0U},/* 2.5Gbit */
{0x30U, 0x80U},/* 1Gbit */ │ {0x30U, 0x80U},/* 1Gbit */
{0x4U, 0x50U},/* 100Mbit */ │ {0x4U, 0x50U},/* 100Mbit */
}; │ };
│ unsigned int mbps = self->aq_link_status.mbps;
│ unsigned int speed_index;
│
unsigned int speed_index = │ speed_index = hw_atl_utils_mbps_2_speed_index(mbps);
hw_atl_utils_mbps_2_speed_index( │
self->aq_link_status.mbps); │
│
/* Update user visible ITR settings */ │ /* Update user visible ITR settings */
self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_ │ self->aq_nic_cfg->tx_itr = hw_atl2_timers_table_tx_
[speed_index][1] * 2; │ [speed_index][1] * 2;
self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_ │ self->aq_nic_cfg->rx_itr = hw_atl2_timers_table_rx_
[speed_index][1] * 2; │ [speed_index][1] * 2;
│
itr_tx |= hw_atl_b0_timers_table_tx_ │ itr_tx |= hw_atl2_timers_table_tx_
[speed_index][0] << 0x8U; │ [speed_index][0] << 0x8U;
itr_tx |= hw_atl_b0_timers_table_tx_ │ itr_tx |= hw_atl2_timers_table_tx_
[speed_index][1] << 0x10U; │ [speed_index][1] << 0x10U;
│
itr_rx |= hw_atl_b0_timers_table_rx_ │ itr_rx |= hw_atl2_timers_table_rx_
[speed_index][0] << 0x8U; │ [speed_index][0] << 0x8U;
itr_rx |= hw_atl_b0_timers_table_rx_ │ itr_rx |= hw_atl2_timers_table_rx_
[speed_index][1] << 0x10U; │ [speed_index][1] << 0x10U;
} │ }
break; │ break;
case AQ_CFG_INTERRUPT_MODERATION_OFF: │ case AQ_CFG_INTERRUPT_MODERATION_OFF:
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U); │ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
hw_atl_tdm_tdm_intr_moder_en_set(self, 0U); │ hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U); │ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
hw_atl_rdm_rdm_intr_moder_en_set(self, 0U); │ hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
itr_tx = 0U; │ itr_tx = 0U;
itr_rx = 0U; │ itr_rx = 0U;
break; │ break;
} │ }
│
for (i = HW_ATL_B0_RINGS_MAX; i--;) { │ for (i = HW_ATL2_RINGS_MAX; i--;) {
hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i); │ hw_atl2_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i); │ hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
} │ }
│
return aq_hw_err_from_flags(self); │ return aq_hw_err_from_flags(self);
} │
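
Both ITR paths above end the same way: microsecond settings are halved into the 2 us hardware unit (or taken from the per-speed tables), then packed into one register word with the minimum timer at bit 8 and the maximum at bit 16. A standalone sketch of that packing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int tx_itr_us = 252;		/* user-visible setting */
	int max_timer = tx_itr_us / 2;		/* 2 us hardware units */
	int min_timer = max_timer / 2;
	uint32_t itr = 2;			/* same seed as the driver */

	itr |= (uint32_t)min_timer << 0x8;	/* min timer at bits 15:8 */
	itr |= (uint32_t)max_timer << 0x10;	/* max timer at bits 24:16 */
	printf("itr=0x%08x min=%d max=%d\n", itr, min_timer, max_timer);
	return 0;
}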
linux/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c:342 │ linux/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c:610
│
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); │ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key; │ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct mlx5_pkt_reformat_params reformat_params; │ struct mlx5_pkt_reformat_params reformat_params;
TC_TUN_ROUTE_ATTR_INIT(attr); │ TC_TUN_ROUTE_ATTR_INIT(attr);
int ipv4_encap_size; │ struct ipv6hdr *ip6h;
│ int ipv6_encap_size;
char *encap_header; │ char *encap_header;
struct iphdr *ip; │
u8 nud_state; │ u8 nud_state;
int err; │ int err;
│
/* add the IP fields */ │
attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK; │
attr.fl.fl4.daddr = tun_key->u.ipv4.dst; │
attr.fl.fl4.saddr = tun_key->u.ipv4.src; │
attr.ttl = tun_key->ttl; │ attr.ttl = tun_key->ttl;
│
err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &attr); │ attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
│ attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
│ attr.fl.fl6.saddr = tun_key->u.ipv6.src;
│
│ err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &attr);
if (err) │ if (err)
return err; │ return err;
│
ipv4_encap_size = │ ipv6_encap_size =
(is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) + │ (is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
sizeof(struct iphdr) + │ sizeof(struct ipv6hdr) +
e->tunnel->calc_hlen(e); │ e->tunnel->calc_hlen(e);
│
if (max_encap_size < ipv4_encap_size) { │ if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", │ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
ipv4_encap_size, max_encap_size); │ ipv6_encap_size, max_encap_size);
err = -EOPNOTSUPP; │ err = -EOPNOTSUPP;
goto release_neigh; │ goto release_neigh;
} │ }
│
encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL); │ encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
if (!encap_header) { │ if (!encap_header) {
err = -ENOMEM; │ err = -ENOMEM;
goto release_neigh; │ goto release_neigh;
} │ }
│
e->route_dev_ifindex = attr.route_dev->ifindex; │ e->route_dev_ifindex = attr.route_dev->ifindex;
│
read_lock_bh(&attr.n->lock); │ read_lock_bh(&attr.n->lock);
nud_state = attr.n->nud_state; │ nud_state = attr.n->nud_state;
ether_addr_copy(e->h_dest, attr.n->ha); │ ether_addr_copy(e->h_dest, attr.n->ha);
WRITE_ONCE(e->nhe->neigh_dev, attr.n->dev); │ WRITE_ONCE(e->nhe->neigh_dev, attr.n->dev);
read_unlock_bh(&attr.n->lock); │ read_unlock_bh(&attr.n->lock);
│
/* add ethernet header */ │ /* add ethernet header */
ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e, │ ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e,
ETH_P_IP); │ ETH_P_IPV6);
│
/* add ip header */ │ /* add ip header */
ip->tos = tun_key->tos; │ ip6_flow_hdr(ip6h, tun_key->tos, 0);
ip->version = 0x4; │ /* the HW fills up ipv6 payload len */
ip->ihl = 0x5; │ ip6h->hop_limit = attr.ttl;
ip->ttl = attr.ttl; │ ip6h->daddr = attr.fl.fl6.daddr;
ip->daddr = attr.fl.fl4.daddr; │ ip6h->saddr = attr.fl.fl6.saddr;
ip->saddr = attr.fl.fl4.saddr; │
│
/* add tunneling protocol header */ │ /* add tunneling protocol header */
err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr), │ err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
&ip->protocol, e); │ &ip6h->nexthdr, e);
if (err) │ if (err)
goto free_encap; │ goto free_encap;
│
e->encap_size = ipv4_encap_size; │ e->encap_size = ipv6_encap_size;
kfree(e->encap_header); │ kfree(e->encap_header);
e->encap_header = encap_header; │ e->encap_header = encap_header;
│
if (!(nud_state & NUD_VALID)) { │ if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL); │ neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event │ /* the encap entry will be made valid on neigh update event
* and not used before that. │ * and not used before that.
*/ │ */
goto release_neigh; │ goto release_neigh;
} │ }
│
memset(&reformat_params, 0, sizeof(reformat_params)); │ memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type; │ reformat_params.type = e->reformat_type;
reformat_params.size = ipv4_encap_size; │ reformat_params.size = ipv6_encap_size;
reformat_params.data = encap_header; │ reformat_params.data = encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, │ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB); │ MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) { │ if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat); │ err = PTR_ERR(e->pkt_reformat);
goto free_encap; │ goto free_encap;
} │ }
│
e->flags |= MLX5_ENCAP_ENTRY_VALID; │ e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); │ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv4_put(&attr); │ mlx5e_route_lookup_ipv6_put(&attr);
return err; │ return err;
│
free_encap: │ free_encap:
kfree(encap_header); │ kfree(encap_header);
release_neigh: │ release_neigh:
mlx5e_route_lookup_ipv4_put(&attr); │ mlx5e_route_lookup_ipv6_put(&attr);
return err; │ return err;
} │
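
The IPv4 and IPv6 variants differ only in the L3 header they lay down; both size the reformat buffer as L2 header + IP header + tunnel overhead and bail out past the device cap. A standalone sketch of the size check with VXLAN-style numbers (the 128-byte cap is an assumed example, not a mlx5 capability value):

#include <stdio.h>

#define ETH_HLEN	14
#define IPV4_HLEN	20
#define VXLAN_HLEN	16	/* UDP (8) + VXLAN (8) */

int main(void)
{
	int max_encap_size = 128;	/* device capability (example) */
	int encap_size = ETH_HLEN + IPV4_HLEN + VXLAN_HLEN;

	if (max_encap_size < encap_size) {
		printf("encap size %d too big, max supported is %d\n",
		       encap_size, max_encap_size);
		return 1;
	}
	printf("encap header: %d bytes\n", encap_size);
	return 0;
}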
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10739 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8065
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2; │ u8 flags2;
#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3; │ u8 flags3;
#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10739 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7996
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2; │ u8 flags2;
#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3; │ u8 flags3;
#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
} │
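
The MASK/SHIFT define pairs above are consumed through generic field-access macros rather than open-coded shifts; the qed driver provides GET_FIELD/SET_FIELD helpers for exactly this pattern. Below is a minimal user-space sketch of the same idea, reusing two of the defines shown above; the macro bodies and the main() driver are illustrative assumptions, not the kernel's exact definitions.

#include <stdint.h>
#include <stdio.h>

/* Field-access helpers in the style of the qed driver's GET_FIELD/SET_FIELD;
 * the exact kernel macro bodies may differ, this is an illustrative sketch.
 */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, v)                                        \
	do {                                                              \
		(value) &= ~((name##_MASK) << name##_SHIFT);              \
		(value) |= ((v) & name##_MASK) << name##_SHIFT;           \
	} while (0)

/* Two of the defines from the struct above */
#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK   0x3
#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT  2
#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK  0x1
#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1

int main(void)
{
	uint8_t flags0 = 0;

	SET_FIELD(flags0, USTORM_ISCSI_CONN_AG_CTX_CF0, 0x2); /* 2-bit CF0  */
	SET_FIELD(flags0, USTORM_ISCSI_CONN_AG_CTX_BIT1, 1);  /* 1-bit flag */

	/* flags0 is now 0x0a: CF0=2 at bits 3:2, BIT1=1 at bit 1 */
	printf("flags0 = 0x%02x, CF0 = %d, BIT1 = %d\n", flags0,
	       GET_FIELD(flags0, USTORM_ISCSI_CONN_AG_CTX_CF0),
	       GET_FIELD(flags0, USTORM_ISCSI_CONN_AG_CTX_BIT1));
	return 0;
}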
linux/drivers/net/ethernet/sfc/rx.c:123 │ linux/drivers/net/ethernet/sfc/falcon/rx.c:532
│
struct efx_nic *efx = rx_queue->efx; │ struct ef4_nic *efx = rx_queue->efx;
struct efx_channel *channel = efx_rx_queue_channel(rx_queue); │ struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
struct efx_rx_buffer *rx_buf; │ struct ef4_rx_buffer *rx_buf;
│
rx_queue->rx_packets++; │ rx_queue->rx_packets++;
│
rx_buf = efx_rx_buffer(rx_queue, index); │ rx_buf = ef4_rx_buffer(rx_queue, index);
rx_buf->flags |= flags; │ rx_buf->flags |= flags;
│
/* Validate the number of fragments and completed length */ │ /* Validate the number of fragments and completed length */
if (n_frags == 1) { │ if (n_frags == 1) {
if (!(flags & EFX_RX_PKT_PREFIX_LEN)) │ if (!(flags & EF4_RX_PKT_PREFIX_LEN))
efx_rx_packet__check_len(rx_queue, rx_buf, len); │ ef4_rx_packet__check_len(rx_queue, rx_buf, len);
} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) || │ } else if (unlikely(n_frags > EF4_RX_MAX_FRAGS) ||
unlikely(len <= (n_frags - 1) * efx->rx_dma_len) || │ unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
unlikely(len > n_frags * efx->rx_dma_len) || │ unlikely(len > n_frags * efx->rx_dma_len) ||
unlikely(!efx->rx_scatter)) { │ unlikely(!efx->rx_scatter)) {
/* If this isn't an explicit discard request, either │ /* If this isn't an explicit discard request, either
* the hardware or the driver is broken. │ * the hardware or the driver is broken.
*/ │ */
WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD)); │ WARN_ON(!(len == 0 && rx_buf->flags & EF4_RX_PKT_DISCARD));
rx_buf->flags |= EFX_RX_PKT_DISCARD; │ rx_buf->flags |= EF4_RX_PKT_DISCARD;
} │ }
│
netif_vdbg(efx, rx_status, efx->net_dev, │ netif_vdbg(efx, rx_status, efx->net_dev,
"RX queue %d received ids %x-%x len %d %s%s\n", │ "RX queue %d received ids %x-%x len %d %s%s\n",
efx_rx_queue_index(rx_queue), index, │ ef4_rx_queue_index(rx_queue), index,
(index + n_frags - 1) & rx_queue->ptr_mask, len, │ (index + n_frags - 1) & rx_queue->ptr_mask, len,
(rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "", │ (rx_buf->flags & EF4_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
(rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : ""); │ (rx_buf->flags & EF4_RX_PKT_DISCARD) ? " [DISCARD]" : "");
│
/* Discard packet, if instructed to do so. Process the │ /* Discard packet, if instructed to do so. Process the
* previous receive first. │ * previous receive first.
*/ │ */
if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { │ if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
efx_rx_flush_packet(channel); │ ef4_rx_flush_packet(channel);
efx_discard_rx_packet(channel, rx_buf, n_frags); │ ef4_discard_rx_packet(channel, rx_buf, n_frags);
return; │ return;
} │ }
│
if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN)) │ if (n_frags == 1 && !(flags & EF4_RX_PKT_PREFIX_LEN))
rx_buf->len = len; │ rx_buf->len = len;
│
/* Release and/or sync the DMA mapping - assumes all RX buffers │ /* Release and/or sync the DMA mapping - assumes all RX buffers
* consumed in-order per RX queue. │ * consumed in-order per RX queue.
*/ │ */
efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); │ ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
│
/* Prefetch nice and early so data will (hopefully) be in cache by │ /* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it. │ * the time we look at it.
*/ │ */
prefetch(efx_rx_buf_va(rx_buf)); │ prefetch(ef4_rx_buf_va(rx_buf));
│
rx_buf->page_offset += efx->rx_prefix_size; │ rx_buf->page_offset += efx->rx_prefix_size;
rx_buf->len -= efx->rx_prefix_size; │ rx_buf->len -= efx->rx_prefix_size;
│
if (n_frags > 1) { │ if (n_frags > 1) {
/* Release/sync DMA mapping for additional fragments. │ /* Release/sync DMA mapping for additional fragments.
* Fix length for last fragment. │ * Fix length for last fragment.
*/ │ */
unsigned int tail_frags = n_frags - 1; │ unsigned int tail_frags = n_frags - 1;
│
for (;;) { │ for (;;) {
rx_buf = efx_rx_buf_next(rx_queue, rx_buf); │ rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
if (--tail_frags == 0) │ if (--tail_frags == 0)
break; │ break;
efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len); │ ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
} │ }
rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len; │ rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); │ ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
} │ }
│
/* All fragments have been DMA-synced, so recycle pages. */ │ /* All fragments have been DMA-synced, so recycle pages. */
rx_buf = efx_rx_buffer(rx_queue, index); │ rx_buf = ef4_rx_buffer(rx_queue, index);
efx_recycle_rx_pages(channel, rx_buf, n_frags); │ ef4_recycle_rx_pages(channel, rx_buf, n_frags);
│
/* Pipeline receives so that we give time for packet headers to be │ /* Pipeline receives so that we give time for packet headers to be
* prefetched into cache. │ * prefetched into cache.
*/ │ */
efx_rx_flush_packet(channel); │ ef4_rx_flush_packet(channel);
channel->rx_pkt_n_frags = n_frags; │ channel->rx_pkt_n_frags = n_frags;
channel->rx_pkt_index = index; │ channel->rx_pkt_index = index;
} │
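
The scatter sanity check above encodes a simple range condition: a completed length len spread over n_frags DMA buffers of rx_dma_len bytes each must overflow the first n_frags - 1 buffers and fit within n_frags of them, i.e. (n_frags - 1) * rx_dma_len < len <= n_frags * rx_dma_len. A small stand-alone restatement of that check, with the EFX_RX_MAX_FRAGS value here being an assumption for the demo:

#include <stdbool.h>
#include <stdio.h>

#define EFX_RX_MAX_FRAGS 4  /* assumed limit for this demo */

/* Mirrors the validation above: true when (n_frags, len) is a plausible
 * scattered completion for buffers of rx_dma_len bytes.
 */
static bool rx_scatter_len_ok(unsigned int n_frags, unsigned int len,
			      unsigned int rx_dma_len)
{
	if (n_frags == 0 || n_frags > EFX_RX_MAX_FRAGS)
		return false;
	return len > (n_frags - 1) * rx_dma_len && len <= n_frags * rx_dma_len;
}

int main(void)
{
	/* 3000 bytes over 2 x 1800-byte buffers: 1800 < 3000 <= 3600 -> ok */
	printf("%d\n", rx_scatter_len_ok(2, 3000, 1800));
	/* 1500 bytes claimed to span 2 buffers -> bogus, one buffer holds it */
	printf("%d\n", rx_scatter_len_ok(2, 1500, 1800));
	return 0;
}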
linux/drivers/net/ethernet/sfc/selftest.c:283 │ linux/drivers/net/ethernet/sfc/falcon/selftest.c:285
│
struct efx_loopback_state *state = efx->loopback_selftest; │ struct ef4_loopback_state *state = efx->loopback_selftest;
struct efx_loopback_payload *received; │ struct ef4_loopback_payload *received;
struct efx_loopback_payload *payload; │ struct ef4_loopback_payload *payload;
│
BUG_ON(!buf_ptr); │ BUG_ON(!buf_ptr);
│
/* If we are just flushing, then drop the packet */ │ /* If we are just flushing, then drop the packet */
if ((state == NULL) || state->flush) │ if ((state == NULL) || state->flush)
return; │ return;
│
payload = &state->payload; │ payload = &state->payload;
│
received = (struct efx_loopback_payload *) buf_ptr; │ received = (struct ef4_loopback_payload *) buf_ptr;
received->ip.saddr = payload->ip.saddr; │ received->ip.saddr = payload->ip.saddr;
if (state->offload_csum) │ if (state->offload_csum)
received->ip.check = payload->ip.check; │ received->ip.check = payload->ip.check;
│
/* Check that header exists */ │ /* Check that header exists */
if (pkt_len < sizeof(received->header)) { │ if (pkt_len < sizeof(received->header)) {
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"saw runt RX packet (length %d) in %s loopback " │ "saw runt RX packet (length %d) in %s loopback "
"test\n", pkt_len, LOOPBACK_MODE(efx)); │ "test\n", pkt_len, LOOPBACK_MODE(efx));
goto err; │ goto err;
} │ }
│
/* Check that the ethernet header exists */ │ /* Check that the ethernet header exists */
if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { │ if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"saw non-loopback RX packet in %s loopback test\n", │ "saw non-loopback RX packet in %s loopback test\n",
LOOPBACK_MODE(efx)); │ LOOPBACK_MODE(efx));
goto err; │ goto err;
} │ }
│
/* Check packet length */ │ /* Check packet length */
if (pkt_len != sizeof(*payload)) { │ if (pkt_len != sizeof(*payload)) {
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"saw incorrect RX packet length %d (wanted %d) in " │ "saw incorrect RX packet length %d (wanted %d) in "
"%s loopback test\n", pkt_len, (int)sizeof(*payload), │ "%s loopback test\n", pkt_len, (int)sizeof(*payload),
LOOPBACK_MODE(efx)); │ LOOPBACK_MODE(efx));
goto err; │ goto err;
} │ }
│
/* Check that IP header matches */ │ /* Check that IP header matches */
if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { │ if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"saw corrupted IP header in %s loopback test\n", │ "saw corrupted IP header in %s loopback test\n",
LOOPBACK_MODE(efx)); │ LOOPBACK_MODE(efx));
goto err; │ goto err;
} │ }
│
/* Check that msg and padding match */ │ /* Check that msg and padding match */
if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { │ if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"saw corrupted RX packet in %s loopback test\n", │ "saw corrupted RX packet in %s loopback test\n",
LOOPBACK_MODE(efx)); │ LOOPBACK_MODE(efx));
goto err; │ goto err;
} │ }
│
/* Check that iteration matches */ │ /* Check that iteration matches */
if (received->iteration != payload->iteration) { │ if (received->iteration != payload->iteration) {
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"saw RX packet from iteration %d (wanted %d) in " │ "saw RX packet from iteration %d (wanted %d) in "
"%s loopback test\n", ntohs(received->iteration), │ "%s loopback test\n", ntohs(received->iteration),
ntohs(payload->iteration), LOOPBACK_MODE(efx)); │ ntohs(payload->iteration), LOOPBACK_MODE(efx));
goto err; │ goto err;
} │ }
│
/* Increase correct RX count */ │ /* Increase correct RX count */
netif_vdbg(efx, drv, efx->net_dev, │ netif_vdbg(efx, drv, efx->net_dev,
"got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx)); │ "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
│
atomic_inc(&state->rx_good); │ atomic_inc(&state->rx_good);
return; │ return;
│
err: │ err:
#ifdef DEBUG │ #ifdef DEBUG
if (atomic_read(&state->rx_bad) == 0) { │ if (atomic_read(&state->rx_bad) == 0) {
netif_err(efx, drv, efx->net_dev, "received packet:\n"); │ netif_err(efx, drv, efx->net_dev, "received packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, │ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
buf_ptr, pkt_len, 0); │ buf_ptr, pkt_len, 0);
netif_err(efx, drv, efx->net_dev, "expected packet:\n"); │ netif_err(efx, drv, efx->net_dev, "expected packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, │ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
&state->payload, sizeof(state->payload), 0); │ &state->payload, sizeof(state->payload), 0);
} │ }
#endif │ #endif
atomic_inc(&state->rx_bad); │ atomic_inc(&state->rx_bad);
} │
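
The sequence of checks above implies a fixed payload layout: a runt test against sizeof(header), memcmp() over the leading Ethernet and IP headers, a big-endian 16-bit iteration counter read via ntohs(), and a final whole-payload length match. A sketch of that layout follows; the udp member and the 64-byte msg size are assumptions (only header, ip, iteration, and msg are referenced in the checks above).

#include <net/ethernet.h>   /* struct ether_header */
#include <netinet/ip.h>     /* struct iphdr */
#include <netinet/udp.h>    /* struct udphdr */
#include <stdint.h>
#include <stdio.h>

/* Layout implied by the checks above; packed so sizeof() matches the
 * on-the-wire pkt_len comparison.
 */
struct loopback_payload_sketch {
	struct ether_header header; /* checked first, against ETH_HLEN    */
	struct iphdr ip;            /* saddr/check patched before compare */
	struct udphdr udp;          /* assumed, not referenced above      */
	uint16_t iteration;         /* big-endian, compared via ntohs()   */
	char msg[64];               /* assumed size; compared with memcmp */
} __attribute__((packed));

int main(void)
{
	printf("payload size = %zu\n", sizeof(struct loopback_payload_sketch));
	return 0;
}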
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:798 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8065
│
u8 reserved; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2; │ u8 flags2;
#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3; │ u8 flags3;
#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 rx_producers; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:798 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7996
│
u8 reserved; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2; │ u8 flags2;
#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3; │ u8 flags3;
#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 rx_producers; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:798 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10739
│
u8 reserved; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 │ #define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 │ #define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 │ #define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 │ #define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 │ #define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 │ #define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 │ #define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2; │ u8 flags2;
#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 │ #define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 │ #define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 │ #define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3; │ u8 flags3;
#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 rx_producers; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10086 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8065
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2; │ u8 flags2;
#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3; │ u8 flags3;
#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 │ #define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10086 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7996
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2; │ u8 flags2;
#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3; │ u8 flags3;
#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 │ #define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10086 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10739
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 │ #define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 │ #define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 │ #define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 │ #define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2; │ u8 flags2;
#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 │ #define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 │ #define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 │ #define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 │ #define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3; │ u8 flags3;
#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 │ #define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10086 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:798
│
u8 byte0; │ u8 reserved;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 │ #define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 │ #define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 │ #define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 │ #define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 │ #define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 │ #define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 │ #define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 │ #define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2; │ u8 flags2;
#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 │ #define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 │ #define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 │ #define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 │ #define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 │ #define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3; │ u8 flags3;
#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 │ #define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 │ #define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 │ #define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 │ #define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 │ #define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 │ #define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 │ #define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 │ #define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 │ #define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 rx_producers;
__le32 reg1; │ __le32 reg1;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
} │
linux/drivers/net/ethernet/chelsio/cxgb4/sge.c:2027 │ linux/drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1378
│
/* Write the skb's Scatter/Gather list into the TX Packet CPL │ /*
│ * Write the skb's Scatter/Gather list into the TX Packet CPL
* message and retain a pointer to the skb so we can free it │ * message and retain a pointer to the skb so we can free it
* later when its DMA completes. (We store the skb pointer │ * later when its DMA completes. (We store the skb pointer
* in the Software Descriptor corresponding to the last TX │ * in the Software Descriptor corresponding to the last TX
* Descriptor used by the Work Request.) │ * Descriptor used by the Work Request.)
* │ *
* The retained skb will be freed when the corresponding TX │ * The retained skb will be freed when the corresponding TX
* Descriptors are reclaimed after their DMAs complete. │ * Descriptors are reclaimed after their DMAs complete.
* However, this could take quite a while since, in general, │ * However, this could take quite a while since, in general,
* the hardware is set up to be lazy about sending DMA │ * the hardware is set up to be lazy about sending DMA
* completion notifications to us and we mostly perform TX │ * completion notifications to us and we mostly perform TX
* reclaims in the transmit routine. │ * reclaims in the transmit routine.
* │ *
* This is good for performance but means that we rely on new │ * This is good for performance but means that we rely on new
* TX packets arriving to run the destructors of completed │ * TX packets arriving to run the destructors of completed
* packets, which open up space in their sockets' send queues. │ * packets, which open up space in their sockets' send queues.
* Sometimes we do not get such new packets causing TX to │ * Sometimes we do not get such new packets causing TX to
* stall. A single UDP transmitter is a good example of this │ * stall. A single UDP transmitter is a good example of this
* situation. We have a clean up timer that periodically │ * situation. We have a clean up timer that periodically
* reclaims completed packets but it doesn't run often enough │ * reclaims completed packets but it doesn't run often enough
* (nor do we want it to) to prevent lengthy stalls. A │ * (nor do we want it to) to prevent lengthy stalls. A
* solution to this problem is to run the destructor early, │ * solution to this problem is to run the destructor early,
* after the packet is queued but before it's DMAd. A con is │ * after the packet is queued but before it's DMAd. A con is
* that we lie to socket memory accounting, but the amount of │ * that we lie to socket memory accounting, but the amount of
* extra memory is reasonable (limited by the number of TX │ * extra memory is reasonable (limited by the number of TX
* descriptors), the packets do actually get freed quickly by │ * descriptors), the packets do actually get freed quickly by
* new packets almost always, and for protocols like TCP that │ * new packets almost always, and for protocols like TCP that
* wait for acks to really free up the data the extra memory │ * wait for acks to really free up the data the extra memory
* is even less. On the positive side we run the destructors │ * is even less. On the positive side we run the destructors
* on the sending CPU rather than on a potentially different │ * on the sending CPU rather than on a potentially different
* completing CPU, usually a good thing. │ * completing CPU, usually a good thing.
* │ *
* Run the destructor before telling the DMA engine about the │ * Run the destructor before telling the DMA engine about the
* packet to make sure it doesn't complete and get freed │ * packet to make sure it doesn't complete and get freed
* prematurely. │ * prematurely.
*/ │ */
struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); │ struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
struct sge_txq *tq = &txq->q; │ struct sge_txq *tq = &txq->q;
│ int last_desc;
│
/* If the Work Request header was an exact multiple of our TX │ /*
│ * If the Work Request header was an exact multiple of our TX
* Descriptor length, then it's possible that the starting SGL │ * Descriptor length, then it's possible that the starting SGL
* pointer lines up exactly with the end of our TX Descriptor │ * pointer lines up exactly with the end of our TX Descriptor
* ring. If that's the case, wrap around to the beginning │ * ring. If that's the case, wrap around to the beginning
* here ... │ * here ...
*/ │ */
if (unlikely((void *)sgl == (void *)tq->stat)) { │ if (unlikely((void *)sgl == (void *)tq->stat)) {
sgl = (void *)tq->desc; │ sgl = (void *)tq->desc;
end = (void *)((void *)tq->desc + │ end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
((void *)end - (void *)tq->stat)); │
} │ }
│
cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr); │ write_sgl(skb, tq, sgl, end, 0, addr);
skb_orphan(skb); │ skb_orphan(skb);
sgl_sdesc->skb = skb; │
│ last_desc = tq->pidx + ndesc - 1;
│ if (last_desc >= tq->size)
│ last_desc -= tq->size;
│ tq->sdesc[last_desc].skb = skb;
│ tq->sdesc[last_desc].sgl = sgl;
} │
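
The long comment above argues for running the skb destructor at queue time rather than at DMA-completion time, so that a lazy completion path cannot starve the sender's socket accounting. Below is a toy user-space model of that early-orphan trick; every name here is invented for the demo (the kernel does this with skb_orphan() and skb->destructor, as the code above shows).

#include <stdio.h>

/* Toy model: send-queue accounting is released when the packet is handed
 * to the NIC (orphan), not when the DMA actually completes, so a slow
 * completion path cannot stall the sender.
 */
struct toy_sock { unsigned int wmem_queued; };

struct toy_skb {
	struct toy_sock *sk;
	unsigned int truesize;
	void (*destructor)(struct toy_skb *);
};

static void toy_wfree(struct toy_skb *skb)
{
	skb->sk->wmem_queued -= skb->truesize; /* open send-queue space */
	skb->sk = NULL;
}

static void toy_orphan(struct toy_skb *skb)
{
	if (skb->destructor)
		skb->destructor(skb); /* run the accounting release early */
	skb->destructor = NULL;
}

int main(void)
{
	struct toy_sock sk = { .wmem_queued = 2048 };
	struct toy_skb skb = { .sk = &sk, .truesize = 2048,
			       .destructor = toy_wfree };

	toy_orphan(&skb); /* before telling the DMA engine about it */
	printf("queued after orphan: %u\n", sk.wmem_queued); /* prints 0 */
	/* ... DMA completes much later; freeing skb no longer touches sk */
	return 0;
}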
linux/drivers/net/ethernet/intel/igb/igb_main.c:5887 │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c:7980
│
u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; │ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb; │ struct sk_buff *skb = first->skb;
union { │ union {
struct iphdr *v4; │ struct iphdr *v4;
struct ipv6hdr *v6; │ struct ipv6hdr *v6;
unsigned char *hdr; │ unsigned char *hdr;
} ip; │ } ip;
union { │ union {
struct tcphdr *tcp; │ struct tcphdr *tcp;
struct udphdr *udp; │ struct udphdr *udp;
unsigned char *hdr; │ unsigned char *hdr;
} l4; │ } l4;
u32 paylen, l4_offset; │ u32 paylen, l4_offset;
│ u32 fceof_saidx = 0;
int err; │ int err;
│
if (skb->ip_summed != CHECKSUM_PARTIAL) │ if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0; │ return 0;
│
if (!skb_is_gso(skb)) │ if (!skb_is_gso(skb))
return 0; │ return 0;
│
err = skb_cow_head(skb, 0); │ err = skb_cow_head(skb, 0);
if (err < 0) │ if (err < 0)
return err; │ return err;
│
ip.hdr = skb_network_header(skb); │ if (eth_p_mpls(first->protocol))
│ ip.hdr = skb_inner_network_header(skb);
│ else
│ ip.hdr = skb_network_header(skb);
l4.hdr = skb_checksum_start(skb); │ l4.hdr = skb_checksum_start(skb);
│
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ │ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? │ type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP; │ IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
│
/* initialize outer IP header fields */ │ /* initialize outer IP header fields */
if (ip.v4->version == 4) { │ if (ip.v4->version == 4) {
unsigned char *csum_start = skb_checksum_start(skb); │ unsigned char *csum_start = skb_checksum_start(skb);
unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); │ unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
│ int len = csum_start - trans_start;
│
/* IP header will have to cancel out any data that │ /* IP header will have to cancel out any data that
* is not a part of the outer IP header │ * is not a part of the outer IP header, so set to
│ * a reverse csum if needed, else init check to 0.
*/ │ */
ip.v4->check = csum_fold(csum_partial(trans_start, │ ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
csum_start - trans_start, │ csum_fold(csum_partial(trans_start,
0)); │ len, 0)) : 0;
type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; │ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
│
ip.v4->tot_len = 0; │ ip.v4->tot_len = 0;
first->tx_flags |= IGB_TX_FLAGS_TSO | │ first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IGB_TX_FLAGS_CSUM | │ IXGBE_TX_FLAGS_CSUM |
IGB_TX_FLAGS_IPV4; │ IXGBE_TX_FLAGS_IPV4;
} else { │ } else {
ip.v6->payload_len = 0; │ ip.v6->payload_len = 0;
first->tx_flags |= IGB_TX_FLAGS_TSO | │ first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IGB_TX_FLAGS_CSUM; │ IXGBE_TX_FLAGS_CSUM;
} │ }
│
/* determine offset of inner transport header */ │ /* determine offset of inner transport header */
l4_offset = l4.hdr - skb->data; │ l4_offset = l4.hdr - skb->data;
│
/* remove payload length from inner checksum */ │ /* remove payload length from inner checksum */
paylen = skb->len - l4_offset; │ paylen = skb->len - l4_offset;
if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) { │
│ if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
/* compute length of segmentation header */ │ /* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset; │ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
csum_replace_by_diff(&l4.tcp->check, │ csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(paylen)); │ (__force __wsum)htonl(paylen));
} else { │ } else {
/* compute length of segmentation header */ │ /* compute length of segmentation header */
*hdr_len = sizeof(*l4.udp) + l4_offset; │ *hdr_len = sizeof(*l4.udp) + l4_offset;
csum_replace_by_diff(&l4.udp->check, │ csum_replace_by_diff(&l4.udp->check,
(__force __wsum)htonl(paylen)); │ (__force __wsum)htonl(paylen));
} │ }
│
/* update gso size and bytecount with header size */ │ /* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs; │ first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len; │ first->bytecount += (first->gso_segs - 1) * *hdr_len;
│
/* MSS L4LEN IDX */ │ /* mss_l4len_id: use 0 as index for TSO */
mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; │ mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; │ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
│
│ fceof_saidx |= itd->sa_idx;
│ type_tucmd |= itd->flags | itd->trailer_len;
│
/* VLAN MACLEN IPLEN */ │ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = l4.hdr - ip.hdr; │ vlan_macip_lens = l4.hdr - ip.hdr;
vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; │ vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; │ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
│
igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, │ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
type_tucmd, mss_l4len_idx); │ mss_l4len_idx);
│
return 1; │ return 1;
} │
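
Both drivers rely on csum_replace_by_diff() here to remove the payload length that the stack pre-folded into the TCP/UDP pseudo-header checksum; during TSO the hardware re-adds each segment's own length. A minimal sketch of the underlying one's-complement arithmetic (simplified: it ignores the bit inversion the kernel applies to stored checksums, and the helper names are made up):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator down to a 16-bit one's-complement sum. */
static uint16_t csum_fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* One's-complement add of a 32-bit value into a folded 16-bit sum. */
static uint16_t csum_add(uint16_t check, uint32_t val)
{
	uint32_t sum = check;

	sum += (val & 0xffff) + (val >> 16);
	return csum_fold32(sum);
}

int main(void)
{
	uint16_t base = 0x1234;		/* pseudo-header sum w/o length */
	uint32_t paylen = 5840;		/* what the stack folded in */
	uint16_t with_len = csum_add(base, paylen);

	/* Subtracting x in one's complement == adding ~x; this is the
	 * heart of csum_replace_by_diff(&check, htonl(paylen)) above.
	 */
	uint16_t restored = csum_add(with_len, ~paylen);

	printf("restored %#x, expected %#x\n", restored, base);
	return restored == base ? 0 : 1;
}
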
next prev up linux/drivers/net/ethernet/neterion/vxge/vxge-config.c:3230 │ linux/drivers/net/ethernet/neterion/vxge/vxge-config.c:3104
│
enum vxge_hw_status status = VXGE_HW_OK; │ enum vxge_hw_status status = VXGE_HW_OK;
│
if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { │ if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
status = VXGE_HW_ERR_INVALID_DEVICE; │ status = VXGE_HW_ERR_INVALID_DEVICE;
goto exit; │ goto exit;
} │ }
│
switch (type) { │ switch (type) {
case vxge_hw_mgmt_reg_type_legacy: │ case vxge_hw_mgmt_reg_type_legacy:
if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) { │ if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET; │ status = VXGE_HW_ERR_INVALID_OFFSET;
break; │ break;
} │ }
writeq(value, (void __iomem *)hldev->legacy_reg + offset); │ *value = readq((void __iomem *)hldev->legacy_reg + offset);
break; │ break;
case vxge_hw_mgmt_reg_type_toc: │ case vxge_hw_mgmt_reg_type_toc:
if (offset > sizeof(struct vxge_hw_toc_reg) - 8) { │ if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET; │ status = VXGE_HW_ERR_INVALID_OFFSET;
break; │ break;
} │ }
writeq(value, (void __iomem *)hldev->toc_reg + offset); │ *value = readq((void __iomem *)hldev->toc_reg + offset);
break; │ break;
case vxge_hw_mgmt_reg_type_common: │ case vxge_hw_mgmt_reg_type_common:
if (offset > sizeof(struct vxge_hw_common_reg) - 8) { │ if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET; │ status = VXGE_HW_ERR_INVALID_OFFSET;
break; │ break;
} │ }
writeq(value, (void __iomem *)hldev->common_reg + offset); │ *value = readq((void __iomem *)hldev->common_reg + offset);
break; │ break;
case vxge_hw_mgmt_reg_type_mrpcim: │ case vxge_hw_mgmt_reg_type_mrpcim:
if (!(hldev->access_rights & │ if (!(hldev->access_rights &
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { │ VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
status = VXGE_HW_ERR_PRIVILEGED_OPERATION; │ status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
break; │ break;
} │ }
if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { │ if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET; │ status = VXGE_HW_ERR_INVALID_OFFSET;
break; │ break;
} │ }
writeq(value, (void __iomem *)hldev->mrpcim_reg + offset); │ *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
break; │ break;
case vxge_hw_mgmt_reg_type_srpcim: │ case vxge_hw_mgmt_reg_type_srpcim:
if (!(hldev->access_rights & │ if (!(hldev->access_rights &
VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { │ VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
status = VXGE_HW_ERR_PRIVILEGED_OPERATION; │ status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
break; │ break;
} │ }
if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { │ if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
status = VXGE_HW_ERR_INVALID_INDEX; │ status = VXGE_HW_ERR_INVALID_INDEX;
break; │ break;
} │ }
if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) { │ if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET; │ status = VXGE_HW_ERR_INVALID_OFFSET;
break; │ break;
} │ }
writeq(value, (void __iomem *)hldev->srpcim_reg[index] + │ *value = readq((void __iomem *)hldev->srpcim_reg[index] +
offset); │ offset);
│
break; │ break;
case vxge_hw_mgmt_reg_type_vpmgmt: │ case vxge_hw_mgmt_reg_type_vpmgmt:
if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) || │ if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
(!(hldev->vpath_assignments & vxge_mBIT(index)))) { │ (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
status = VXGE_HW_ERR_INVALID_INDEX; │ status = VXGE_HW_ERR_INVALID_INDEX;
break; │ break;
} │ }
if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) { │ if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET; │ status = VXGE_HW_ERR_INVALID_OFFSET;
break; │ break;
} │ }
writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] + │ *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
offset); │ offset);
break; │ break;
case vxge_hw_mgmt_reg_type_vpath: │ case vxge_hw_mgmt_reg_type_vpath:
if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) || │ if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
(!(hldev->vpath_assignments & vxge_mBIT(index)))) { │ (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
status = VXGE_HW_ERR_INVALID_INDEX; │ status = VXGE_HW_ERR_INVALID_INDEX;
break; │ break;
} │ }
│ if (index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) {
│ status = VXGE_HW_ERR_INVALID_INDEX;
│ break;
│ }
if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) { │ if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET; │ status = VXGE_HW_ERR_INVALID_OFFSET;
break; │ break;
} │ }
writeq(value, (void __iomem *)hldev->vpath_reg[index] + │ *value = readq((void __iomem *)hldev->vpath_reg[index] +
offset); │ offset);
break; │ break;
default: │ default:
status = VXGE_HW_ERR_INVALID_TYPE; │ status = VXGE_HW_ERR_INVALID_TYPE;
break; │ break;
} │ }
│
exit: │ exit:
return status; │ return status;
} │
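
Every case above guards a 64-bit readq()/writeq() with `offset > sizeof(struct ...) - 8`: the last valid byte offset into a register block is its size minus the 8 bytes the access touches (note only the upper bound is checked, not alignment). A sketch of the invariant, with a made-up block size:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* A readq()/writeq() covers offset .. offset + 7, so it fits inside a
 * register block of blk_size bytes iff offset <= blk_size - 8.
 */
static bool reg_offset_ok(size_t offset, size_t blk_size)
{
	return offset <= blk_size - 8;
}

int main(void)
{
	size_t blk = 64;	/* hypothetical 64-byte register block */

	printf("offset 56: %s\n", reg_offset_ok(56, blk) ? "ok" : "rejected");
	printf("offset 60: %s\n", reg_offset_ok(60, blk) ? "ok" : "rejected");
	return 0;
}
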
next prev up linux/drivers/net/ethernet/intel/e1000e/phy.c:2143 │ linux/drivers/net/ethernet/intel/igb/e1000_phy.c:2123
│
e_dbg("Running IGP 3 PHY init script\n"); │ hw_dbg("Running IGP 3 PHY init script\n");
│
/* PHY init IGP 3 */ │ /* PHY init IGP 3 */
/* Enable rise/fall, 10-mode work in class-A */ │ /* Enable rise/fall, 10-mode work in class-A */
e1e_wphy(hw, 0x2F5B, 0x9018); │ hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
/* Remove all caps from Replica path filter */ │ /* Remove all caps from Replica path filter */
e1e_wphy(hw, 0x2F52, 0x0000); │ hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
/* Bias trimming for ADC, AFE and Driver (Default) */ │ /* Bias trimming for ADC, AFE and Driver (Default) */
e1e_wphy(hw, 0x2FB1, 0x8B24); │ hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
/* Increase Hybrid poly bias */ │ /* Increase Hybrid poly bias */
e1e_wphy(hw, 0x2FB2, 0xF8F0); │ hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
/* Add 4% to Tx amplitude in Gig mode */ │ /* Add 4% to TX amplitude in Giga mode */
e1e_wphy(hw, 0x2010, 0x10B0); │ hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
/* Disable trimming (TTT) */ │ /* Disable trimming (TTT) */
e1e_wphy(hw, 0x2011, 0x0000); │ hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
/* Poly DC correction to 94.6% + 2% for all channels */ │ /* Poly DC correction to 94.6% + 2% for all channels */
e1e_wphy(hw, 0x20DD, 0x249A); │ hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
/* ABS DC correction to 95.9% */ │ /* ABS DC correction to 95.9% */
e1e_wphy(hw, 0x20DE, 0x00D3); │ hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
/* BG temp curve trim */ │ /* BG temp curve trim */
e1e_wphy(hw, 0x28B4, 0x04CE); │ hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
/* Increasing ADC OPAMP stage 1 currents to max */ │ /* Increasing ADC OPAMP stage 1 currents to max */
e1e_wphy(hw, 0x2F70, 0x29E4); │ hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
	/* Force 1000 (required for enabling PHY regs configuration) */        │	/* Force 1000 (required for enabling PHY regs configuration) */
e1e_wphy(hw, 0x0000, 0x0140); │ hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
/* Set upd_freq to 6 */ │ /* Set upd_freq to 6 */
e1e_wphy(hw, 0x1F30, 0x1606); │ hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
/* Disable NPDFE */ │ /* Disable NPDFE */
e1e_wphy(hw, 0x1F31, 0xB814); │ hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
/* Disable adaptive fixed FFE (Default) */ │ /* Disable adaptive fixed FFE (Default) */
e1e_wphy(hw, 0x1F35, 0x002A); │ hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
/* Enable FFE hysteresis */ │ /* Enable FFE hysteresis */
e1e_wphy(hw, 0x1F3E, 0x0067); │ hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
/* Fixed FFE for short cable lengths */ │ /* Fixed FFE for short cable lengths */
e1e_wphy(hw, 0x1F54, 0x0065); │ hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
/* Fixed FFE for medium cable lengths */ │ /* Fixed FFE for medium cable lengths */
e1e_wphy(hw, 0x1F55, 0x002A); │ hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
/* Fixed FFE for long cable lengths */ │ /* Fixed FFE for long cable lengths */
e1e_wphy(hw, 0x1F56, 0x002A); │ hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
/* Enable Adaptive Clip Threshold */ │ /* Enable Adaptive Clip Threshold */
e1e_wphy(hw, 0x1F72, 0x3FB0); │ hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
/* AHT reset limit to 1 */ │ /* AHT reset limit to 1 */
e1e_wphy(hw, 0x1F76, 0xC0FF); │ hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
/* Set AHT master delay to 127 msec */ │ /* Set AHT master delay to 127 msec */
e1e_wphy(hw, 0x1F77, 0x1DEC); │ hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
/* Set scan bits for AHT */ │ /* Set scan bits for AHT */
e1e_wphy(hw, 0x1F78, 0xF9EF); │ hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
/* Set AHT Preset bits */ │ /* Set AHT Preset bits */
e1e_wphy(hw, 0x1F79, 0x0210); │ hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
/* Change integ_factor of channel A to 3 */ │ /* Change integ_factor of channel A to 3 */
e1e_wphy(hw, 0x1895, 0x0003); │ hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
/* Change prop_factor of channels BCD to 8 */ │ /* Change prop_factor of channels BCD to 8 */
e1e_wphy(hw, 0x1796, 0x0008); │ hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
/* Change cg_icount + enable integbp for channels BCD */ │ /* Change cg_icount + enable integbp for channels BCD */
e1e_wphy(hw, 0x1798, 0xD008); │ hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
/* Change cg_icount + enable integbp + change prop_factor_master │ /* Change cg_icount + enable integbp + change prop_factor_master
* to 8 for channel A │ * to 8 for channel A
*/ │ */
e1e_wphy(hw, 0x1898, 0xD918); │ hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
/* Disable AHT in Slave mode on channel A */ │ /* Disable AHT in Slave mode on channel A */
e1e_wphy(hw, 0x187A, 0x0800); │ hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
/* Enable LPLU and disable AN to 1000 in non-D0a states, │ /* Enable LPLU and disable AN to 1000 in non-D0a states,
* Enable SPD+B2B │ * Enable SPD+B2B
*/ │ */
e1e_wphy(hw, 0x0019, 0x008D); │ hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
/* Enable restart AN on an1000_dis change */ │ /* Enable restart AN on an1000_dis change */
e1e_wphy(hw, 0x001B, 0x2080); │ hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
/* Enable wh_fifo read clock in 10/100 modes */ │ /* Enable wh_fifo read clock in 10/100 modes */
e1e_wphy(hw, 0x0014, 0x0045); │ hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
/* Restart AN, Speed selection is 1000 */ │ /* Restart AN, Speed selection is 1000 */
e1e_wphy(hw, 0x0000, 0x1340); │ hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
│
return 0; │ return 0;
} │
next prev up linux/drivers/net/ethernet/intel/e1000e/nvm.c:413 │ linux/drivers/net/ethernet/intel/igb/e1000_nvm.c:487
│
s32 ret_val; │ s32 ret_val;
u16 nvm_data; │ u16 nvm_data;
u16 pba_ptr; │ u16 pointer;
u16 offset; │ u16 offset;
u16 length; │ u16 length;
│
if (pba_num == NULL) { │ if (part_num == NULL) {
e_dbg("PBA string buffer was null\n"); │ hw_dbg("PBA string buffer was null\n");
return -E1000_ERR_INVALID_ARGUMENT; │ ret_val = E1000_ERR_INVALID_ARGUMENT;
│ goto out;
} │ }
│
ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); │ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
if (ret_val) { │ if (ret_val) {
e_dbg("NVM Read Error\n"); │ hw_dbg("NVM Read Error\n");
return ret_val; │ goto out;
} │ }
│
ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); │ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
if (ret_val) { │ if (ret_val) {
e_dbg("NVM Read Error\n"); │ hw_dbg("NVM Read Error\n");
return ret_val; │ goto out;
} │ }
│
/* if nvm_data is not ptr guard the PBA must be in legacy format which │ /* if nvm_data is not ptr guard the PBA must be in legacy format which
* means pba_ptr is actually our second data word for the PBA number │ * means pointer is actually our second data word for the PBA number
* and we can decode it into an ascii string │ * and we can decode it into an ascii string
*/ │ */
if (nvm_data != NVM_PBA_PTR_GUARD) { │ if (nvm_data != NVM_PBA_PTR_GUARD) {
e_dbg("NVM PBA number is not stored as string\n"); │ hw_dbg("NVM PBA number is not stored as string\n");
│
		/* make sure the caller's buffer is big enough to store the PBA */     │		/* we will need 11 characters to store the PBA */
if (pba_num_size < E1000_PBANUM_LENGTH) { │ if (part_num_size < 11) {
e_dbg("PBA string buffer too small\n"); │ hw_dbg("PBA string buffer too small\n");
return E1000_ERR_NO_SPACE; │ return E1000_ERR_NO_SPACE;
} │ }
│
/* extract hex string from data and pba_ptr */ │ /* extract hex string from data and pointer */
pba_num[0] = (nvm_data >> 12) & 0xF; │ part_num[0] = (nvm_data >> 12) & 0xF;
pba_num[1] = (nvm_data >> 8) & 0xF; │ part_num[1] = (nvm_data >> 8) & 0xF;
pba_num[2] = (nvm_data >> 4) & 0xF; │ part_num[2] = (nvm_data >> 4) & 0xF;
pba_num[3] = nvm_data & 0xF; │ part_num[3] = nvm_data & 0xF;
pba_num[4] = (pba_ptr >> 12) & 0xF; │ part_num[4] = (pointer >> 12) & 0xF;
pba_num[5] = (pba_ptr >> 8) & 0xF; │ part_num[5] = (pointer >> 8) & 0xF;
pba_num[6] = '-'; │ part_num[6] = '-';
pba_num[7] = 0; │ part_num[7] = 0;
pba_num[8] = (pba_ptr >> 4) & 0xF; │ part_num[8] = (pointer >> 4) & 0xF;
pba_num[9] = pba_ptr & 0xF; │ part_num[9] = pointer & 0xF;
│
/* put a null character on the end of our string */ │ /* put a null character on the end of our string */
pba_num[10] = '\0'; │ part_num[10] = '\0';
│
/* switch all the data but the '-' to hex char */ │ /* switch all the data but the '-' to hex char */
for (offset = 0; offset < 10; offset++) { │ for (offset = 0; offset < 10; offset++) {
if (pba_num[offset] < 0xA) │ if (part_num[offset] < 0xA)
pba_num[offset] += '0'; │ part_num[offset] += '0';
else if (pba_num[offset] < 0x10) │ else if (part_num[offset] < 0x10)
pba_num[offset] += 'A' - 0xA; │ part_num[offset] += 'A' - 0xA;
} │ }
│
return 0; │ goto out;
} │ }
│
ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length); │ ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
if (ret_val) { │ if (ret_val) {
e_dbg("NVM Read Error\n"); │ hw_dbg("NVM Read Error\n");
return ret_val; │ goto out;
} │ }
│
if (length == 0xFFFF || length == 0) { │ if (length == 0xFFFF || length == 0) {
e_dbg("NVM PBA number section invalid length\n"); │ hw_dbg("NVM PBA number section invalid length\n");
return -E1000_ERR_NVM_PBA_SECTION; │ ret_val = E1000_ERR_NVM_PBA_SECTION;
} │ goto out;
/* check if pba_num buffer is big enough */ │ }
if (pba_num_size < (((u32)length * 2) - 1)) { │ /* check if part_num buffer is big enough */
e_dbg("PBA string buffer too small\n"); │ if (part_num_size < (((u32)length * 2) - 1)) {
return -E1000_ERR_NO_SPACE; │ hw_dbg("PBA string buffer too small\n");
│ ret_val = E1000_ERR_NO_SPACE;
│ goto out;
} │ }
│
/* trim pba length from start of string */ │ /* trim pba length from start of string */
pba_ptr++; │ pointer++;
length--; │ length--;
│
for (offset = 0; offset < length; offset++) { │ for (offset = 0; offset < length; offset++) {
ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data); │ ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
if (ret_val) { │ if (ret_val) {
e_dbg("NVM Read Error\n"); │ hw_dbg("NVM Read Error\n");
return ret_val; │ goto out;
} │ }
pba_num[offset * 2] = (u8)(nvm_data >> 8); │ part_num[offset * 2] = (u8)(nvm_data >> 8);
pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); │ part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
} │ }
pba_num[offset * 2] = '\0'; │ part_num[offset * 2] = '\0';
│
return 0; │ out:
│ return ret_val;
} │
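
The legacy (non-string) PBA decode above is just nibble slicing plus an ASCII hex mapping. A self-contained sketch with made-up NVM word values shows the whole pipeline, including the quirk that the literal 0 stored at index 7 is itself converted to the character '0' by the loop:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t nvm_data = 0xE1AB, pointer = 0xC3F0;	/* hypothetical */
	char pba[11];
	int i;

	/* split the two 16-bit words into 4-bit nibbles, as above */
	pba[0] = (nvm_data >> 12) & 0xF;
	pba[1] = (nvm_data >> 8) & 0xF;
	pba[2] = (nvm_data >> 4) & 0xF;
	pba[3] = nvm_data & 0xF;
	pba[4] = (pointer >> 12) & 0xF;
	pba[5] = (pointer >> 8) & 0xF;
	pba[6] = '-';
	pba[7] = 0;
	pba[8] = (pointer >> 4) & 0xF;
	pba[9] = pointer & 0xF;
	pba[10] = '\0';

	/* nibble -> ASCII: 0..9 become '0'..'9', 0xA..0xF become 'A'..'F';
	 * '-' (0x2D) is outside both ranges and is left alone
	 */
	for (i = 0; i < 10; i++) {
		if (pba[i] < 0xA)
			pba[i] += '0';
		else if (pba[i] < 0x10)
			pba[i] += 'A' - 0xA;
	}
	printf("PBA: %s\n", pba);	/* prints "PBA: E1ABC3-0F0" */
	return 0;
}
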
next prev up linux/drivers/net/ethernet/intel/igc/igc_main.c:1324 │ linux/drivers/net/ethernet/intel/igb/igb_main.c:5887
│
u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; │ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb; │ struct sk_buff *skb = first->skb;
union { │ union {
struct iphdr *v4; │ struct iphdr *v4;
struct ipv6hdr *v6; │ struct ipv6hdr *v6;
unsigned char *hdr; │ unsigned char *hdr;
} ip; │ } ip;
union { │ union {
struct tcphdr *tcp; │ struct tcphdr *tcp;
struct udphdr *udp; │ struct udphdr *udp;
unsigned char *hdr; │ unsigned char *hdr;
} l4; │ } l4;
u32 paylen, l4_offset; │ u32 paylen, l4_offset;
int err; │ int err;
│
if (skb->ip_summed != CHECKSUM_PARTIAL) │ if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0; │ return 0;
│
if (!skb_is_gso(skb)) │ if (!skb_is_gso(skb))
return 0; │ return 0;
│
err = skb_cow_head(skb, 0); │ err = skb_cow_head(skb, 0);
if (err < 0) │ if (err < 0)
return err; │ return err;
│
ip.hdr = skb_network_header(skb); │ ip.hdr = skb_network_header(skb);
l4.hdr = skb_checksum_start(skb); │ l4.hdr = skb_checksum_start(skb);
│
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ │ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; │ type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
│ E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
│
/* initialize outer IP header fields */ │ /* initialize outer IP header fields */
if (ip.v4->version == 4) { │ if (ip.v4->version == 4) {
unsigned char *csum_start = skb_checksum_start(skb); │ unsigned char *csum_start = skb_checksum_start(skb);
unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); │ unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
│
/* IP header will have to cancel out any data that │ /* IP header will have to cancel out any data that
* is not a part of the outer IP header │ * is not a part of the outer IP header
*/ │ */
ip.v4->check = csum_fold(csum_partial(trans_start, │ ip.v4->check = csum_fold(csum_partial(trans_start,
csum_start - trans_start, │ csum_start - trans_start,
0)); │ 0));
type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; │ type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
│
ip.v4->tot_len = 0; │ ip.v4->tot_len = 0;
first->tx_flags |= IGC_TX_FLAGS_TSO | │ first->tx_flags |= IGB_TX_FLAGS_TSO |
IGC_TX_FLAGS_CSUM | │ IGB_TX_FLAGS_CSUM |
IGC_TX_FLAGS_IPV4; │ IGB_TX_FLAGS_IPV4;
} else { │ } else {
ip.v6->payload_len = 0; │ ip.v6->payload_len = 0;
first->tx_flags |= IGC_TX_FLAGS_TSO | │ first->tx_flags |= IGB_TX_FLAGS_TSO |
IGC_TX_FLAGS_CSUM; │ IGB_TX_FLAGS_CSUM;
} │ }
│
/* determine offset of inner transport header */ │ /* determine offset of inner transport header */
l4_offset = l4.hdr - skb->data; │ l4_offset = l4.hdr - skb->data;
│
/* remove payload length from inner checksum */ │ /* remove payload length from inner checksum */
paylen = skb->len - l4_offset; │ paylen = skb->len - l4_offset;
if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { │ if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
/* compute length of segmentation header */ │ /* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset; │ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
csum_replace_by_diff(&l4.tcp->check, │ csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(paylen)); │ (__force __wsum)htonl(paylen));
} else { │ } else {
/* compute length of segmentation header */ │ /* compute length of segmentation header */
*hdr_len = sizeof(*l4.udp) + l4_offset; │ *hdr_len = sizeof(*l4.udp) + l4_offset;
csum_replace_by_diff(&l4.udp->check, │ csum_replace_by_diff(&l4.udp->check,
(__force __wsum)htonl(paylen)); │ (__force __wsum)htonl(paylen));
} │ }
│
/* update gso size and bytecount with header size */ │ /* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs; │ first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len; │ first->bytecount += (first->gso_segs - 1) * *hdr_len;
│
/* MSS L4LEN IDX */ │ /* MSS L4LEN IDX */
mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; │ mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; │ mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
│
/* VLAN MACLEN IPLEN */ │ /* VLAN MACLEN IPLEN */
vlan_macip_lens = l4.hdr - ip.hdr; │ vlan_macip_lens = l4.hdr - ip.hdr;
vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; │ vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; │ vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
│
igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, │ igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
type_tucmd, mss_l4len_idx); │ type_tucmd, mss_l4len_idx);
│
return 1; │ return 1;
} │
next prev up linux/drivers/net/ethernet/sfc/farch.c:2129 │ linux/drivers/net/ethernet/sfc/falcon/farch.c:2062
│
bool is_full = false; │ bool is_full = false;
│
/* *gen_spec should be completely initialised, to be consistent │ /* *gen_spec should be completely initialised, to be consistent
* with efx_filter_init_{rx,tx}() and in case we want to copy │ * with ef4_filter_init_{rx,tx}() and in case we want to copy
* it back to userland. │ * it back to userland.
*/ │ */
memset(gen_spec, 0, sizeof(*gen_spec)); │ memset(gen_spec, 0, sizeof(*gen_spec));
│
gen_spec->priority = spec->priority; │ gen_spec->priority = spec->priority;
gen_spec->flags = spec->flags; │ gen_spec->flags = spec->flags;
gen_spec->dmaq_id = spec->dmaq_id; │ gen_spec->dmaq_id = spec->dmaq_id;
│
switch (spec->type) { │ switch (spec->type) {
case EFX_FARCH_FILTER_TCP_FULL: │ case EF4_FARCH_FILTER_TCP_FULL:
case EFX_FARCH_FILTER_UDP_FULL: │ case EF4_FARCH_FILTER_UDP_FULL:
is_full = true; │ is_full = true;
fallthrough; │ fallthrough;
case EFX_FARCH_FILTER_TCP_WILD: │ case EF4_FARCH_FILTER_TCP_WILD:
case EFX_FARCH_FILTER_UDP_WILD: { │ case EF4_FARCH_FILTER_UDP_WILD: {
__be32 host1, host2; │ __be32 host1, host2;
__be16 port1, port2; │ __be16 port1, port2;
│
gen_spec->match_flags = │ gen_spec->match_flags =
EFX_FILTER_MATCH_ETHER_TYPE | │ EF4_FILTER_MATCH_ETHER_TYPE |
EFX_FILTER_MATCH_IP_PROTO | │ EF4_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; │ EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT;
if (is_full) │ if (is_full)
gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST | │ gen_spec->match_flags |= (EF4_FILTER_MATCH_REM_HOST |
EFX_FILTER_MATCH_REM_PORT); │ EF4_FILTER_MATCH_REM_PORT);
gen_spec->ether_type = htons(ETH_P_IP); │ gen_spec->ether_type = htons(ETH_P_IP);
gen_spec->ip_proto = │ gen_spec->ip_proto =
(spec->type == EFX_FARCH_FILTER_TCP_FULL || │ (spec->type == EF4_FARCH_FILTER_TCP_FULL ||
spec->type == EFX_FARCH_FILTER_TCP_WILD) ? │ spec->type == EF4_FARCH_FILTER_TCP_WILD) ?
IPPROTO_TCP : IPPROTO_UDP; │ IPPROTO_TCP : IPPROTO_UDP;
│
host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); │ host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
port1 = htons(spec->data[0]); │ port1 = htons(spec->data[0]);
host2 = htonl(spec->data[2]); │ host2 = htonl(spec->data[2]);
port2 = htons(spec->data[1] >> 16); │ port2 = htons(spec->data[1] >> 16);
if (spec->flags & EFX_FILTER_FLAG_TX) { │ if (spec->flags & EF4_FILTER_FLAG_TX) {
gen_spec->loc_host[0] = host1; │ gen_spec->loc_host[0] = host1;
gen_spec->rem_host[0] = host2; │ gen_spec->rem_host[0] = host2;
} else { │ } else {
gen_spec->loc_host[0] = host2; │ gen_spec->loc_host[0] = host2;
gen_spec->rem_host[0] = host1; │ gen_spec->rem_host[0] = host1;
} │ }
if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^ │ if (!!(gen_spec->flags & EF4_FILTER_FLAG_TX) ^
(!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { │ (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
gen_spec->loc_port = port1; │ gen_spec->loc_port = port1;
gen_spec->rem_port = port2; │ gen_spec->rem_port = port2;
} else { │ } else {
gen_spec->loc_port = port2; │ gen_spec->loc_port = port2;
gen_spec->rem_port = port1; │ gen_spec->rem_port = port1;
} │ }
│
break; │ break;
} │ }
│
case EFX_FARCH_FILTER_MAC_FULL: │ case EF4_FARCH_FILTER_MAC_FULL:
is_full = true; │ is_full = true;
fallthrough; │ fallthrough;
case EFX_FARCH_FILTER_MAC_WILD: │ case EF4_FARCH_FILTER_MAC_WILD:
gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; │ gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC;
if (is_full) │ if (is_full)
gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; │ gen_spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID;
gen_spec->loc_mac[0] = spec->data[2] >> 8; │ gen_spec->loc_mac[0] = spec->data[2] >> 8;
gen_spec->loc_mac[1] = spec->data[2]; │ gen_spec->loc_mac[1] = spec->data[2];
gen_spec->loc_mac[2] = spec->data[1] >> 24; │ gen_spec->loc_mac[2] = spec->data[1] >> 24;
gen_spec->loc_mac[3] = spec->data[1] >> 16; │ gen_spec->loc_mac[3] = spec->data[1] >> 16;
gen_spec->loc_mac[4] = spec->data[1] >> 8; │ gen_spec->loc_mac[4] = spec->data[1] >> 8;
gen_spec->loc_mac[5] = spec->data[1]; │ gen_spec->loc_mac[5] = spec->data[1];
gen_spec->outer_vid = htons(spec->data[0]); │ gen_spec->outer_vid = htons(spec->data[0]);
break; │ break;
│
case EFX_FARCH_FILTER_UC_DEF: │ case EF4_FARCH_FILTER_UC_DEF:
case EFX_FARCH_FILTER_MC_DEF: │ case EF4_FARCH_FILTER_MC_DEF:
gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG; │ gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC_IG;
gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF; │ gen_spec->loc_mac[0] = spec->type == EF4_FARCH_FILTER_MC_DEF;
break; │ break;
│
default: │ default:
WARN_ON(1); │ WARN_ON(1);
break; │ break;
} │ }
} │
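
The host1/port1/host2/port2 repacking above inverts how the filter-table words interleave the 4-tuple. Under the assumption (read off the shifts in the code, not from a datasheet) that data[0] holds {host1[15:0], port1}, data[1] holds {port2, host1[31:16]} and data[2] holds host2, a round-trip sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t host1 = 0xC0A80001;	/* 192.168.0.1, host order */
	uint16_t port1 = 80, port2 = 12345;
	uint32_t host2 = 0x0A000001;	/* 10.0.0.1 */
	uint32_t data[3];

	/* pack the way the filter table is assumed to store it */
	data[0] = (host1 << 16) | port1;
	data[1] = ((uint32_t)port2 << 16) | (host1 >> 16);
	data[2] = host2;

	/* unpack exactly as the driver does (before byte-swapping) */
	uint32_t h1 = (data[0] >> 16) | (data[1] << 16);
	uint16_t p1 = (uint16_t)data[0];
	uint32_t h2 = data[2];
	uint16_t p2 = (uint16_t)(data[1] >> 16);

	printf("host1 %#x port1 %u host2 %#x port2 %u\n", h1, p1, h2, p2);
	return (h1 == host1 && p1 == port1 &&
		h2 == host2 && p2 == port2) ? 0 : 1;
}
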
next prev up linux/drivers/net/ethernet/intel/igc/igc_ethtool.c:1102 │ linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:2583
│
u32 flags = adapter->flags; │ u32 flags = adapter->flags;
│
/* RSS does not support anything other than hashing │ /* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports │ * to queues on src and dst IPs and ports
*/ │ */
if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | │ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3)) │ RXH_L4_B_0_1 | RXH_L4_B_2_3))
return -EINVAL; │ return -EINVAL;
│
switch (nfc->flow_type) { │ switch (nfc->flow_type) {
case TCP_V4_FLOW: │ case TCP_V4_FLOW:
case TCP_V6_FLOW: │ case TCP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) || │ if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST) || │ !(nfc->data & RXH_IP_DST) ||
!(nfc->data & RXH_L4_B_0_1) || │ !(nfc->data & RXH_L4_B_0_1) ||
!(nfc->data & RXH_L4_B_2_3)) │ !(nfc->data & RXH_L4_B_2_3))
return -EINVAL; │ return -EINVAL;
break; │ break;
case UDP_V4_FLOW: │ case UDP_V4_FLOW:
if (!(nfc->data & RXH_IP_SRC) || │ if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST)) │ !(nfc->data & RXH_IP_DST))
return -EINVAL; │ return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { │ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0: │ case 0:
flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP; │ flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
break; │ break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3): │ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP; │ flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
break; │ break;
default: │ default:
return -EINVAL; │ return -EINVAL;
} │ }
break; │ break;
case UDP_V6_FLOW: │ case UDP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) || │ if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST)) │ !(nfc->data & RXH_IP_DST))
return -EINVAL; │ return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { │ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0: │ case 0:
flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP; │ flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
break; │ break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3): │ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP; │ flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
break; │ break;
default: │ default:
return -EINVAL; │ return -EINVAL;
} │ }
break; │ break;
case AH_ESP_V4_FLOW: │ case AH_ESP_V4_FLOW:
case AH_V4_FLOW: │ case AH_V4_FLOW:
case ESP_V4_FLOW: │ case ESP_V4_FLOW:
case SCTP_V4_FLOW: │ case SCTP_V4_FLOW:
case AH_ESP_V6_FLOW: │ case AH_ESP_V6_FLOW:
case AH_V6_FLOW: │ case AH_V6_FLOW:
case ESP_V6_FLOW: │ case ESP_V6_FLOW:
case SCTP_V6_FLOW: │ case SCTP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) || │ if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST) || │ !(nfc->data & RXH_IP_DST) ||
(nfc->data & RXH_L4_B_0_1) || │ (nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3)) │ (nfc->data & RXH_L4_B_2_3))
return -EINVAL; │ return -EINVAL;
break; │ break;
default: │ default:
return -EINVAL; │ return -EINVAL;
} │ }
│
/* if we changed something we need to update flags */ │ /* if we changed something we need to update flags */
if (flags != adapter->flags) { │ if (flags != adapter->flags) {
struct igc_hw *hw = &adapter->hw; │ struct e1000_hw *hw = &adapter->hw;
u32 mrqc = rd32(IGC_MRQC); │ u32 mrqc = rd32(E1000_MRQC);
│
if ((flags & UDP_RSS_FLAGS) && │ if ((flags & UDP_RSS_FLAGS) &&
!(adapter->flags & UDP_RSS_FLAGS)) │ !(adapter->flags & UDP_RSS_FLAGS))
netdev_err(adapter->netdev, │ dev_err(&adapter->pdev->dev,
"Enabling UDP RSS: fragmented packets may arrive out │ "enabling UDP RSS: fragmented packets may arrive out of
│
adapter->flags = flags; │ adapter->flags = flags;
│
/* Perform hash on these packet types */ │ /* Perform hash on these packet types */
mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | │ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
IGC_MRQC_RSS_FIELD_IPV4_TCP | │ E1000_MRQC_RSS_FIELD_IPV4_TCP |
IGC_MRQC_RSS_FIELD_IPV6 | │ E1000_MRQC_RSS_FIELD_IPV6 |
IGC_MRQC_RSS_FIELD_IPV6_TCP; │ E1000_MRQC_RSS_FIELD_IPV6_TCP;
│
mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP | │ mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
IGC_MRQC_RSS_FIELD_IPV6_UDP); │ E1000_MRQC_RSS_FIELD_IPV6_UDP);
│
if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) │ if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; │ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
│
if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) │ if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; │ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
│
wr32(IGC_MRQC, mrqc); │ wr32(E1000_MRQC, mrqc);
} │ }
│
return 0; │ return 0;
} │
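
The UDP cases above accept exactly two hash configurations, 2-tuple (IPs only) or full 4-tuple (IPs plus both 16-bit port halves); anything in between is rejected before any register is touched. A compact restatement, using the RXH_* bit values from the ethtool uapi header:

#include <errno.h>
#include <stdio.h>

#define RXH_IP_SRC	(1 << 4)
#define RXH_IP_DST	(1 << 5)
#define RXH_L4_B_0_1	(1 << 6)	/* src port bytes 0-1 */
#define RXH_L4_B_2_3	(1 << 7)	/* dst port bytes 2-3 */

static int udp_hash_ok(unsigned long data, int *four_tuple)
{
	if (!(data & RXH_IP_SRC) || !(data & RXH_IP_DST))
		return -EINVAL;
	switch (data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		*four_tuple = 0;		/* 2-tuple: IPs only */
		return 0;
	case RXH_L4_B_0_1 | RXH_L4_B_2_3:
		*four_tuple = 1;		/* full 4-tuple */
		return 0;
	default:	/* hashing on half a port is not allowed */
		return -EINVAL;
	}
}

int main(void)
{
	int ft;

	printf("%d\n", udp_hash_ok(RXH_IP_SRC | RXH_IP_DST, &ft));	/* 0 */
	printf("%d\n", udp_hash_ok(RXH_IP_SRC | RXH_L4_B_0_1, &ft));	/* -22 */
	return 0;
}
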
next prev up linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:1238 │ linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:595
│
struct rvu_npa_health_reporters *rvu_reporters; │ struct rvu_nix_health_reporters *rvu_reporters;
struct rvu_npa_event_ctx *npa_event_context; │ struct rvu_nix_event_ctx *nix_event_context;
struct rvu *rvu = rvu_dl->rvu; │ struct rvu *rvu = rvu_dl->rvu;
│
rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL); │ rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
if (!rvu_reporters) │ if (!rvu_reporters)
return -ENOMEM; │ return -ENOMEM;
│
rvu_dl->rvu_npa_health_reporter = rvu_reporters; │ rvu_dl->rvu_nix_health_reporter = rvu_reporters;
npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL); │ nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
if (!npa_event_context) │ if (!nix_event_context)
return -ENOMEM; │ return -ENOMEM;
│
rvu_reporters->npa_event_ctx = npa_event_context; │ rvu_reporters->nix_event_ctx = nix_event_context;
rvu_reporters->rvu_hw_npa_intr_reporter = │ rvu_reporters->rvu_hw_nix_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu); │		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) { │ if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n", │ dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)); │ PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter); │ return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
} │ }
│
rvu_reporters->rvu_hw_npa_gen_reporter = │ rvu_reporters->rvu_hw_nix_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu); │		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) { │ if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n", │ dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)); │ PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter); │ return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
} │ }
│
rvu_reporters->rvu_hw_npa_err_reporter = │ rvu_reporters->rvu_hw_nix_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu); │		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) { │ if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n", │ dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter)); │ PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter); │ return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
} │ }
│
rvu_reporters->rvu_hw_npa_ras_reporter = │ rvu_reporters->rvu_hw_nix_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu); │		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) { │ if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n", │ dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)); │ PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter); │ return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
} │ }
│
rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq"); │ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
if (!rvu_dl->devlink_wq) │ if (!rvu_dl->devlink_wq)
goto err; │ goto err;
│
INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work); │ INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work); │ INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work); │ INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work); │ INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
│
return 0; │ return 0;
err: │ err:
rvu_npa_health_reporters_destroy(rvu_dl); │ rvu_nix_health_reporters_destroy(rvu_dl);
return -ENOMEM; │ return -ENOMEM;
} │
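
Each reporter above is created with the usual kernel error-pointer convention: the factory returns either a usable pointer or a small negative errno smuggled into the pointer value, which IS_ERR()/PTR_ERR() decode. A userspace re-implementation of that convention, for illustration only:

#include <stdio.h>

#define MAX_ERRNO	4095

/* An "error pointer" is any value in the top MAX_ERRNO bytes of the
 * address space, i.e. (void *)-1 .. (void *)-4095.
 */
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static long PTR_ERR(const void *ptr)
{
	return (long)ptr;	/* recover the negative errno */
}

static void *ERR_PTR(long err)
{
	return (void *)err;	/* encode a negative errno as a pointer */
}

int main(void)
{
	static int obj;
	void *ok = &obj;
	void *bad = ERR_PTR(-12);	/* -ENOMEM */

	printf("ok:  IS_ERR=%d\n", IS_ERR(ok));
	printf("bad: IS_ERR=%d err=%ld\n", IS_ERR(bad), PTR_ERR(bad));
	return 0;
}
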
next prev up linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:1055 │ linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:1141
│
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, │ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
{ E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, │ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
{ E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, │ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, │ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, │ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, │ { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, │ { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
/* RDH is read-only for i350, only test RDT. */ │ /* Enable all RX queues before testing. */
│ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
│ E1000_RXDCTL_QUEUE_ENABLE },
│ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
│ E1000_RXDCTL_QUEUE_ENABLE },
│ /* RDH is read-only for 82576, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │ { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
│ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
│ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, │ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, │ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, │ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, │ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, │ { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, │ { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, │ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, │ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, │ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, │ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO, │ { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
{ E1000_RA, 0, 16, TABLE64_TEST_HI, │ { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
0xC3FFFFFF, 0xFFFFFFFF }, │ { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 16, TABLE64_TEST_LO, │ { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
0xFFFFFFFF, 0xFFFFFFFF }, │
{ E1000_RA2, 0, 16, TABLE64_TEST_HI, │
0xC3FFFFFF, 0xFFFFFFFF }, │
{ E1000_MTA, 0, 128, TABLE32_TEST, │
0xFFFFFFFF, 0xFFFFFFFF }, │
{ 0, 0, 0, 0 } │ { 0, 0, 0, 0 }
} │
next prev up linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:1098 │ linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:1141
│
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, │ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
{ E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, │ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
{ E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, │ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, │ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, │ { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, │ { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
/* RDH is read-only for 82580, only test RDT. */ │ /* Enable all RX queues before testing. */
│ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
│ E1000_RXDCTL_QUEUE_ENABLE },
│ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
│ E1000_RXDCTL_QUEUE_ENABLE },
│ /* RDH is read-only for 82576, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │ { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
│ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
│ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, │ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, │ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, │ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, │ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, │ { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, │ { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, │ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, │ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, │ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, │ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO, │ { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
{ E1000_RA, 0, 16, TABLE64_TEST_HI, │ { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
0x83FFFFFF, 0xFFFFFFFF }, │ { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_LO, │ { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
0xFFFFFFFF, 0xFFFFFFFF }, │
{ E1000_RA2, 0, 8, TABLE64_TEST_HI, │
0x83FFFFFF, 0xFFFFFFFF }, │
{ E1000_MTA, 0, 128, TABLE32_TEST, │
0xFFFFFFFF, 0xFFFFFFFF }, │
{ 0, 0, 0, 0 } │ { 0, 0, 0, 0 }
} │
next prev up linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:1098 │ linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:1055
│
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, │ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
{ E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, │ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
{ E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, │ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, │ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
{ E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, │ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, │ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
/* RDH is read-only for 82580, only test RDT. */ │ /* RDH is read-only for i350, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, │ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, │ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, │ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, │ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
{ E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, │ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, │ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, │ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, │ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, │ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, │ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, │ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, │ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO, │ { E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF }, │ 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA, 0, 16, TABLE64_TEST_HI, │ { E1000_RA, 0, 16, TABLE64_TEST_HI,
0x83FFFFFF, 0xFFFFFFFF }, │ 0xC3FFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_LO, │ { E1000_RA2, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF }, │ 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_HI, │ { E1000_RA2, 0, 16, TABLE64_TEST_HI,
0x83FFFFFF, 0xFFFFFFFF }, │ 0xC3FFFFFF, 0xFFFFFFFF },
{ E1000_MTA, 0, 128, TABLE32_TEST, │ { E1000_MTA, 0, 128, TABLE32_TEST,
0xFFFFFFFF, 0xFFFFFFFF }, │ 0xFFFFFFFF, 0xFFFFFFFF },
{ 0, 0, 0, 0 } │ { 0, 0, 0, 0 }
} │
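
In all three tables the last two columns of a PATTERN_TEST entry are the read mask (the bits the register actually implements) and the write mask. A sketch of the test semantics these entries drive, assuming the igb-style reg_pattern_test() loop: write each pattern through the write mask, read back, and compare under the read mask. The fake register below latches only its low 16 bits, like FCAH's 0x0000FFFF:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stands in for an MMIO register */

static void wr32(uint32_t v) { fake_reg = v & 0x0000FFFF; }
static uint32_t rd32(void)   { return fake_reg; }

static int reg_pattern_test(uint32_t mask, uint32_t write)
{
	static const uint32_t pat[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF };
	unsigned int i;

	for (i = 0; i < sizeof(pat) / sizeof(pat[0]); i++) {
		wr32(pat[i] & write);
		if ((rd32() & mask) != (pat[i] & write & mask))
			return -1;	/* stuck or shorted bits */
	}
	return 0;
}

int main(void)
{
	/* { E1000_FCAH, ..., PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF } */
	printf("FCAH-style test: %s\n",
	       reg_pattern_test(0x0000FFFF, 0xFFFFFFFF) ? "FAIL" : "pass");
	return 0;
}
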
next prev up linux/drivers/net/ethernet/sfc/selftest.c:175 │ linux/drivers/net/ethernet/sfc/falcon/selftest.c:174
│
struct efx_channel *channel; │ struct ef4_channel *channel;
unsigned int read_ptr[EFX_MAX_CHANNELS]; │ unsigned int read_ptr[EF4_MAX_CHANNELS];
unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0; │ unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
unsigned long timeout, wait; │ unsigned long timeout, wait;
│
BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG); │ BUILD_BUG_ON(EF4_MAX_CHANNELS > BITS_PER_LONG);
│
efx_for_each_channel(channel, efx) { │ ef4_for_each_channel(channel, efx) {
read_ptr[channel->channel] = channel->eventq_read_ptr; │ read_ptr[channel->channel] = channel->eventq_read_ptr;
set_bit(channel->channel, &dma_pend); │ set_bit(channel->channel, &dma_pend);
set_bit(channel->channel, &int_pend); │ set_bit(channel->channel, &int_pend);
efx_nic_event_test_start(channel); │ ef4_nic_event_test_start(channel);
} │ }
│
timeout = jiffies + IRQ_TIMEOUT; │ timeout = jiffies + IRQ_TIMEOUT;
wait = 1; │ wait = 1;
│
/* Wait for arrival of interrupts. NAPI processing may or may │ /* Wait for arrival of interrupts. NAPI processing may or may
* not complete in time, but we can cope in any case. │ * not complete in time, but we can cope in any case.
*/ │ */
do { │ do {
schedule_timeout_uninterruptible(wait); │ schedule_timeout_uninterruptible(wait);
│
efx_for_each_channel(channel, efx) { │ ef4_for_each_channel(channel, efx) {
efx_stop_eventq(channel); │ ef4_stop_eventq(channel);
if (channel->eventq_read_ptr != │ if (channel->eventq_read_ptr !=
read_ptr[channel->channel]) { │ read_ptr[channel->channel]) {
set_bit(channel->channel, &napi_ran); │ set_bit(channel->channel, &napi_ran);
clear_bit(channel->channel, &dma_pend); │ clear_bit(channel->channel, &dma_pend);
clear_bit(channel->channel, &int_pend); │ clear_bit(channel->channel, &int_pend);
} else { │ } else {
if (efx_nic_event_present(channel)) │ if (ef4_nic_event_present(channel))
clear_bit(channel->channel, &dma_pend); │ clear_bit(channel->channel, &dma_pend);
if (efx_nic_event_test_irq_cpu(channel) >= 0) │ if (ef4_nic_event_test_irq_cpu(channel) >= 0)
clear_bit(channel->channel, &int_pend); │ clear_bit(channel->channel, &int_pend);
} │ }
efx_start_eventq(channel); │ ef4_start_eventq(channel);
} │ }
│
wait *= 2; │ wait *= 2;
} while ((dma_pend || int_pend) && time_before(jiffies, timeout)); │ } while ((dma_pend || int_pend) && time_before(jiffies, timeout));
│
efx_for_each_channel(channel, efx) { │ ef4_for_each_channel(channel, efx) {
bool dma_seen = !test_bit(channel->channel, &dma_pend); │ bool dma_seen = !test_bit(channel->channel, &dma_pend);
bool int_seen = !test_bit(channel->channel, &int_pend); │ bool int_seen = !test_bit(channel->channel, &int_pend);
│
tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1; │ tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
tests->eventq_int[channel->channel] = int_seen ? 1 : -1; │ tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
│
if (dma_seen && int_seen) { │ if (dma_seen && int_seen) {
netif_dbg(efx, drv, efx->net_dev, │ netif_dbg(efx, drv, efx->net_dev,
"channel %d event queue passed (with%s NAPI)\n", │ "channel %d event queue passed (with%s NAPI)\n",
channel->channel, │ channel->channel,
test_bit(channel->channel, &napi_ran) ? │ test_bit(channel->channel, &napi_ran) ?
"" : "out"); │ "" : "out");
} else { │ } else {
/* Report failure and whether either interrupt or DMA │ /* Report failure and whether either interrupt or DMA
* worked │ * worked
*/ │ */
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"channel %d timed out waiting for event queue\n", │ "channel %d timed out waiting for event queue\n",
channel->channel); │ channel->channel);
if (int_seen) │ if (int_seen)
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"channel %d saw interrupt " │ "channel %d saw interrupt "
"during event queue test\n", │ "during event queue test\n",
channel->channel); │ channel->channel);
if (dma_seen) │ if (dma_seen)
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"channel %d event was generated, but " │ "channel %d event was generated, but "
"failed to trigger an interrupt\n", │ "failed to trigger an interrupt\n",
channel->channel); │ channel->channel);
} │ }
} │ }
│
return (dma_pend || int_pend) ? -ETIMEDOUT : 0; │ return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
} │
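
The loop above polls with an exponentially growing sleep until every channel has seen both its DMA event and its interrupt, or the deadline passes. A userspace sketch of the same shape, with time()/sleep() standing in for jiffies and the scheduler, and a stub counter standing in for the per-channel pending bits:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int pending = 3;	/* pretend events land after a few polls */

static int events_pending(void)
{
	return --pending > 0;
}

static int wait_for_events(unsigned int timeout_s)
{
	time_t deadline = time(NULL) + timeout_s;
	unsigned int wait = 1;

	do {
		sleep(wait);	/* schedule_timeout_uninterruptible() */
		if (!events_pending())
			return 0;
		wait *= 2;	/* back off: 1s, 2s, 4s, ... */
	} while (time(NULL) < deadline);

	return -1;		/* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("wait_for_events: %d\n", wait_for_events(10));
	return 0;
}
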
next prev up linux/drivers/net/ethernet/emulex/benet/be_main.c:466 │ linux/drivers/net/ethernet/emulex/benet/be_main.c:512
│
struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter); │ struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
struct be_pmem_stats *pmem_sts = &hw_stats->pmem; │ struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf; │ struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
struct be_port_rxf_stats_v1 *port_stats = │ struct be_port_rxf_stats_v2 *port_stats =
&rxf_stats->port[adapter->port_num]; │ &rxf_stats->port[adapter->port_num];
struct be_drv_stats *drvs = &adapter->drv_stats; │ struct be_drv_stats *drvs = &adapter->drv_stats;
│
be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats)); │ be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop; │ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames; │ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
drvs->rx_pause_frames = port_stats->rx_pause_frames; │ drvs->rx_pause_frames = port_stats->rx_pause_frames;
drvs->rx_crc_errors = port_stats->rx_crc_errors; │ drvs->rx_crc_errors = port_stats->rx_crc_errors;
drvs->rx_control_frames = port_stats->rx_control_frames; │ drvs->rx_control_frames = port_stats->rx_control_frames;
drvs->rx_in_range_errors = port_stats->rx_in_range_errors; │ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
drvs->rx_frame_too_long = port_stats->rx_frame_too_long; │ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
drvs->rx_dropped_runt = port_stats->rx_dropped_runt; │ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; │ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; │ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; │ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; │ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small; │ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short; │ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
drvs->rx_out_range_errors = port_stats->rx_out_range_errors; │ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
drvs->rx_dropped_header_too_small = │ drvs->rx_dropped_header_too_small =
port_stats->rx_dropped_header_too_small; │ port_stats->rx_dropped_header_too_small;
drvs->rx_input_fifo_overflow_drop = │ drvs->rx_input_fifo_overflow_drop =
port_stats->rx_input_fifo_overflow_drop; │ port_stats->rx_input_fifo_overflow_drop;
drvs->rx_address_filtered = port_stats->rx_address_filtered; │ drvs->rx_address_filtered = port_stats->rx_address_filtered;
drvs->rx_alignment_symbol_errors = │ drvs->rx_alignment_symbol_errors =
port_stats->rx_alignment_symbol_errors; │ port_stats->rx_alignment_symbol_errors;
drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop; │ drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
drvs->tx_pauseframes = port_stats->tx_pauseframes; │ drvs->tx_pauseframes = port_stats->tx_pauseframes;
drvs->tx_controlframes = port_stats->tx_controlframes; │ drvs->tx_controlframes = port_stats->tx_controlframes;
drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes; │ drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
drvs->jabber_events = port_stats->jabber_events; │ drvs->jabber_events = port_stats->jabber_events;
drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; │ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; │ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
drvs->forwarded_packets = rxf_stats->forwarded_packets; │ drvs->forwarded_packets = rxf_stats->forwarded_packets;
drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; │ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; │ drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; │ drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; │ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
│ if (be_roce_supported(adapter)) {
│ drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
│ drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
│ drvs->rx_roce_frames = port_stats->roce_frames_received;
│ drvs->roce_drops_crc = port_stats->roce_drops_crc;
│ drvs->roce_drops_payload_len =
│ port_stats->roce_drops_payload_len;
│ }
} │
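
be_dws_le_to_cpu() byte-swaps the DMA'd stats block in place, one 32-bit word at a time, before the field-by-field copy above. A sketch of that conversion, assuming GCC's byte-order macros (illustrative, not the driver's helper):

#include <stddef.h>
#include <stdint.h>

static void dws_le_to_cpu(void *buf, size_t len_bytes)
{
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        uint32_t *p = buf;
        size_t n = len_bytes / sizeof(uint32_t);

        for (size_t i = 0; i < n; i++)
                p[i] = __builtin_bswap32(p[i]); /* swap each LE word */
#else
        (void)buf;                      /* no-op on little-endian hosts */
        (void)len_bytes;
#endif
}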
next prev up linux/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c:1070 │ linux/drivers/net/ethernet/intel/ixgbevf/ipsec.c:460
│
struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev); │ struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
struct ixgbe_ipsec *ipsec = adapter->ipsec; │ struct ixgbevf_ipsec *ipsec = adapter->ipsec;
struct xfrm_state *xs; │ struct xfrm_state *xs;
struct sec_path *sp; │ struct sec_path *sp;
struct tx_sa *tsa; │ struct tx_sa *tsa;
│ u16 sa_idx;
│
sp = skb_sec_path(first->skb); │ sp = skb_sec_path(first->skb);
if (unlikely(!sp->len)) { │ if (unlikely(!sp->len)) {
netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n", │ netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
__func__, sp->len); │ __func__, sp->len);
return 0; │ return 0;
} │ }
│
xs = xfrm_input_state(first->skb); │ xs = xfrm_input_state(first->skb);
if (unlikely(!xs)) { │ if (unlikely(!xs)) {
netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n", │ netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
__func__, xs); │ __func__, xs);
return 0; │ return 0;
} │ }
│
itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; │ sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) { │ if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", │ netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
__func__, itd->sa_idx, xs->xso.offload_handle); │ __func__, sa_idx, xs->xso.offload_handle);
return 0; │ return 0;
} │ }
│
tsa = &ipsec->tx_tbl[itd->sa_idx]; │ tsa = &ipsec->tx_tbl[sa_idx];
if (unlikely(!tsa->used)) { │ if (unlikely(!tsa->used)) {
netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n", │ netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
__func__, itd->sa_idx); │ __func__, sa_idx);
return 0; │ return 0;
} │ }
│
first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC; │ itd->pfsa = tsa->pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
│
if (xs->id.proto == IPPROTO_ESP) { │ first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CSUM;
│
│ if (xs->id.proto == IPPROTO_ESP) {
itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP | │ itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
IXGBE_ADVTXD_TUCMD_L4T_TCP; │ IXGBE_ADVTXD_TUCMD_L4T_TCP;
if (first->protocol == htons(ETH_P_IP)) │ if (first->protocol == htons(ETH_P_IP))
itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4; │ itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;
│
/* The actual trailer length is authlen (16 bytes) plus │ /* The actual trailer length is authlen (16 bytes) plus
* 2 bytes for the proto and the padlen values, plus │ * 2 bytes for the proto and the padlen values, plus
* padlen bytes of padding. This ends up not the same │ * padlen bytes of padding. This ends up not the same
* as the static value found in xs->props.trailer_len (21). │ * as the static value found in xs->props.trailer_len (21).
* │ *
* ... but if we're doing GSO, don't bother as the stack │ * ... but if we're doing GSO, don't bother as the stack
* doesn't add a trailer for those. │ * doesn't add a trailer for those.
*/ │ */
if (!skb_is_gso(first->skb)) { │ if (!skb_is_gso(first->skb)) {
/* The "correct" way to get the auth length would be │ /* The "correct" way to get the auth length would be
* to use │ * to use
* authlen = crypto_aead_authsize(xs->data); │ * authlen = crypto_aead_authsize(xs->data);
* but since we know we only have one size to worry │ * but since we know we only have one size to worry
			 * about, we can let the compiler use the constant │			 * about, we can let the compiler use the constant
* and save us a few CPU cycles. │ * and save us a few CPU cycles.
*/ │ */
const int authlen = IXGBE_IPSEC_AUTH_BITS / 8; │ const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
struct sk_buff *skb = first->skb; │ struct sk_buff *skb = first->skb;
u8 padlen; │ u8 padlen;
int ret; │ int ret;
│
ret = skb_copy_bits(skb, skb->len - (authlen + 2), │ ret = skb_copy_bits(skb, skb->len - (authlen + 2),
&padlen, 1); │ &padlen, 1);
if (unlikely(ret)) │ if (unlikely(ret))
return 0; │ return 0;
itd->trailer_len = authlen + 2 + padlen; │ itd->trailer_len = authlen + 2 + padlen;
} │ }
} │ }
if (tsa->encrypt) │ if (tsa->encrypt)
itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN; │ itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;
│
return 1; │ return 1;
} │
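
The trailer computation above reads padlen from the byte sitting authlen + 2 bytes before the end of the frame, then sums the ICV length (16), the 2-byte padlen/next-proto pair, and the padding itself. A worked sketch with a hypothetical read_pkt_byte() accessor standing in for skb_copy_bits():

#include <stdint.h>

#define AUTHLEN 16      /* fixed ICV size assumed by the driver */

/* hypothetical accessor standing in for skb_copy_bits() */
static uint8_t read_pkt_byte(const uint8_t *pkt, int off) { return pkt[off]; }

static int esp_trailer_len(const uint8_t *pkt, int pkt_len)
{
        /* padlen sits (AUTHLEN + 2) bytes from the end of the frame */
        uint8_t padlen = read_pkt_byte(pkt, pkt_len - (AUTHLEN + 2));

        return AUTHLEN + 2 + padlen;    /* e.g. padlen 3 -> trailer 21 */
}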
next prev up linux/drivers/net/ethernet/sfc/falcon/farch.c:1184 │ linux/drivers/net/ethernet/sfc/farch.c:1175
│
struct ef4_nic *efx = channel->efx; │ struct efx_nic *efx = channel->efx;
unsigned int ev_sub_code; │ unsigned int ev_sub_code;
unsigned int ev_sub_data; │ unsigned int ev_sub_data;
│
ev_sub_code = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); │ ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
ev_sub_data = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); │ ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
│
switch (ev_sub_code) { │ switch (ev_sub_code) {
case FSE_AZ_TX_DESCQ_FLS_DONE_EV: │ case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", │ netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
channel->channel, ev_sub_data); │ channel->channel, ev_sub_data);
ef4_farch_handle_tx_flush_done(efx, event); │ efx_farch_handle_tx_flush_done(efx, event);
│ #ifdef CONFIG_SFC_SRIOV
│ efx_siena_sriov_tx_flush_done(efx, event);
│ #endif
break; │ break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV: │ case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", │ netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
channel->channel, ev_sub_data); │ channel->channel, ev_sub_data);
ef4_farch_handle_rx_flush_done(efx, event); │ efx_farch_handle_rx_flush_done(efx, event);
│ #ifdef CONFIG_SFC_SRIOV
│ efx_siena_sriov_rx_flush_done(efx, event);
│ #endif
break; │ break;
case FSE_AZ_EVQ_INIT_DONE_EV: │ case FSE_AZ_EVQ_INIT_DONE_EV:
netif_dbg(efx, hw, efx->net_dev, │ netif_dbg(efx, hw, efx->net_dev,
"channel %d EVQ %d initialised\n", │ "channel %d EVQ %d initialised\n",
channel->channel, ev_sub_data); │ channel->channel, ev_sub_data);
break; │ break;
case FSE_AZ_SRM_UPD_DONE_EV: │ case FSE_AZ_SRM_UPD_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, │ netif_vdbg(efx, hw, efx->net_dev,
"channel %d SRAM update done\n", channel->channel); │ "channel %d SRAM update done\n", channel->channel);
break; │ break;
case FSE_AZ_WAKE_UP_EV: │ case FSE_AZ_WAKE_UP_EV:
netif_vdbg(efx, hw, efx->net_dev, │ netif_vdbg(efx, hw, efx->net_dev,
"channel %d RXQ %d wakeup event\n", │ "channel %d RXQ %d wakeup event\n",
channel->channel, ev_sub_data); │ channel->channel, ev_sub_data);
break; │ break;
case FSE_AZ_TIMER_EV: │ case FSE_AZ_TIMER_EV:
netif_vdbg(efx, hw, efx->net_dev, │ netif_vdbg(efx, hw, efx->net_dev,
"channel %d RX queue %d timer expired\n", │ "channel %d RX queue %d timer expired\n",
channel->channel, ev_sub_data); │ channel->channel, ev_sub_data);
break; │ break;
case FSE_AA_RX_RECOVER_EV: │ case FSE_AA_RX_RECOVER_EV:
netif_err(efx, rx_err, efx->net_dev, │ netif_err(efx, rx_err, efx->net_dev,
"channel %d seen DRIVER RX_RESET event. " │ "channel %d seen DRIVER RX_RESET event. "
"Resetting.\n", channel->channel); │ "Resetting.\n", channel->channel);
atomic_inc(&efx->rx_reset); │ atomic_inc(&efx->rx_reset);
ef4_schedule_reset(efx, │ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
EF4_WORKAROUND_6555(efx) ? │
RESET_TYPE_RX_RECOVERY : │
RESET_TYPE_DISABLE); │
break; │ break;
case FSE_BZ_RX_DSC_ERROR_EV: │ case FSE_BZ_RX_DSC_ERROR_EV:
netif_err(efx, rx_err, efx->net_dev, │ if (ev_sub_data < EFX_VI_BASE) {
"RX DMA Q %d reports descriptor fetch error." │ netif_err(efx, rx_err, efx->net_dev,
" RX Q %d is disabled.\n", ev_sub_data, │ "RX DMA Q %d reports descriptor fetch error."
ev_sub_data); │ " RX Q %d is disabled.\n", ev_sub_data,
ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR); │ ev_sub_data);
│ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
│ }
│ #ifdef CONFIG_SFC_SRIOV
│ else
│ efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
│ #endif
break; │ break;
case FSE_BZ_TX_DSC_ERROR_EV: │ case FSE_BZ_TX_DSC_ERROR_EV:
netif_err(efx, tx_err, efx->net_dev, │ if (ev_sub_data < EFX_VI_BASE) {
"TX DMA Q %d reports descriptor fetch error." │ netif_err(efx, tx_err, efx->net_dev,
" TX Q %d is disabled.\n", ev_sub_data, │ "TX DMA Q %d reports descriptor fetch error."
ev_sub_data); │ " TX Q %d is disabled.\n", ev_sub_data,
ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR); │ ev_sub_data);
│ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
│ }
│ #ifdef CONFIG_SFC_SRIOV
│ else
│ efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
│ #endif
break; │ break;
default: │ default:
netif_vdbg(efx, hw, efx->net_dev, │ netif_vdbg(efx, hw, efx->net_dev,
"channel %d unknown driver event code %d " │ "channel %d unknown driver event code %d "
"data %04x\n", channel->channel, ev_sub_code, │ "data %04x\n", channel->channel, ev_sub_code,
ev_sub_data); │ ev_sub_data);
break; │ break;
} │ }
} │
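
EFX_QWORD_FIELD()/EF4_QWORD_FIELD() above pull named bitfields out of a 64-bit driver event. A generic sketch of that extraction; the example field positions are illustrative, not the real FSF_AZ_* layout:

#include <stdint.h>

/* extract a field of `width` bits starting at low bit `lbn` (width < 64) */
static inline uint32_t qword_field(uint64_t ev, unsigned lbn, unsigned width)
{
        return (uint32_t)((ev >> lbn) & ((1ULL << width) - 1));
}

/* e.g., with a hypothetical layout of subcode in bits [59:56] and
 * subdata in bits [13:0]:
 *   sub_code = qword_field(event, 56, 4);
 *   sub_data = qword_field(event, 0, 14);
 */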
next prev up linux/drivers/net/ethernet/3com/3c589_cs.c:598 │ linux/drivers/net/ethernet/3com/3c574_cs.c:766
│
struct net_device *dev = (struct net_device *) dev_id; │ struct net_device *dev = (struct net_device *) dev_id;
struct el3_private *lp = netdev_priv(dev); │ struct el3_private *lp = netdev_priv(dev);
unsigned int ioaddr; │ unsigned int ioaddr;
__u16 status; │ unsigned status;
int i = 0, handled = 1; │ int work_budget = max_interrupt_work;
│ int handled = 0;
│
if (!netif_device_present(dev)) │ if (!netif_device_present(dev))
return IRQ_NONE; │ return IRQ_NONE;
│
ioaddr = dev->base_addr; │ ioaddr = dev->base_addr;
│
netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS)); │ pr_debug("%s: interrupt, status %4.4x.\n",
│ dev->name, inw(ioaddr + EL3_STATUS));
│
spin_lock(&lp->lock); │ spin_lock(&lp->window_lock);
│
while ((status = inw(ioaddr + EL3_STATUS)) & │ while ((status = inw(ioaddr + EL3_STATUS)) &
(IntLatch | RxComplete | StatsFull)) { │ (IntLatch | RxComplete | RxEarly | StatsFull)) {
if ((status & 0xe000) != 0x2000) { │ if (!netif_device_present(dev) ||
netdev_dbg(dev, "interrupt from dead card\n"); │ ((status & 0xe000) != 0x2000)) {
handled = 0; │ pr_debug("%s: Interrupt from dead card\n", dev->name);
break; │ break;
} │ }
│
│ handled = 1;
│
if (status & RxComplete) │ if (status & RxComplete)
el3_rx(dev); │ work_budget = el3_rx(dev, work_budget);
│
if (status & TxAvailable) { │ if (status & TxAvailable) {
netdev_dbg(dev, " TX room bit was handled.\n"); │ pr_debug(" TX room bit was handled.\n");
/* There's room in the FIFO for a full-sized packet. */ │ /* There's room in the FIFO for a full-sized packet. */
outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); │ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
netif_wake_queue(dev); │ netif_wake_queue(dev);
} │ }
│
if (status & TxComplete) │ if (status & TxComplete)
pop_tx_status(dev); │ pop_tx_status(dev);
│
if (status & (AdapterFailure | RxEarly | StatsFull)) { │ if (status & (AdapterFailure | RxEarly | StatsFull)) {
/* Handle all uncommon interrupts. */ │ /* Handle all uncommon interrupts. */
if (status & StatsFull) /* Empty statistics. */ │ if (status & StatsFull)
update_stats(dev); │ update_stats(dev);
if (status & RxEarly) { │ if (status & RxEarly) {
/* Rx early is unused. */ │ work_budget = el3_rx(dev, work_budget);
el3_rx(dev); │
outw(AckIntr | RxEarly, ioaddr + EL3_CMD); │ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
} │ }
if (status & AdapterFailure) { │ if (status & AdapterFailure) {
u16 fifo_diag; │ u16 fifo_diag;
EL3WINDOW(4); │ EL3WINDOW(4);
fifo_diag = inw(ioaddr + 4); │ fifo_diag = inw(ioaddr + Wn4_FIFODiag);
EL3WINDOW(1); │ EL3WINDOW(1);
netdev_warn(dev, "adapter failure, FIFO diagnostic regis │ netdev_notice(dev, "adapter failure, FIFO diagnostic reg
fifo_diag); │ fifo_diag);
if (fifo_diag & 0x0400) { │ if (fifo_diag & 0x0400) {
/* Tx overrun */ │ /* Tx overrun */
tc589_wait_for_completion(dev, TxReset); │ tc574_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD); │ outw(TxEnable, ioaddr + EL3_CMD);
} │ }
if (fifo_diag & 0x2000) { │ if (fifo_diag & 0x2000) {
/* Rx underrun */ │ /* Rx underrun */
tc589_wait_for_completion(dev, RxReset); │ tc574_wait_for_completion(dev, RxReset);
set_rx_mode(dev); │ set_rx_mode(dev);
outw(RxEnable, ioaddr + EL3_CMD); │ outw(RxEnable, ioaddr + EL3_CMD);
} │ }
outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); │ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
} │ }
} │ }
if (++i > 10) { │
netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n", │ if (--work_budget < 0) {
status); │ pr_debug("%s: Too much work in interrupt, "
│ "status %4.4x.\n", dev->name, status);
/* Clear all interrupts */ │ /* Clear all interrupts */
outw(AckIntr | 0xFF, ioaddr + EL3_CMD); │ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
break; │ break;
} │ }
/* Acknowledge the IRQ. */ │ /* Acknowledge the IRQ. */
outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); │ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
} │ }
lp->last_irq = jiffies; │
spin_unlock(&lp->lock); │ pr_debug("%s: exiting interrupt, status %4.4x.\n",
netdev_dbg(dev, "exiting interrupt, status %4.4x.\n", │ dev->name, inw(ioaddr + EL3_STATUS));
inw(ioaddr + EL3_STATUS)); │
│ spin_unlock(&lp->window_lock);
return IRQ_RETVAL(handled); │ return IRQ_RETVAL(handled);
} │
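
The 3c574 variant above caps the number of status iterations per interrupt with work_budget, so a wedged status register cannot monopolize the CPU. A condensed sketch of the pattern with stubbed hardware accessors (all names illustrative):

#include <stdbool.h>

#define MAX_INTERRUPT_WORK 32

static unsigned read_status(void) { return 0; }              /* stub */
static void handle_status(unsigned status) { (void)status; } /* stub */
static void ack_all_interrupts(void) { }                     /* stub */

static int irq_service_loop(void)
{
        int work_budget = MAX_INTERRUPT_WORK;
        unsigned status;
        bool handled = false;

        while ((status = read_status()) != 0) {
                handled = true;
                handle_status(status);
                if (--work_budget < 0) {        /* too much work: bail */
                        ack_all_interrupts();
                        break;
                }
        }
        return handled ? 1 : 0;  /* IRQ_HANDLED vs IRQ_NONE analogue */
}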
next prev up linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c:934 │ linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c:1131
│
struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer; │ struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc_spec = &value->misc; │ struct mlx5dr_match_misc *misc_spec = &value->misc;
│
DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid); │ DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi); │ DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi);
DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio); │ DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio);
DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag); │ DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag);
DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype); │ DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype);
│
if (spec->ip_version) { │ if (spec->ip_version == IP_VERSION_IPV4) {
if (spec->ip_version == IP_VERSION_IPV4) { │ MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4);
MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4); │ spec->ip_version = 0;
spec->ip_version = 0; │ } else if (spec->ip_version == IP_VERSION_IPV6) {
} else if (spec->ip_version == IP_VERSION_IPV6) { │ MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6);
MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6); │ spec->ip_version = 0;
spec->ip_version = 0; │ } else if (spec->ip_version) {
} else { │ return -EINVAL;
return -EINVAL; │
} │
} │ }
│
if (spec->cvlan_tag) { │ if (spec->cvlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN); │ MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
spec->cvlan_tag = 0; │ spec->cvlan_tag = 0;
} else if (spec->svlan_tag) { │ } else if (spec->svlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN); │ MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
spec->svlan_tag = 0; │ spec->svlan_tag = 0;
} │ }
│
if (inner) { │ if (inner) {
if (misc_spec->inner_second_cvlan_tag) { │ if (misc_spec->inner_second_cvlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLA │ MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_C
misc_spec->inner_second_cvlan_tag = 0; │ misc_spec->inner_second_cvlan_tag = 0;
} else if (misc_spec->inner_second_svlan_tag) { │ } else if (misc_spec->inner_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLA │ MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_S
misc_spec->inner_second_svlan_tag = 0; │ misc_spec->inner_second_svlan_tag = 0;
} │ }
│
DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_ │ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_seco
DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi) │ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_c
DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second │ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_sec
} else { │ } else {
if (misc_spec->outer_second_cvlan_tag) { │ if (misc_spec->outer_second_cvlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLA │ MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_C
misc_spec->outer_second_cvlan_tag = 0; │ misc_spec->outer_second_cvlan_tag = 0;
} else if (misc_spec->outer_second_svlan_tag) { │ } else if (misc_spec->outer_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLA │ MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_S
misc_spec->outer_second_svlan_tag = 0; │ misc_spec->outer_second_svlan_tag = 0;
} │ }
DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_ │ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_seco
DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi) │ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_c
DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second │ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_sec
} │ }
│
return 0; │ return 0;
} │
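
Both STE builders follow a consume-on-match convention: each field copied into the hardware tag is zeroed in the software spec, so the caller can later detect fields no builder supported. A stripped-down sketch of the idea with illustrative structures:

#include <stdint.h>

struct spec { uint16_t vlan_id; uint8_t cvlan_tag; };
struct tag  { uint16_t first_vlan_id; uint8_t first_vlan_qualifier; };

#define QUAL_CVLAN 1

static void build_vlan_tag(struct spec *s, struct tag *t)
{
        if (s->cvlan_tag) {
                t->first_vlan_qualifier = QUAL_CVLAN;
                s->cvlan_tag = 0;       /* consumed: clear in the spec */
        }
        t->first_vlan_id = s->vlan_id;
        s->vlan_id = 0;                 /* consumed as well */
}

/* after all builders run, any non-zero spec field is unsupported */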
next prev up linux/drivers/net/ethernet/marvell/skge.h:975 │ linux/drivers/net/ethernet/marvell/sky2.h:1137
│
PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ │ PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */ │ PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ │ PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ │ PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ │ PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */ │ PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ │ PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */ │ PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ │ PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
/* Marvell-specific registers */                                           │ /* Marvell-specific registers */
PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */ │ PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */ │ PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */ │ PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */ │ PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */ │ PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */ │ PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */ │ PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */ │ PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */ │ PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */ │ PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */ │ PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */ │ PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */ │ PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */ │ PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */ │ PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */
PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */ │ PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */
PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */ │ PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */
PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */ │ PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */
│
/* for 10/100 Fast Ethernet PHY (88E3082 only) */ │ /* for 10/100 Fast Ethernet PHY (88E3082 only) */
PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */ │ PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */
PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */ │ PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */
PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */ │ PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */
PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */ │ PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */
PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */ │ PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
} │
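
The PAGE_ADDR/PAGE_DATA pair at the bottom of the map suggests the usual paged-access idiom: select the extended page through register 0x1d, then access the paged register through 0x1e. A sketch with hypothetical MDIO bus accessors:

#include <stdint.h>

#define PHY_MARV_PAGE_ADDR 0x1d  /* Extended Page Address Reg */
#define PHY_MARV_PAGE_DATA 0x1e  /* Extended Page Data Reg */

/* hypothetical MDIO bus accessors */
static uint16_t mdio_read(int phy, int reg)
{ (void)phy; (void)reg; return 0; }
static void mdio_write(int phy, int reg, uint16_t v)
{ (void)phy; (void)reg; (void)v; }

static uint16_t phy_paged_read(int phy, uint16_t page)
{
        mdio_write(phy, PHY_MARV_PAGE_ADDR, page); /* select page */
        return mdio_read(phy, PHY_MARV_PAGE_DATA); /* read paged reg */
}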
next prev up linux/drivers/net/ethernet/sun/cassini.h:2367 │ linux/drivers/net/ethernet/sun/cassini.h:2287
│
{ "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, │ { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT,
S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000} , │ CL_REG, 0x3ff, 1, 0x0, 0x0000},
{ "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, │ { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
IM_CTL, 0x04a, 3, 0x0, 0xffff}, │ IM_CTL, 0x00a, 3, 0x0, 0xffff},
{ "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023, │ { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S3_CLNP, 1, S1_8023,
CL_REG, 0x000, 0, 0x0, 0x0000}, │ CL_REG, 0x000, 0, 0x0, 0x0000},
{ "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, │ { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4,
CL_REG, 0x000, 0, 0x0, 0x0000}, │ CL_REG, 0x000, 0, 0x0, 0x0000},
{ "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, │ { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S3_CLNP,
CL_REG, 0x000, 0, 0x0, 0x0000}, │ CL_REG, 0x000, 0, 0x0, 0x0000},
{ "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, │ { "LLCc?",0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S3_CLNP,
CL_REG, 0x000, 0, 0x0, 0x0000}, │ CL_REG, 0x000, 0, 0x0, 0x0000},
{ "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, │ { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6,
IM_SAP, 0x6AE, 3, 0x0, 0xffff}, │ LD_SAP, 0x100, 3, 0x0, 0xffff},
{ "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, │ { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S3_CLNP,
LD_SUM, 0x00a, 1, 0x0, 0x0000}, │ LD_SUM, 0x00a, 1, 0x0, 0x0000},
{ "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, │ { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S3_FRAG,
LD_LEN, 0x03e, 1, 0x0, 0xffff}, │ LD_LEN, 0x03e, 3, 0x0, 0xffff},
{ "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP, │ { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S3_TCPSQ, 0, S3_CLNP,
LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ │ LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
{ "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, │ { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S3_IPV6c, 0, S3_CLNP,
LD_SUM, 0x015, 1, 0x0, 0x0000}, │ LD_SUM, 0x015, 1, 0x0, 0x0000},
{ "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, │ { "IPV6 cont?", 0xf000, 0x6000, OP_EQ, 3, S3_TCP64, 0, S3_CLNP,
IM_R1, 0x128, 1, 0x0, 0xffff}, │
{ "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, │
LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ │ LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
{ "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, │ { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S3_TCPSQ, 0, S3_CLNP,
LD_LEN, 0x03f, 1, 0x0, 0xffff}, │ LD_LEN, 0x03f, 1, 0x0, 0xffff},
{ "TCP seq", /* DADDR should point to dest port */ │ { "TCP seq", /* DADDR should point to dest port */
0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ, │ 0x0000, 0x0000, OP_EQ, 0, S3_TCPFG, 4, S3_TCPFG, LD_SEQ,
0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ │ 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
{ "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0, │ { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHL, 0,
S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */ │ S3_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */
{ "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc, │ { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHc, 0, S3_TCPHc,
LD_R1, 0x205, 3, 0xB, 0xf000}, │ LD_R1, 0x205, 3, 0xB, 0xf000},
{ "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, │ { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff}, │ LD_HDR, 0x0ff, 3, 0x0, 0xffff},
{ "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, │ { "IP4 Fragment", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF,
IM_SAP, 0x6AE, 3, 0x0, 0xffff} , │ LD_FID, 0x103, 3, 0x0, 0xffff}, /* FID IP4 src+dst */
{ "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, │ { "IP4 frag offset", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF,
│ LD_SEQ, 0x040, 1, 0xD, 0xfff8},
│ { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
IM_CTL, 0x001, 3, 0x0, 0x0001}, │ IM_CTL, 0x001, 3, 0x0, 0x0001},
{ NULL }, │ { NULL },
} │
next prev up linux/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c:1333 │ linux/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c:1452
│
struct flow_dissector_key_ipv4_addrs key, mask; │ struct {
│ struct flow_dissector_key_basic bkey;
│ struct flow_dissector_key_ports key;
│ } __aligned(BITS_PER_LONG / 8) keys;
│ struct {
│ struct flow_dissector_key_basic bmask;
│ struct flow_dissector_key_ports mask;
│ } __aligned(BITS_PER_LONG / 8) masks;
unsigned long dummy_cookie = 0xdeadbeef; │ unsigned long dummy_cookie = 0xdeadbeef;
struct stmmac_packet_attrs attr = { }; │ struct stmmac_packet_attrs attr = { };
struct flow_dissector *dissector; │ struct flow_dissector *dissector;
struct flow_cls_offload *cls; │ struct flow_cls_offload *cls;
int ret, old_enable = 0; │ int ret, old_enable = 0;
struct flow_rule *rule; │ struct flow_rule *rule;
│
if (!tc_can_offload(priv->dev)) │ if (!tc_can_offload(priv->dev))
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
if (!priv->dma_cap.l3l4fnum) │ if (!priv->dma_cap.l3l4fnum)
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
if (priv->rss.enable) { │ if (priv->rss.enable) {
old_enable = priv->rss.enable; │ old_enable = priv->rss.enable;
priv->rss.enable = false; │ priv->rss.enable = false;
stmmac_rss_configure(priv, priv->hw, NULL, │ stmmac_rss_configure(priv, priv->hw, NULL,
priv->plat->rx_queues_to_use); │ priv->plat->rx_queues_to_use);
} │ }
│
dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); │ dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
if (!dissector) { │ if (!dissector) {
ret = -ENOMEM; │ ret = -ENOMEM;
goto cleanup_rss; │ goto cleanup_rss;
} │ }
│
dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS); │ dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0; │ dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
│ dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
│ dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);
│
cls = kzalloc(sizeof(*cls), GFP_KERNEL); │ cls = kzalloc(sizeof(*cls), GFP_KERNEL);
if (!cls) { │ if (!cls) {
ret = -ENOMEM; │ ret = -ENOMEM;
goto cleanup_dissector; │ goto cleanup_dissector;
} │ }
│
cls->common.chain_index = 0; │ cls->common.chain_index = 0;
cls->command = FLOW_CLS_REPLACE; │ cls->command = FLOW_CLS_REPLACE;
cls->cookie = dummy_cookie; │ cls->cookie = dummy_cookie;
│
rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL); │ rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
if (!rule) { │ if (!rule) {
ret = -ENOMEM; │ ret = -ENOMEM;
goto cleanup_cls; │ goto cleanup_cls;
} │ }
│
rule->match.dissector = dissector; │ rule->match.dissector = dissector;
rule->match.key = (void *)&key; │ rule->match.key = (void *)&keys;
rule->match.mask = (void *)&mask; │ rule->match.mask = (void *)&masks;
│
key.src = htonl(src); │ keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
key.dst = htonl(dst); │ keys.key.src = htons(src);
mask.src = src_mask; │ keys.key.dst = htons(dst);
mask.dst = dst_mask; │ masks.mask.src = src_mask;
│ masks.mask.dst = dst_mask;
│
cls->rule = rule; │ cls->rule = rule;
│
rule->action.entries[0].id = FLOW_ACTION_DROP; │ rule->action.entries[0].id = FLOW_ACTION_DROP;
rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY; │ rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
rule->action.num_entries = 1; │ rule->action.num_entries = 1;
│
attr.dst = priv->dev->dev_addr; │ attr.dst = priv->dev->dev_addr;
attr.ip_dst = dst; │ attr.tcp = !udp;
attr.ip_src = src; │ attr.sport = src;
│ attr.dport = dst;
│ attr.ip_dst = 0;
│
/* Shall receive packet */ │ /* Shall receive packet */
ret = __stmmac_test_loopback(priv, &attr); │ ret = __stmmac_test_loopback(priv, &attr);
if (ret) │ if (ret)
goto cleanup_rule; │ goto cleanup_rule;
│
ret = stmmac_tc_setup_cls(priv, priv, cls); │ ret = stmmac_tc_setup_cls(priv, priv, cls);
if (ret) │ if (ret)
goto cleanup_rule; │ goto cleanup_rule;
│
/* Shall NOT receive packet */ │ /* Shall NOT receive packet */
ret = __stmmac_test_loopback(priv, &attr); │ ret = __stmmac_test_loopback(priv, &attr);
ret = ret ? 0 : -EINVAL; │ ret = ret ? 0 : -EINVAL;
│
cls->command = FLOW_CLS_DESTROY; │ cls->command = FLOW_CLS_DESTROY;
stmmac_tc_setup_cls(priv, priv, cls); │ stmmac_tc_setup_cls(priv, priv, cls);
cleanup_rule: │ cleanup_rule:
kfree(rule); │ kfree(rule);
cleanup_cls: │ cleanup_cls:
kfree(cls); │ kfree(cls);
cleanup_dissector: │ cleanup_dissector:
kfree(dissector); │ kfree(dissector);
cleanup_rss: │ cleanup_rss:
if (old_enable) { │ if (old_enable) {
priv->rss.enable = old_enable; │ priv->rss.enable = old_enable;
stmmac_rss_configure(priv, priv->hw, &priv->rss, │ stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use); │ priv->plat->rx_queues_to_use);
} │ }
│
return ret; │ return ret;
} │
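
The right-hand variant packs its basic and ports keys into one struct and points each dissector offset at the matching member; this is how the flow dissector locates keys inside a single caller-supplied blob. A minimal sketch of that offsetof() convention with illustrative types:

#include <stddef.h>
#include <stdint.h>

struct key_basic { uint8_t ip_proto; };
struct key_ports { uint16_t src, dst; };

struct packed_keys {
        struct key_basic bkey;
        struct key_ports key;
};

enum { KEY_BASIC, KEY_PORTS, KEY_MAX };

struct dissector { size_t offset[KEY_MAX]; };

static void setup_offsets(struct dissector *d)
{
        d->offset[KEY_BASIC] = offsetof(struct packed_keys, bkey);
        d->offset[KEY_PORTS] = offsetof(struct packed_keys, key);
}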
next prev up linux/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c:771 │ linux/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c:9257
│
unsigned int page_shift, page_size, qpp_shift, qpp_mask; │ unsigned int page_shift, page_size, qpp_shift, qpp_mask;
u64 bar2_page_offset, bar2_qoffset; │ u64 bar2_page_offset, bar2_qoffset;
unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; │ unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
│
/* T4 doesn't support BAR2 SGE Queue registers. │ /* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
*/ │ if (!user && is_t4(adapter->params.chip))
if (is_t4(adapter->params.chip)) │
return -EINVAL; │ return -EINVAL;
│
/* Get our SGE Page Size parameters. │ /* Get our SGE Page Size parameters.
*/ │ */
page_shift = adapter->params.sge.sge_vf_hps + 10; │ page_shift = adapter->params.sge.hps + 10;
page_size = 1 << page_shift; │ page_size = 1 << page_shift;
│
/* Get the right Queues per Page parameters for our Queue. │ /* Get the right Queues per Page parameters for our Queue.
*/ │ */
qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS │ qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
? adapter->params.sge.sge_vf_eq_qpp │ ? adapter->params.sge.eq_qpp
: adapter->params.sge.sge_vf_iq_qpp); │ : adapter->params.sge.iq_qpp);
qpp_mask = (1 << qpp_shift) - 1; │ qpp_mask = (1 << qpp_shift) - 1;
│
/* Calculate the basics of the BAR2 SGE Queue register area: │ /* Calculate the basics of the BAR2 SGE Queue register area:
* o The BAR2 page the Queue registers will be in. │ * o The BAR2 page the Queue registers will be in.
* o The BAR2 Queue ID. │ * o The BAR2 Queue ID.
* o The BAR2 Queue ID Offset into the BAR2 page. │ * o The BAR2 Queue ID Offset into the BAR2 page.
*/ │ */
bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift); │ bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
bar2_qid = qid & qpp_mask; │ bar2_qid = qid & qpp_mask;
bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; │ bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
│
/* If the BAR2 Queue ID Offset is less than the Page Size, then the │ /* If the BAR2 Queue ID Offset is less than the Page Size, then the
* hardware will infer the Absolute Queue ID simply from the writes to │ * hardware will infer the Absolute Queue ID simply from the writes to
* the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a │ * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
* BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply │ * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
* write to the first BAR2 SGE Queue Area within the BAR2 Page with │ * write to the first BAR2 SGE Queue Area within the BAR2 Page with
* the BAR2 Queue ID and the hardware will infer the Absolute Queue ID │ * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
* from the BAR2 Page and BAR2 Queue ID. │ * from the BAR2 Page and BAR2 Queue ID.
* │ *
	 * One important consequence of this is that some BAR2 SGE registers │	 * One important consequence of this is that some BAR2 SGE registers
* have a "Queue ID" field and we can write the BAR2 SGE Queue ID │ * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
* there. But other registers synthesize the SGE Queue ID purely │ * there. But other registers synthesize the SGE Queue ID purely
* from the writes to the registers -- the Write Combined Doorbell │ * from the writes to the registers -- the Write Combined Doorbell
* Buffer is a good example. These BAR2 SGE Registers are only │ * Buffer is a good example. These BAR2 SGE Registers are only
* available for those BAR2 SGE Register areas where the SGE Absolute │ * available for those BAR2 SGE Register areas where the SGE Absolute
* Queue ID can be inferred from simple writes. │ * Queue ID can be inferred from simple writes.
*/ │ */
bar2_qoffset = bar2_page_offset; │ bar2_qoffset = bar2_page_offset;
bar2_qinferred = (bar2_qid_offset < page_size); │ bar2_qinferred = (bar2_qid_offset < page_size);
if (bar2_qinferred) { │ if (bar2_qinferred) {
bar2_qoffset += bar2_qid_offset; │ bar2_qoffset += bar2_qid_offset;
bar2_qid = 0; │ bar2_qid = 0;
} │ }
│
*pbar2_qoffset = bar2_qoffset; │ *pbar2_qoffset = bar2_qoffset;
*pbar2_qid = bar2_qid; │ *pbar2_qid = bar2_qid;
return 0; │ return 0;
} │
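
A worked instance of the arithmetic above, assuming 4 KB BAR2 pages (page_shift = 12), 8 queues per page (qpp_shift = 3), and a 128-byte doorbell area per queue:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned page_shift = 12, qpp_shift = 3, udb_size = 128, qid = 21;
        uint64_t page_off = (uint64_t)(qid >> qpp_shift) << page_shift;
        unsigned bar2_qid = qid & ((1u << qpp_shift) - 1);
        unsigned qid_off  = bar2_qid * udb_size;

        /* (21 >> 3) << 12 = 0x2000; 21 & 7 = 5; 5 * 128 = 640 < 4096 */
        if (qid_off < (1u << page_shift)) {     /* inferred-ID case */
                page_off += qid_off;
                bar2_qid = 0;
        }
        printf("qoffset=0x%llx qid=%u\n",       /* prints 0x2280, 0 */
               (unsigned long long)page_off, bar2_qid);
        return 0;
}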
next prev up linux/drivers/net/ethernet/sfc/farch.c:1513 │ linux/drivers/net/ethernet/sfc/falcon/farch.c:1501
│
struct efx_nic *efx = dev_id; │ struct ef4_nic *efx = dev_id;
bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); │ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
efx_oword_t *int_ker = efx->irq_status.addr; │ ef4_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE; │ irqreturn_t result = IRQ_NONE;
struct efx_channel *channel; │ struct ef4_channel *channel;
efx_dword_t reg; │ ef4_dword_t reg;
u32 queues; │ u32 queues;
int syserr; │ int syserr;
│
/* Read the ISR which also ACKs the interrupts */ │ /* Read the ISR which also ACKs the interrupts */
efx_readd(efx, ®, FR_BZ_INT_ISR0); │ ef4_readd(efx, ®, FR_BZ_INT_ISR0);
queues = EFX_EXTRACT_DWORD(reg, 0, 31); │ queues = EF4_EXTRACT_DWORD(reg, 0, 31);
│
/* Legacy interrupts are disabled too late by the EEH kernel │ /* Legacy interrupts are disabled too late by the EEH kernel
* code. Disable them earlier. │ * code. Disable them earlier.
* If an EEH error occurred, the read will have returned all ones. │ * If an EEH error occurred, the read will have returned all ones.
*/ │ */
if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && │ if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) &&
!efx->eeh_disabled_legacy_irq) { │ !efx->eeh_disabled_legacy_irq) {
disable_irq_nosync(efx->legacy_irq); │ disable_irq_nosync(efx->legacy_irq);
efx->eeh_disabled_legacy_irq = true; │ efx->eeh_disabled_legacy_irq = true;
} │ }
│
/* Handle non-event-queue sources */ │ /* Handle non-event-queue sources */
if (queues & (1U << efx->irq_level) && soft_enabled) { │ if (queues & (1U << efx->irq_level) && soft_enabled) {
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); │ syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr)) │ if (unlikely(syserr))
return efx_farch_fatal_interrupt(efx); │ return ef4_farch_fatal_interrupt(efx);
efx->last_irq_cpu = raw_smp_processor_id(); │ efx->last_irq_cpu = raw_smp_processor_id();
} │ }
│
if (queues != 0) { │ if (queues != 0) {
efx->irq_zero_count = 0; │ efx->irq_zero_count = 0;
│
/* Schedule processing of any interrupting queues */ │ /* Schedule processing of any interrupting queues */
if (likely(soft_enabled)) { │ if (likely(soft_enabled)) {
efx_for_each_channel(channel, efx) { │ ef4_for_each_channel(channel, efx) {
if (queues & 1) │ if (queues & 1)
efx_schedule_channel_irq(channel); │ ef4_schedule_channel_irq(channel);
queues >>= 1; │ queues >>= 1;
} │ }
} │ }
result = IRQ_HANDLED; │ result = IRQ_HANDLED;
│
} else { │ } else {
efx_qword_t *event; │ ef4_qword_t *event;
│
/* Legacy ISR read can return zero once (SF bug 15783) */ │ /* Legacy ISR read can return zero once (SF bug 15783) */
│
/* We can't return IRQ_HANDLED more than once on seeing ISR=0 │ /* We can't return IRQ_HANDLED more than once on seeing ISR=0
* because this might be a shared interrupt. */ │ * because this might be a shared interrupt. */
if (efx->irq_zero_count++ == 0) │ if (efx->irq_zero_count++ == 0)
result = IRQ_HANDLED; │ result = IRQ_HANDLED;
│
/* Ensure we schedule or rearm all event queues */ │ /* Ensure we schedule or rearm all event queues */
if (likely(soft_enabled)) { │ if (likely(soft_enabled)) {
efx_for_each_channel(channel, efx) { │ ef4_for_each_channel(channel, efx) {
event = efx_event(channel, │ event = ef4_event(channel,
channel->eventq_read_ptr); │ channel->eventq_read_ptr);
if (efx_event_present(event)) │ if (ef4_event_present(event))
efx_schedule_channel_irq(channel); │ ef4_schedule_channel_irq(channel);
else │ else
efx_farch_ev_read_ack(channel); │ ef4_farch_ev_read_ack(channel);
} │ }
} │ }
} │ }
│
if (result == IRQ_HANDLED) │ if (result == IRQ_HANDLED)
netif_vdbg(efx, intr, efx->net_dev, │ netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", │ "IRQ %d on CPU %d status " EF4_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); │ irq, raw_smp_processor_id(), EF4_DWORD_VAL(reg));
│
return result; │ return result;
} │
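
The zero-ISR branch above claims the interrupt exactly once per run of zero reads: a legacy ISR read can spuriously return zero (SF bug 15783), but repeatedly returning IRQ_HANDLED for ISR=0 would misbehave on a shared line. A condensed sketch of just that logic:

static int handle_isr(unsigned queues, unsigned *zero_count)
{
        if (queues != 0) {
                *zero_count = 0;
                return 1;       /* IRQ_HANDLED: real work to schedule */
        }
        /* claim only the first consecutive zero read */
        return (*zero_count)++ == 0 ? 1 : 0;
}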
next prev up linux/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c:399 │ linux/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c:478
│
reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i; │ reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i;
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); │ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M, │ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_M,
DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S, 512); │ DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_S, 256);
dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M, │ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_M,
DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0); │ DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0);
dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M, │ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_M,
DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0); │ DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); │ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
│
reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i; │ reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i;
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); │ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M, │ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_M,
DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0); │ DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0);
dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M, │ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_M,
DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0); │ DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); │ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
│
reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i; │ reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i;
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); │ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M, │ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M,
DSAF_SBM_CFG2_SET_BUF_NUM_S, 104); │ DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 104);
dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M, │ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M,
DSAF_SBM_CFG2_RESET_BUF_NUM_S, 128); │ DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 128);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); │ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
│
reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i; │ reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i;
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); │ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg, │ dsaf_set_field(o_sbm_bp_cfg,
DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, │ DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110); │ DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 55);
dsaf_set_field(o_sbm_bp_cfg, │ dsaf_set_field(o_sbm_bp_cfg,
DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, │ DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160); │ DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 110);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); │ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
│
/* for the case where PFC is not enabled */                                │ /* for the case where PFC is not enabled */
reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i; │ reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i;
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); │ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg, │ dsaf_set_field(o_sbm_bp_cfg,
DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, │ DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M,
DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 128); │ DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128);
dsaf_set_field(o_sbm_bp_cfg, │ dsaf_set_field(o_sbm_bp_cfg,
DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, │ DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M,
DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 192); │ DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); │ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
} │
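
Every dsaf_set_field() call above is a read-modify-write of one masked field inside a 32-bit register image: read once, rewrite the field at its shift, write the whole word back. A minimal sketch of such a helper (names illustrative):

#include <stdint.h>

static inline uint32_t set_field(uint32_t reg, uint32_t mask,
                                 unsigned shift, uint32_t val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}

/* usage: reg = set_field(reg, 0x3ffu << 10, 10, 256), then write back */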
next prev up linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:5120 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:5064
│
u8 reserved; │ u8 reserved;
u8 byte1; │ u8 byte1;
__le16 icid; │ __le16 msem_ctx_upd_seq;
u8 flags0; │ u8 flags0;
#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF │ #define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 │ #define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 │ #define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 │ #define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 │ #define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 │ #define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
u8 flags1; │ u8 flags1;
#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 │ #define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 │ #define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4 │ #define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 │ #define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 │ #define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2; │ u8 flags2;
#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0 │ #define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 │ #define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key; │ u8 key;
__le32 mw_cnt_or_qp_id; │ __le32 mw_cnt_or_qp_id;
u8 ref_cnt_seq; │ u8 ref_cnt_seq;
u8 ctx_upd_seq; │ u8 ctx_upd_seq;
__le16 dif_flags; │ __le16 dif_flags;
__le16 tx_ref_count; │ __le16 tx_ref_count;
__le16 last_used_ltid; │ __le16 last_used_ltid;
__le16 parent_mr_lo; │ __le16 parent_mr_lo;
__le16 parent_mr_hi; │ __le16 parent_mr_hi;
__le32 fbo_lo; │ __le32 fbo_lo;
__le32 fbo_hi; │ __le32 fbo_hi;
} │
next prev up linux/drivers/net/ethernet/intel/igc/igc_main.c:1417 │ linux/drivers/net/ethernet/intel/igb/igb_main.c:6331
│
u16 count = TXD_USE_COUNT(skb_headlen(skb)); │ struct igb_tx_buffer *first;
__be16 protocol = vlan_get_protocol(skb); │ int tso;
struct igc_tx_buffer *first; │
u32 tx_flags = 0; │ u32 tx_flags = 0;
unsigned short f; │ unsigned short f;
│ u16 count = TXD_USE_COUNT(skb_headlen(skb));
│ __be16 protocol = vlan_get_protocol(skb);
u8 hdr_len = 0; │ u8 hdr_len = 0;
int tso = 0; │
│
/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, │ /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
* + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, │ * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head, │ * + 2 desc gap to keep tail from touching head,
* + 1 desc for context descriptor, │ * + 1 desc for context descriptor,
* otherwise try next time │ * otherwise try next time
*/ │ */
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) │ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_frag_size( │ count += TXD_USE_COUNT(skb_frag_size(
&skb_shinfo(skb)->frags[f])); │ &skb_shinfo(skb)->frags[f]));
│
if (igc_maybe_stop_tx(tx_ring, count + 3)) { │ if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */ │ /* this is a hard error */
return NETDEV_TX_BUSY; │ return NETDEV_TX_BUSY;
} │ }
│
/* record the location of the first descriptor for this packet */ │ /* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; │ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
first->type = IGC_TX_BUFFER_TYPE_SKB; │ first->type = IGB_TYPE_SKB;
first->skb = skb; │ first->skb = skb;
first->bytecount = skb->len; │ first->bytecount = skb->len;
first->gso_segs = 1; │ first->gso_segs = 1;
│
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { │ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); │ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
│
/* FIXME: add support for retrieving timestamps from │
* the other timer registers before skipping the │
* timestamping request. │
*/ │
if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && │ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
!test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS, │ !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
&adapter->state)) { │ &adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; │ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGC_TX_FLAGS_TSTAMP; │ tx_flags |= IGB_TX_FLAGS_TSTAMP;
│
adapter->ptp_tx_skb = skb_get(skb); │ adapter->ptp_tx_skb = skb_get(skb);
adapter->ptp_tx_start = jiffies; │ adapter->ptp_tx_start = jiffies;
│ if (adapter->hw.mac.type == e1000_82576)
│ schedule_work(&adapter->ptp_tx_work);
} else { │ } else {
adapter->tx_hwtstamp_skipped++; │ adapter->tx_hwtstamp_skipped++;
} │ }
} │ }
│
if (skb_vlan_tag_present(skb)) { │ if (skb_vlan_tag_present(skb)) {
tx_flags |= IGC_TX_FLAGS_VLAN; │ tx_flags |= IGB_TX_FLAGS_VLAN;
tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); │ tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
} │ }
│
/* record initial flags and protocol */ │ /* record initial flags and protocol */
first->tx_flags = tx_flags; │ first->tx_flags = tx_flags;
first->protocol = protocol; │ first->protocol = protocol;
│
tso = igc_tso(tx_ring, first, &hdr_len); │ tso = igb_tso(tx_ring, first, &hdr_len);
if (tso < 0) │ if (tso < 0)
goto out_drop; │ goto out_drop;
else if (!tso) │ else if (!tso)
igc_tx_csum(tx_ring, first); │ igb_tx_csum(tx_ring, first);
│
igc_tx_map(tx_ring, first, hdr_len); │ if (igb_tx_map(tx_ring, first, hdr_len))
│ goto cleanup_tx_tstamp;
│
return NETDEV_TX_OK; │ return NETDEV_TX_OK;
│
out_drop: │ out_drop:
dev_kfree_skb_any(first->skb); │ dev_kfree_skb_any(first->skb);
first->skb = NULL; │ first->skb = NULL;
│ cleanup_tx_tstamp:
│ if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
│ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
│
│ dev_kfree_skb_any(adapter->ptp_tx_skb);
│ adapter->ptp_tx_skb = NULL;
│ if (adapter->hw.mac.type == e1000_82576)
│ cancel_work_sync(&adapter->ptp_tx_work);
│ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
│ }
│
return NETDEV_TX_OK; │ return NETDEV_TX_OK;
} │
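
The descriptor budgeting in the comment above amounts to: one descriptor per MAX_DATA_PER_TXD-sized chunk of the linear head and of each fragment, plus three more (one context descriptor and a two-descriptor gap so tail never touches head). A sketch of that count with an assumed chunk size:

#define MAX_DATA_PER_TXD 16384u  /* assumed per-descriptor data limit */

static unsigned txd_use_count(unsigned len)
{
        return (len + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD;
}

static unsigned tx_desc_needed(unsigned headlen,
                               const unsigned *frag_len, int nr_frags)
{
        unsigned count = txd_use_count(headlen);

        for (int f = 0; f < nr_frags; f++)
                count += txd_use_count(frag_len[f]);

        return count + 3;  /* + context descriptor + 2-descriptor gap */
}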
next prev up linux/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c:1515 │ linux/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c:1438
│
int j, ret; │ int j, ret;
u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; │ u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
u64 val; │
void __iomem *mem_crb; │ void __iomem *mem_crb;
│
/* Only 64-bit aligned access */ │ /* Only 64-bit aligned access */
if (off & 7) │ if (off & 7)
return -EIO; │ return -EIO;
│
/* P2 has different SIU and MIU test agent base addr */ │ /* P2 has different SIU and MIU test agent base addr */
if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, │ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
NETXEN_ADDR_QDR_NET_MAX_P2)) { │ NETXEN_ADDR_QDR_NET_MAX_P2)) {
mem_crb = pci_base_offset(adapter, │ mem_crb = pci_base_offset(adapter,
NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); │ NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
addr_hi = SIU_TEST_AGT_ADDR_HI; │ addr_hi = SIU_TEST_AGT_ADDR_HI;
data_lo = SIU_TEST_AGT_RDDATA_LO; │ data_lo = SIU_TEST_AGT_WRDATA_LO;
data_hi = SIU_TEST_AGT_RDDATA_HI; │ data_hi = SIU_TEST_AGT_WRDATA_HI;
off_lo = off & SIU_TEST_AGT_ADDR_MASK; │ off_lo = off & SIU_TEST_AGT_ADDR_MASK;
off_hi = SIU_TEST_AGT_UPPER_ADDR(off); │ off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
goto correct; │ goto correct;
} │ }
│
if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { │ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
mem_crb = pci_base_offset(adapter, │ mem_crb = pci_base_offset(adapter,
NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); │ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
addr_hi = MIU_TEST_AGT_ADDR_HI; │ addr_hi = MIU_TEST_AGT_ADDR_HI;
data_lo = MIU_TEST_AGT_RDDATA_LO; │ data_lo = MIU_TEST_AGT_WRDATA_LO;
data_hi = MIU_TEST_AGT_RDDATA_HI; │ data_hi = MIU_TEST_AGT_WRDATA_HI;
off_lo = off & MIU_TEST_AGT_ADDR_MASK; │ off_lo = off & MIU_TEST_AGT_ADDR_MASK;
off_hi = 0; │ off_hi = 0;
goto correct; │ goto correct;
} │ }
│
if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || │ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { │ ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
if (adapter->ahw.pci_len0 != 0) { │ if (adapter->ahw.pci_len0 != 0) {
return netxen_nic_pci_mem_access_direct(adapter, │ return netxen_nic_pci_mem_access_direct(adapter,
off, data, 0); │ off, &data, 1);
} │ }
} │ }
│
return -EIO; │ return -EIO;
│
correct: │ correct:
spin_lock(&adapter->ahw.mem_lock); │ spin_lock(&adapter->ahw.mem_lock);
netxen_nic_pci_set_crbwindow_128M(adapter, 0); │ netxen_nic_pci_set_crbwindow_128M(adapter, 0);
│
writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); │ writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
writel(off_hi, (mem_crb + addr_hi)); │ writel(off_hi, (mem_crb + addr_hi));
writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); │ writel(data & 0xffffffff, (mem_crb + data_lo));
writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); │ writel((data >> 32) & 0xffffffff, (mem_crb + data_hi));
│ writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
│ writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
│ (mem_crb + TEST_AGT_CTRL));
│
for (j = 0; j < MAX_CTL_CHECK; j++) { │ for (j = 0; j < MAX_CTL_CHECK; j++) {
temp = readl(mem_crb + TEST_AGT_CTRL); │ temp = readl((mem_crb + TEST_AGT_CTRL));
if ((temp & TA_CTL_BUSY) == 0) │ if ((temp & TA_CTL_BUSY) == 0)
break; │ break;
} │ }
│
if (j >= MAX_CTL_CHECK) { │ if (j >= MAX_CTL_CHECK) {
if (printk_ratelimit()) │ if (printk_ratelimit())
dev_err(&adapter->pdev->dev, │ dev_err(&adapter->pdev->dev,
"failed to read through agent\n"); │ "failed to write through agent\n");
ret = -EIO; │ ret = -EIO;
} else { │ } else
│
temp = readl(mem_crb + data_hi); │
val = ((u64)temp << 32); │
val |= readl(mem_crb + data_lo); │
*data = val; │
ret = 0; │ ret = 0;
} │
│
netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); │ netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
spin_unlock(&adapter->ahw.mem_lock); │ spin_unlock(&adapter->ahw.mem_lock);
│
return ret; │ return ret;
} │
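
The read path (left) and write path (right) share one handshake: program the test-agent address, kick START|ENABLE into the control register, and poll BUSY a bounded number of times before giving up with -EIO. A minimal sketch of that handshake, with reg_read()/reg_write() standing in for readl()/writel() and illustrative bit values rather than the real register layout:

/* Sketch of the test-agent handshake: two-step kick (ENABLE, then
 * START|ENABLE), then bounded polling on BUSY.  All bit positions
 * here are assumptions for illustration. */
#include <stdio.h>

#define TA_CTL_ENABLE 0x1u
#define TA_CTL_START  0x2u
#define TA_CTL_BUSY   0x8u
#define MAX_CTL_CHECK 1000

static unsigned int ctrl_reg;   /* stand-in for the mapped register */

static void reg_write(unsigned int v) { ctrl_reg = v; }
static unsigned int reg_read(void) { ctrl_reg &= ~TA_CTL_BUSY; return ctrl_reg; }

static int agent_cycle(void)
{
        int j;

        reg_write(TA_CTL_ENABLE);
        reg_write(TA_CTL_START | TA_CTL_ENABLE);
        for (j = 0; j < MAX_CTL_CHECK; j++)
                if (!(reg_read() & TA_CTL_BUSY))
                        return 0;
        return -1;              /* timed out: mirrors the -EIO path */
}

int main(void)
{
        printf("cycle %s\n", agent_cycle() ? "timed out" : "ok");
        return 0;
}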
next prev up linux/drivers/net/ethernet/intel/ice/ice_txrx.c:2125 │ linux/drivers/net/ethernet/intel/i40e/i40e_txrx.c:3400
│
const skb_frag_t *frag, *stale; │ const skb_frag_t *frag, *stale;
int nr_frags, sum; │ int nr_frags, sum;
│
/* no need to check if number of frags is less than 7 */ │ /* no need to check if number of frags is less than 7 */
nr_frags = skb_shinfo(skb)->nr_frags; │ nr_frags = skb_shinfo(skb)->nr_frags;
if (nr_frags < (ICE_MAX_BUF_TXD - 1)) │ if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
return false; │ return false;
│
/* We need to walk through the list and validate that each group │ /* We need to walk through the list and validate that each group
* of 6 fragments totals at least gso_size. │ * of 6 fragments totals at least gso_size.
*/ │ */
nr_frags -= ICE_MAX_BUF_TXD - 2; │ nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0]; │ frag = &skb_shinfo(skb)->frags[0];
│
/* Initialize size to the negative value of gso_size minus 1. We │ /* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead │	 * use this as the worst case scenario in which the frag ahead
* of us only provides one byte which is why we are limited to 6 │ * of us only provides one byte which is why we are limited to 6
* descriptors for a single transmit as the header and previous │ * descriptors for a single transmit as the header and previous
* fragment are already consuming 2 descriptors. │ * fragment are already consuming 2 descriptors.
*/ │ */
sum = 1 - skb_shinfo(skb)->gso_size; │ sum = 1 - skb_shinfo(skb)->gso_size;
│
/* Add size of frags 0 through 4 to create our initial sum */ │ /* Add size of frags 0 through 4 to create our initial sum */
sum += skb_frag_size(frag++); │ sum += skb_frag_size(frag++);
sum += skb_frag_size(frag++); │ sum += skb_frag_size(frag++);
sum += skb_frag_size(frag++); │ sum += skb_frag_size(frag++);
sum += skb_frag_size(frag++); │ sum += skb_frag_size(frag++);
sum += skb_frag_size(frag++); │ sum += skb_frag_size(frag++);
│
/* Walk through fragments adding latest fragment, testing it, and │ /* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum. │ * then removing stale fragments from the sum.
*/ │ */
for (stale = &skb_shinfo(skb)->frags[0];; stale++) { │ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
int stale_size = skb_frag_size(stale); │ int stale_size = skb_frag_size(stale);
│
sum += skb_frag_size(frag++); │ sum += skb_frag_size(frag++);
│
/* The stale fragment may present us with a smaller │ /* The stale fragment may present us with a smaller
* descriptor than the actual fragment size. To account │ * descriptor than the actual fragment size. To account
* for that we need to remove all the data on the front and │ * for that we need to remove all the data on the front and
* figure out what the remainder would be in the last │ * figure out what the remainder would be in the last
* descriptor associated with the fragment. │ * descriptor associated with the fragment.
*/ │ */
if (stale_size > ICE_MAX_DATA_PER_TXD) { │ if (stale_size > I40E_MAX_DATA_PER_TXD) {
int align_pad = -(skb_frag_off(stale)) & │ int align_pad = -(skb_frag_off(stale)) &
(ICE_MAX_READ_REQ_SIZE - 1); │ (I40E_MAX_READ_REQ_SIZE - 1);
│
sum -= align_pad; │ sum -= align_pad;
stale_size -= align_pad; │ stale_size -= align_pad;
│
do { │ do {
sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; │ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; │ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
} while (stale_size > ICE_MAX_DATA_PER_TXD); │ } while (stale_size > I40E_MAX_DATA_PER_TXD);
} │ }
│
/* if sum is negative we failed to make sufficient progress */ │ /* if sum is negative we failed to make sufficient progress */
if (sum < 0) │ if (sum < 0)
return true; │ return true;
│
if (!nr_frags--) │ if (!nr_frags--)
break; │ break;
│
sum -= stale_size; │ sum -= stale_size;
} │ }
│
return false; │ return false;
} │
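
The loop above is a sliding-window check: with the header and the previous fragment already holding two of the eight descriptors, every group of six consecutive fragments must supply at least gso_size bytes, or the skb has to be linearized. A standalone sketch under those assumptions; the DMA-alignment correction for oversized fragments is omitted:

/* Sketch of the sliding-window linearize test.  Plain ints stand in
 * for skb_frag_t; MAX_BUFFER_TXD mirrors the 8-descriptor limit. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_TXD 8        /* header + up to 7 data descriptors */

static bool needs_linearize(const int *frag, int nr_frags, int gso_size)
{
        int sum, i, stale = 0;

        if (nr_frags < MAX_BUFFER_TXD - 1)
                return false;

        /* worst case: the frag ahead contributes a single byte */
        sum = 1 - gso_size;
        for (i = 0; i < MAX_BUFFER_TXD - 3; i++)        /* frags 0..4 */
                sum += frag[i];

        for (;;) {
                sum += frag[i++];       /* newest frag in the window */
                if (sum < 0)
                        return true;    /* insufficient progress */
                if (i >= nr_frags)
                        return false;
                sum -= frag[stale++];   /* drop the stale frag */
        }
}

int main(void)
{
        const int frags[8] = { 1, 1, 1, 1, 1, 1, 1, 1 };

        printf("%s\n", needs_linearize(frags, 8, 1400) ?
               "linearize" : "ok");     /* prints "linearize" */
        return 0;
}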
next prev up linux/drivers/net/ethernet/sfc/falcon/selftest.c:691 │ linux/drivers/net/ethernet/sfc/selftest.c:689
│
enum ef4_loopback_mode loopback_mode = efx->loopback_mode; │ enum efx_loopback_mode loopback_mode = efx->loopback_mode;
int phy_mode = efx->phy_mode; │ int phy_mode = efx->phy_mode;
int rc_test = 0, rc_reset, rc; │ int rc_test = 0, rc_reset, rc;
│
ef4_selftest_async_cancel(efx); │ efx_selftest_async_cancel(efx);
│
/* Online (i.e. non-disruptive) testing │ /* Online (i.e. non-disruptive) testing
* This checks interrupt generation, event delivery and PHY presence. */ │ * This checks interrupt generation, event delivery and PHY presence. */
│
rc = ef4_test_phy_alive(efx, tests); │ rc = efx_test_phy_alive(efx, tests);
if (rc && !rc_test) │ if (rc && !rc_test)
rc_test = rc; │ rc_test = rc;
│
rc = ef4_test_nvram(efx, tests); │ rc = efx_test_nvram(efx, tests);
if (rc && !rc_test) │ if (rc && !rc_test)
rc_test = rc; │ rc_test = rc;
│
rc = ef4_test_interrupts(efx, tests); │ rc = efx_test_interrupts(efx, tests);
if (rc && !rc_test) │ if (rc && !rc_test)
rc_test = rc; │ rc_test = rc;
│
rc = ef4_test_eventq_irq(efx, tests); │ rc = efx_test_eventq_irq(efx, tests);
if (rc && !rc_test) │ if (rc && !rc_test)
rc_test = rc; │ rc_test = rc;
│
if (rc_test) │ if (rc_test)
return rc_test; │ return rc_test;
│
if (!(flags & ETH_TEST_FL_OFFLINE)) │ if (!(flags & ETH_TEST_FL_OFFLINE))
return ef4_test_phy(efx, tests, flags); │ return efx_test_phy(efx, tests, flags);
│
/* Offline (i.e. disruptive) testing │ /* Offline (i.e. disruptive) testing
* This checks MAC and PHY loopback on the specified port. */ │ * This checks MAC and PHY loopback on the specified port. */
│
/* Detach the device so the kernel doesn't transmit during the │ /* Detach the device so the kernel doesn't transmit during the
* loopback test and the watchdog timeout doesn't fire. │ * loopback test and the watchdog timeout doesn't fire.
*/ │ */
ef4_device_detach_sync(efx); │ efx_device_detach_sync(efx);
│
if (efx->type->test_chip) { │ if (efx->type->test_chip) {
rc_reset = efx->type->test_chip(efx, tests); │ rc_reset = efx->type->test_chip(efx, tests);
if (rc_reset) { │ if (rc_reset) {
netif_err(efx, hw, efx->net_dev, │ netif_err(efx, hw, efx->net_dev,
"Unable to recover from chip test\n"); │ "Unable to recover from chip test\n");
ef4_schedule_reset(efx, RESET_TYPE_DISABLE); │ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
return rc_reset; │ return rc_reset;
} │ }
│
if ((tests->memory < 0 || tests->registers < 0) && !rc_test) │ if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
rc_test = -EIO; │ rc_test = -EIO;
} │ }
│
/* Ensure that the phy is powered and out of loopback │ /* Ensure that the phy is powered and out of loopback
* for the bist and loopback tests */ │ * for the bist and loopback tests */
mutex_lock(&efx->mac_lock); │ mutex_lock(&efx->mac_lock);
efx->phy_mode &= ~PHY_MODE_LOW_POWER; │ efx->phy_mode &= ~PHY_MODE_LOW_POWER;
efx->loopback_mode = LOOPBACK_NONE; │ efx->loopback_mode = LOOPBACK_NONE;
__ef4_reconfigure_port(efx); │ __efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock); │ mutex_unlock(&efx->mac_lock);
│
rc = ef4_test_phy(efx, tests, flags); │ rc = efx_test_phy(efx, tests, flags);
if (rc && !rc_test) │ if (rc && !rc_test)
rc_test = rc; │ rc_test = rc;
│
rc = ef4_test_loopbacks(efx, tests, efx->loopback_modes); │ rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
if (rc && !rc_test) │ if (rc && !rc_test)
rc_test = rc; │ rc_test = rc;
│
/* restore the PHY to the previous state */ │ /* restore the PHY to the previous state */
mutex_lock(&efx->mac_lock); │ mutex_lock(&efx->mac_lock);
efx->phy_mode = phy_mode; │ efx->phy_mode = phy_mode;
efx->loopback_mode = loopback_mode; │ efx->loopback_mode = loopback_mode;
__ef4_reconfigure_port(efx); │ __efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock); │ mutex_unlock(&efx->mac_lock);
│
netif_device_attach(efx->net_dev); │ efx_device_attach_if_not_resetting(efx);
│
return rc_test; │ return rc_test;
} │
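
Every test in this routine feeds the same accumulation idiom, "if (rc && !rc_test) rc_test = rc;": keep running so later tests still execute, but report the first failure rather than the last. A compact standalone sketch of that idiom with stub tests:

/* Sketch: run every test, remember only the first nonzero status. */
#include <stdio.h>

static int t_pass(void) { return 0; }
static int t_fail(void) { return -5; }
static int t_late(void) { return -7; }

static int run_all(int (*test[])(void), int n)
{
        int i, rc, rc_test = 0;

        for (i = 0; i < n; i++) {
                rc = test[i]();         /* always run every test... */
                if (rc && !rc_test)
                        rc_test = rc;   /* ...but keep the first failure */
        }
        return rc_test;
}

int main(void)
{
        int (*tests[3])(void) = { t_pass, t_fail, t_late };

        printf("%d\n", run_all(tests, 3));      /* prints -5, not -7 */
        return 0;
}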
next prev up linux/drivers/net/ethernet/amd/a2065.c:321 │ linux/drivers/net/ethernet/amd/7990.c:353
│
struct lance_private *lp = netdev_priv(dev); │ struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block; │ volatile struct lance_init_block *ib = lp->init_block;
volatile struct lance_regs *ll = lp->ll; │
volatile struct lance_tx_desc *td; │ volatile struct lance_tx_desc *td;
int i, j; │ int i, j;
int status; │ int status;
│
│ #ifdef CONFIG_HP300
│ blinken_leds(0x80, 0);
│ #endif
/* csr0 is 2f3 */ │ /* csr0 is 2f3 */
ll->rdp = LE_C0_TINT | LE_C0_INEA; │ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
/* csr0 is 73 */ │ /* csr0 is 73 */
│
j = lp->tx_old; │ j = lp->tx_old;
for (i = j; i != lp->tx_new; i = j) { │ for (i = j; i != lp->tx_new; i = j) {
td = &ib->btx_ring[i]; │ td = &ib->btx_ring[i];
│
/* If we hit a packet not owned by us, stop */ │ /* If we hit a packet not owned by us, stop */
if (td->tmd1_bits & LE_T1_OWN) │ if (td->tmd1_bits & LE_T1_OWN)
break; │ break;
│
if (td->tmd1_bits & LE_T1_ERR) { │ if (td->tmd1_bits & LE_T1_ERR) {
status = td->misc; │ status = td->misc;
│
dev->stats.tx_errors++; │ dev->stats.tx_errors++;
if (status & LE_T3_RTY) │ if (status & LE_T3_RTY)
dev->stats.tx_aborted_errors++; │ dev->stats.tx_aborted_errors++;
if (status & LE_T3_LCOL) │ if (status & LE_T3_LCOL)
dev->stats.tx_window_errors++; │ dev->stats.tx_window_errors++;
│
if (status & LE_T3_CLOS) { │ if (status & LE_T3_CLOS) {
dev->stats.tx_carrier_errors++; │ dev->stats.tx_carrier_errors++;
if (lp->auto_select) { │ if (lp->auto_select) {
lp->tpe = 1 - lp->tpe; │ lp->tpe = 1 - lp->tpe;
netdev_err(dev, "Carrier Lost, trying %s\n", │ printk("%s: Carrier Lost, trying %s\n",
lp->tpe ? "TPE" : "AUI"); │ dev->name,
│ lp->tpe ? "TPE" : "AUI");
/* Stop the lance */ │ /* Stop the lance */
ll->rap = LE_CSR0; │ WRITERAP(lp, LE_CSR0);
ll->rdp = LE_C0_STOP; │ WRITERDP(lp, LE_C0_STOP);
lance_init_ring(dev); │ lance_init_ring(dev);
load_csrs(lp); │ load_csrs(lp);
init_restart_lance(lp); │ init_restart_lance(lp);
return 0; │ return 0;
} │ }
} │ }
│
/* buffer errors and underflows turn off │ /* buffer errors and underflows turn off the transmitter */
* the transmitter, so restart the adapter │ /* Restart the adapter */
*/ │ if (status & (LE_T3_BUF|LE_T3_UFL)) {
if (status & (LE_T3_BUF | LE_T3_UFL)) { │
dev->stats.tx_fifo_errors++; │ dev->stats.tx_fifo_errors++;
│
netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n"); │ printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
│ dev->name);
/* Stop the lance */ │ /* Stop the lance */
ll->rap = LE_CSR0; │ WRITERAP(lp, LE_CSR0);
ll->rdp = LE_C0_STOP; │ WRITERDP(lp, LE_C0_STOP);
lance_init_ring(dev); │ lance_init_ring(dev);
load_csrs(lp); │ load_csrs(lp);
init_restart_lance(lp); │ init_restart_lance(lp);
return 0; │ return 0;
} │ }
} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) { │ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
/* So we don't count the packet more than once. */ │ /*
│ * So we don't count the packet more than once.
│ */
td->tmd1_bits &= ~(LE_T1_POK); │ td->tmd1_bits &= ~(LE_T1_POK);
│
/* One collision before packet was sent. */ │ /* One collision before packet was sent. */
if (td->tmd1_bits & LE_T1_EONE) │ if (td->tmd1_bits & LE_T1_EONE)
dev->stats.collisions++; │ dev->stats.collisions++;
│
/* More than one collision, be optimistic. */ │ /* More than one collision, be optimistic. */
if (td->tmd1_bits & LE_T1_EMORE) │ if (td->tmd1_bits & LE_T1_EMORE)
dev->stats.collisions += 2; │ dev->stats.collisions += 2;
│
dev->stats.tx_packets++; │ dev->stats.tx_packets++;
} │ }
│
j = (j + 1) & lp->tx_ring_mod_mask; │ j = (j + 1) & lp->tx_ring_mod_mask;
} │ }
lp->tx_old = j; │ lp->tx_old = j;
ll->rdp = LE_C0_TINT | LE_C0_INEA; │ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
return 0; │ return 0;
} │
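
The reclaim walk depends on two conventions visible above: the OWN bit marks descriptors still held by the chip, and the ring size is a power of two so "(j + 1) & mask" wraps without a modulo. A standalone sketch; the ring size and bit value are illustrative, not the LANCE values:

/* Sketch: walk tx_old toward tx_new, stopping at the first
 * descriptor the chip still owns.  Power-of-two ring size makes the
 * wrap a single AND. */
#include <stdio.h>

#define RING_SIZE 16            /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)
#define LE_T1_OWN 0x80

static unsigned char tmd1[RING_SIZE];

static int reclaim(int tx_old, int tx_new)
{
        int j;

        for (j = tx_old; j != tx_new; j = (j + 1) & RING_MASK) {
                if (tmd1[j] & LE_T1_OWN)
                        break;          /* chip still owns it: stop */
                /* ...count stats, free the buffer... */
        }
        return j;                       /* new tx_old */
}

int main(void)
{
        tmd1[3] = LE_T1_OWN;            /* chip still owns slot 3 */
        printf("tx_old -> %d\n", reclaim(1, 6));        /* prints 3 */
        return 0;
}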
next prev up linux/drivers/net/ethernet/intel/e1000e/mac.c:247 │ linux/drivers/net/ethernet/intel/igb/e1000_mac.c:427
│
u32 hash_value, hash_mask; │ u32 hash_value, hash_mask;
u8 bit_shift = 0; │ u8 bit_shift = 0;
│
/* Register count multiplied by bits per register */ │ /* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1; │ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
│
/* For a mc_filter_type of 0, bit_shift is the number of left-shifts │ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask. │ * where 0xFF would still fall within the hash mask.
*/ │ */
while (hash_mask >> bit_shift != 0xFF) │ while (hash_mask >> bit_shift != 0xFF)
bit_shift++; │ bit_shift++;
│
/* The portion of the address that is used for the hash table │ /* The portion of the address that is used for the hash table
* is determined by the mc_filter_type setting. │ * is determined by the mc_filter_type setting.
* The algorithm is such that there is a total of 8 bits of shifting. │ * The algorithm is such that there is a total of 8 bits of shifting.
* The bit_shift for a mc_filter_type of 0 represents the number of │ * The bit_shift for a mc_filter_type of 0 represents the number of
* left-shifts where the MSB of mc_addr[5] would still fall within │ * left-shifts where the MSB of mc_addr[5] would still fall within
* the hash_mask. Case 0 does this exactly. Since there are a total │ * the hash_mask. Case 0 does this exactly. Since there are a total
* of 8 bits of shifting, then mc_addr[4] will shift right the │ * of 8 bits of shifting, then mc_addr[4] will shift right the
* remaining number of bits. Thus 8 - bit_shift. The rest of the │ * remaining number of bits. Thus 8 - bit_shift. The rest of the
* cases are a variation of this algorithm...essentially raising the │ * cases are a variation of this algorithm...essentially raising the
* number of bits to shift mc_addr[5] left, while still keeping the │ * number of bits to shift mc_addr[5] left, while still keeping the
* 8-bit shifting total. │ * 8-bit shifting total.
* │ *
* For example, given the following Destination MAC Address and an │ * For example, given the following Destination MAC Address and an
* mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), │ * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
* we can see that the bit_shift for case 0 is 4. These are the hash │ * we can see that the bit_shift for case 0 is 4. These are the hash
* values resulting from each mc_filter_type... │ * values resulting from each mc_filter_type...
* [0] [1] [2] [3] [4] [5] │ * [0] [1] [2] [3] [4] [5]
* 01 AA 00 12 34 56 │ * 01 AA 00 12 34 56
* LSB MSB │ * LSB MSB
* │ *
* case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 │ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
* case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 │ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x58D │	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x58D
* case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 │ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
*/ │ */
switch (hw->mac.mc_filter_type) { │ switch (hw->mac.mc_filter_type) {
default: │ default:
case 0: │ case 0:
break; │ break;
case 1: │ case 1:
bit_shift += 1; │ bit_shift += 1;
break; │ break;
case 2: │ case 2:
bit_shift += 2; │ bit_shift += 2;
break; │ break;
case 3: │ case 3:
bit_shift += 4; │ bit_shift += 4;
break; │ break;
} │ }
│
hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | │ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
(((u16)mc_addr[5]) << bit_shift))); │ (((u16) mc_addr[5]) << bit_shift)));
│
return hash_value; │ return hash_value;
} │
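
The comment's worked example is easy to check by machine: with a 4096-bit vector the base shift is 4, and each filter type trades bits between mc_addr[4] and mc_addr[5] while keeping eight bits of total shift. A standalone sketch that reproduces the computation; case 0 prints 0x563, matching the comment:

/* Sketch: reproduce the multicast hash for the example address
 * 01:AA:00:12:34:56 with a 128-register (0xFFF mask) table. */
#include <stdio.h>

static unsigned int mc_hash(const unsigned char *mc, int filter_type)
{
        unsigned int hash_mask = 0xFFF;         /* 128 regs * 32 bits - 1 */
        unsigned char bit_shift = 0;

        while ((hash_mask >> bit_shift) != 0xFF)
                bit_shift++;                    /* base shift: 4 */
        bit_shift += (filter_type == 3) ? 4 : filter_type; /* +0,1,2,4 */

        return hash_mask & ((mc[4] >> (8 - bit_shift)) |
                            ((unsigned int)mc[5] << bit_shift));
}

int main(void)
{
        const unsigned char mc[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
        int t;

        for (t = 0; t < 4; t++)
                printf("case %d: 0x%03X\n", t, mc_hash(mc, t));
        return 0;
}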
next prev up linux/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c:1980 │ linux/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c:2137
│
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS │ #ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif │ #endif
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, │ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
} │
next prev up linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h:606 │ linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h:459
│
u64 ena : 1; │ u64 ena : 1;
u64 qint_idx : 6; │ u64 qint_idx : 6;
u64 substream : 20; │ u64 substream : 20;
u64 sdp_mcast : 1; │ u64 sdp_mcast : 1;
u64 cq : 20; │ u64 cq : 20;
u64 sqe_way_mask : 16; │ u64 sqe_way_mask : 16;
u64 smq : 9; │ u64 smq : 10; /* W1 */
u64 cq_ena : 1; │ u64 cq_ena : 1;
u64 xoff : 1; │ u64 xoff : 1;
u64 sso_ena : 1; │ u64 sso_ena : 1;
u64 smq_rr_quantum : 24; │ u64 smq_rr_weight : 14;
u64 default_chan : 12; │ u64 default_chan : 12;
u64 sqb_count : 16; │ u64 sqb_count : 16;
u64 smq_rr_count : 25; │ u64 rsvd_120_119 : 2;
│ u64 smq_rr_count_lb : 7;
│ u64 smq_rr_count_ub : 25; /* W2 */
u64 sqb_aura : 20; │ u64 sqb_aura : 20;
u64 sq_int : 8; │ u64 sq_int : 8;
u64 sq_int_ena : 8; │ u64 sq_int_ena : 8;
u64 sqe_stype : 2; │ u64 sqe_stype : 2;
u64 rsvd_191 : 1; │ u64 rsvd_191 : 1;
u64 max_sqe_size : 2; │ u64 max_sqe_size : 2; /* W3 */
u64 cq_limit : 8; │ u64 cq_limit : 8;
u64 lmt_dis : 1; │ u64 lmt_dis : 1;
u64 mnq_dis : 1; │ u64 mnq_dis : 1;
u64 smq_next_sq : 20; │ u64 smq_next_sq : 20;
u64 smq_lso_segnum : 8; │ u64 smq_lso_segnum : 8;
u64 tail_offset : 6; │ u64 tail_offset : 6;
u64 smenq_offset : 6; │ u64 smenq_offset : 6;
u64 head_offset : 6; │ u64 head_offset : 6;
u64 smenq_next_sqb_vld : 1; │ u64 smenq_next_sqb_vld : 1;
u64 smq_pend : 1; │ u64 smq_pend : 1;
u64 smq_next_sq_vld : 1; │ u64 smq_next_sq_vld : 1;
u64 rsvd_255_253 : 3; │ u64 rsvd_255_253 : 3;
u64 next_sqb : 64;/* W4 */ │ u64 next_sqb : 64; /* W4 */
u64 tail_sqb : 64;/* W5 */ │ u64 tail_sqb : 64; /* W5 */
u64 smenq_sqb : 64;/* W6 */ │ u64 smenq_sqb : 64; /* W6 */
u64 smenq_next_sqb : 64;/* W7 */ │ u64 smenq_next_sqb : 64; /* W7 */
u64 head_sqb : 64;/* W8 */ │ u64 head_sqb : 64; /* W8 */
u64 rsvd_583_576 : 8; │ u64 rsvd_583_576 : 8; /* W9 */
u64 vfi_lso_total : 18; │ u64 vfi_lso_total : 18;
u64 vfi_lso_sizem1 : 3; │ u64 vfi_lso_sizem1 : 3;
u64 vfi_lso_sb : 8; │ u64 vfi_lso_sb : 8;
u64 vfi_lso_mps : 14; │ u64 vfi_lso_mps : 14;
u64 vfi_lso_vlan0_ins_ena : 1; │ u64 vfi_lso_vlan0_ins_ena : 1;
u64 vfi_lso_vlan1_ins_ena : 1; │ u64 vfi_lso_vlan1_ins_ena : 1;
u64 vfi_lso_vld : 1; │ u64 vfi_lso_vld : 1;
u64 rsvd_639_630 : 10; │ u64 rsvd_639_630 : 10;
u64 scm_lso_rem : 18; │ u64 scm_lso_rem : 18; /* W10 */
u64 rsvd_703_658 : 46; │ u64 rsvd_703_658 : 46;
u64 octs : 48; │ u64 octs : 48; /* W11 */
u64 rsvd_767_752 : 16; │ u64 rsvd_767_752 : 16;
u64 pkts : 48; │ u64 pkts : 48; /* W12 */
u64 rsvd_831_816 : 16; │ u64 rsvd_831_816 : 16;
u64 rsvd_895_832 : 64;/* W13 */ │ u64 rsvd_895_832 : 64; /* W13 */
u64 dropped_octs : 48; │ u64 dropped_octs : 48;
u64 rsvd_959_944 : 16; │ u64 rsvd_959_944 : 16;
u64 dropped_pkts : 48; │ u64 dropped_pkts : 48;
u64 rsvd_1023_1008 : 16; │ u64 rsvd_1023_1008 : 16;
} │
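
One visible layout change between the two context revisions is that the round-robin count no longer fits in one word: the right column splits it into smq_rr_count_lb (7 bits) and smq_rr_count_ub (25 bits) across the W1/W2 boundary. A hedged sketch of the reassembly, assuming from the naming that lb holds the low 7 bits:

/* Sketch (assumption, not driver code): rejoin the split counter. */
#include <stdio.h>

static unsigned int smq_rr_count(unsigned int lb, unsigned int ub)
{
        return (ub << 7) | (lb & 0x7f); /* ub: upper 25 bits, lb: low 7 */
}

int main(void)
{
        printf("0x%x\n", smq_rr_count(0x5, 0x3));       /* prints 0x185 */
        return 0;
}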
next prev up linux/drivers/net/ethernet/intel/e1000e/phy.c:1813 │ linux/drivers/net/ethernet/intel/igb/e1000_phy.c:1840
│
struct e1000_phy_info *phy = &hw->phy; │ struct e1000_phy_info *phy = &hw->phy;
s32 ret_val; │ s32 ret_val = 0;
u16 phy_data, i, agc_value = 0; │ u16 phy_data, i, agc_value = 0;
u16 cur_agc_index, max_agc_index = 0; │ u16 cur_agc_index, max_agc_index = 0;
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; │ u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1;
static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { │ static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
IGP02E1000_PHY_AGC_A, │ IGP02E1000_PHY_AGC_A,
IGP02E1000_PHY_AGC_B, │ IGP02E1000_PHY_AGC_B,
IGP02E1000_PHY_AGC_C, │ IGP02E1000_PHY_AGC_C,
IGP02E1000_PHY_AGC_D │ IGP02E1000_PHY_AGC_D
}; │ };
│
/* Read the AGC registers for all channels */ │ /* Read the AGC registers for all channels */
for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { │ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data); │ ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
/* Getting bits 15:9, which represent the combination of │ /* Getting bits 15:9, which represent the combination of
* coarse and fine gain values. The result is a number │ * coarse and fine gain values. The result is a number
* that can be put into the lookup table to obtain the │ * that can be put into the lookup table to obtain the
* approximate cable length. │ * approximate cable length.
*/ │ */
cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & │ cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
IGP02E1000_AGC_LENGTH_MASK); │ IGP02E1000_AGC_LENGTH_MASK;
│
/* Array index bound check. */ │ /* Array index bound check. */
if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || │ if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) ||
(cur_agc_index == 0)) │ (cur_agc_index == 0)) {
return -E1000_ERR_PHY; │ ret_val = -E1000_ERR_PHY;
│ goto out;
│ }
│
/* Remove min & max AGC values from calculation. */ │ /* Remove min & max AGC values from calculation. */
if (e1000_igp_2_cable_length_table[min_agc_index] > │ if (e1000_igp_2_cable_length_table[min_agc_index] >
e1000_igp_2_cable_length_table[cur_agc_index]) │ e1000_igp_2_cable_length_table[cur_agc_index])
min_agc_index = cur_agc_index; │ min_agc_index = cur_agc_index;
if (e1000_igp_2_cable_length_table[max_agc_index] < │ if (e1000_igp_2_cable_length_table[max_agc_index] <
e1000_igp_2_cable_length_table[cur_agc_index]) │ e1000_igp_2_cable_length_table[cur_agc_index])
max_agc_index = cur_agc_index; │ max_agc_index = cur_agc_index;
│
agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; │ agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
} │ }
│
agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + │ agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
e1000_igp_2_cable_length_table[max_agc_index]); │ e1000_igp_2_cable_length_table[max_agc_index]);
agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); │ agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
│
/* Calculate cable length with the error range of +/- 10 meters. */ │ /* Calculate cable length with the error range of +/- 10 meters. */
phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? │ phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
(agc_value - IGP02E1000_AGC_RANGE) : 0); │ (agc_value - IGP02E1000_AGC_RANGE) : 0;
phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; │ phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
│
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; │ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
│
return 0; │ out:
│ return ret_val;
} │
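
The averaging step is worth spelling out: the per-channel table lookups are summed, the smallest and largest values are discarded, the remaining two channels are averaged, and the result is widened by the +/- error range. A standalone sketch with made-up channel values; only the constant name mirrors IGP02E1000_AGC_RANGE:

/* Sketch: drop min/max outliers, average the rest, widen by range. */
#include <stdio.h>

#define AGC_RANGE 15            /* assumed, in the style of the driver */

int main(void)
{
        unsigned int chan[4] = { 40, 50, 60, 110 };     /* illustrative */
        unsigned int i, sum = 0, min = chan[0], max = chan[0];

        for (i = 0; i < 4; i++) {
                sum += chan[i];
                if (chan[i] < min) min = chan[i];
                if (chan[i] > max) max = chan[i];
        }
        sum -= min + max;       /* remove min & max from calculation */
        sum /= 4 - 2;           /* average the two remaining channels */
        printf("cable ~%u m (range %u..%u)\n",
               sum, sum > AGC_RANGE ? sum - AGC_RANGE : 0, sum + AGC_RANGE);
        return 0;
}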
next prev up linux/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c:309 │ linux/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c:250
│
void __iomem *rb; │ void __iomem *rb;
int port = bfa_ioc_portid(ioc); │ int pcifn = bfa_ioc_pcifn(ioc);
│
rb = bfa_ioc_bar0(ioc); │ rb = bfa_ioc_bar0(ioc);
│
ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; │ ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; │ ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; │ ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; │
ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; │ if (ioc->port_id == 0) {
ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; │ ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
│ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
if (port == 0) { │ ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; │ ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; │ ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; │
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; │ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; │ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
} else { │ } else {
ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG; │ ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; │ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; │ ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
│ ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
│ ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; │ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; │ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
} │ }
│
/* │ /*
* PSS control registers │ * PSS control registers
*/ │ */
ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; │ ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; │ ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG; │ ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG; │ ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;
│
/* │ /*
* IOC semaphore registers and serialization │ * IOC semaphore registers and serialization
*/ │ */
ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG; │ ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG; │ ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG; │ ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT; │ ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC; │ ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;
│
/** │ /**
* sram memory access │ * sram memory access
*/ │ */
ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; │ ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; │ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
│
/* │ /*
* err set reg : for notification of hb failure in fcmode │ * err set reg : for notification of hb failure in fcmode
*/ │ */
ioc->ioc_regs.err_set = rb + ERR_SET_REG; │ ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
} │
next prev up linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c:2742 │ linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c:2667
│
u32 tmp; │ u32 val;
u8 i; │ u16 i;
int rc = 0; │ int rc = 0;
u32 chip_id; │ u32 chip_id;
if (phy->flags & FLAGS_MDC_MDIO_WA_G) { │ if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | │ chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); │ ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl); │ bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
} │ }
│
if (phy->flags & FLAGS_MDC_MDIO_WA_B0) │ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, │ bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB); │ EMAC_MDIO_STATUS_10MB);
│
/* Address */ │ /* Address */
tmp = ((phy->addr << 21) | (devad << 16) | reg | │ val = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS | │ EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY); │ EMAC_MDIO_COMM_START_BUSY);
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); │ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
│
for (i = 0; i < 50; i++) { │ for (i = 0; i < 50; i++) {
udelay(10); │ udelay(10);
│
tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); │ val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { │ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5); │ udelay(5);
break; │ break;
} │ }
} │ }
if (tmp & EMAC_MDIO_COMM_START_BUSY) { │ if (val & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n"); │ DP(NETIF_MSG_LINK, "read phy register failed\n");
netdev_err(bp->dev, "MDC/MDIO access timeout\n"); │ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
│ *ret_val = 0;
rc = -EFAULT; │ rc = -EFAULT;
} else { │ } else {
/* Data */ │ /* Data */
tmp = ((phy->addr << 21) | (devad << 16) | val | │ val = ((phy->addr << 21) | (devad << 16) |
EMAC_MDIO_COMM_COMMAND_WRITE_45 | │ EMAC_MDIO_COMM_COMMAND_READ_45 |
EMAC_MDIO_COMM_START_BUSY); │ EMAC_MDIO_COMM_START_BUSY);
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); │ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
│
for (i = 0; i < 50; i++) { │ for (i = 0; i < 50; i++) {
udelay(10); │ udelay(10);
│
tmp = REG_RD(bp, phy->mdio_ctrl + │ val = REG_RD(bp, phy->mdio_ctrl +
EMAC_REG_EMAC_MDIO_COMM); │ EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { │ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5); │ *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
break; │ break;
} │ }
} │ }
if (tmp & EMAC_MDIO_COMM_START_BUSY) { │ if (val & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n"); │ DP(NETIF_MSG_LINK, "read phy register failed\n");
netdev_err(bp->dev, "MDC/MDIO access timeout\n"); │ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
│ *ret_val = 0;
rc = -EFAULT; │ rc = -EFAULT;
} │ }
} │ }
/* Work around for E3 A0 */ │ /* Work around for E3 A0 */
if (phy->flags & FLAGS_MDC_MDIO_WA) { │ if (phy->flags & FLAGS_MDC_MDIO_WA) {
phy->flags ^= FLAGS_DUMMY_READ; │ phy->flags ^= FLAGS_DUMMY_READ;
if (phy->flags & FLAGS_DUMMY_READ) { │ if (phy->flags & FLAGS_DUMMY_READ) {
u16 temp_val; │ u16 temp_val;
bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); │ bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
} │ }
} │ }
│
if (phy->flags & FLAGS_MDC_MDIO_WA_B0) │ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, │ bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB); │ EMAC_MDIO_STATUS_10MB);
return rc; │ return rc;
} │
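
Left and right are the clause-45 write and read paths of the same transaction shape: an ADDRESS cycle latches (addr, devad, reg), then a WRITE_45 or READ_45 cycle moves the data, and each cycle is kicked with START_BUSY and polled until the controller clears it. A minimal sketch of that two-cycle shape; the opcode bits and the 10 us poll delay are omitted, and mdio_write_reg()/mdio_read_reg() stand in for REG_WR/REG_RD on the COMM register:

/* Sketch: clause-45 access = address cycle + data cycle, each
 * START_BUSY-kicked and polled a bounded number of times. */
#include <stdio.h>

#define CMD_START_BUSY 0x80000000u

static unsigned int comm;       /* stand-in for EMAC_REG_EMAC_MDIO_COMM */

static void mdio_write_reg(unsigned int v) { comm = v; }
static unsigned int mdio_read_reg(void) { comm &= ~CMD_START_BUSY; return comm; }

static int mdio_cycle(unsigned int cmd)
{
        int i;

        mdio_write_reg(cmd | CMD_START_BUSY);
        for (i = 0; i < 50; i++)        /* bounded poll, as above */
                if (!(mdio_read_reg() & CMD_START_BUSY))
                        return 0;
        return -1;                      /* mirrors the -EFAULT timeout */
}

static int cl45_write(unsigned int addr, unsigned int devad,
                      unsigned int reg, unsigned int val)
{
        /* address cycle, then data cycle; abort if either times out */
        if (mdio_cycle((addr << 21) | (devad << 16) | reg))
                return -1;
        return mdio_cycle((addr << 21) | (devad << 16) | val);
}

int main(void)
{
        printf("%d\n", cl45_write(1, 1, 0x8000, 0x1234));       /* 0 */
        return 0;
}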
next prev up linux/drivers/net/ethernet/marvell/skge.h:1618 │ linux/drivers/net/ethernet/marvell/sky2.h:1631
│
GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */ │ GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */
GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */ │ GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */
GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */ │ GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */
GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */ │ GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */
GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */ │ GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */
GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */ │ GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */
GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */ │ GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */
/* Source Address Registers */ │ /* Source Address Registers */
GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */ │ GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */
GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */ │ GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */
GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */ │ GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */
GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */ │ GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */
GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */ │ GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */
GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */ │ GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */
│
/* Multicast Address Hash Registers */ │ /* Multicast Address Hash Registers */
GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */ │ GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */
GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */ │ GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */
GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */ │ GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */
GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */ │ GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */
│
/* Interrupt Source Registers */ │ /* Interrupt Source Registers */
GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */ │ GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */
GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */ │ GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */
GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */ │ GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */
│
/* Interrupt Mask Registers */ │ /* Interrupt Mask Registers */
GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */ │ GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */
GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */ │ GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */
GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */ │ GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */
│
/* Serial Management Interface (SMI) Registers */ │ /* Serial Management Interface (SMI) Registers */
GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */ │ GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */
GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */ │ GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */
GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */ │ GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */
│ /* MIB Counters */
│ GM_MIB_CNT_BASE = 0x0100, /* Base Address of MIB Counters */
│ GM_MIB_CNT_END = 0x025C, /* Last MIB counter */
} │
next prev up linux/drivers/net/ethernet/qlogic/qed/qed_debug.c:4285 │ linux/drivers/net/ethernet/qlogic/qed/qed_debug.c:4355
│
u32 dwords_read, size_param_offset, offset = 0, addr, len; │ u32 dwords_read, size_param_offset, offset = 0, addr, len;
bool fifo_has_data; │ bool fifo_has_data;
│
*num_dumped_dwords = 0; │ *num_dumped_dwords = 0;
│
/* Dump global params */ │ /* Dump global params */
offset += qed_dump_common_global_params(p_hwfn, │ offset += qed_dump_common_global_params(p_hwfn,
p_ptt, │ p_ptt,
dump_buf + offset, dump, 1); │ dump_buf + offset, dump, 1);
offset += qed_dump_str_param(dump_buf + offset, │ offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "reg-fifo"); │ dump, "dump-type", "igu-fifo");
│
/* Dump fifo data section header and param. The size param is 0 for │ /* Dump fifo data section header and param. The size param is 0 for
* now, and is overwritten after reading the FIFO. │ * now, and is overwritten after reading the FIFO.
*/ │ */
offset += qed_dump_section_hdr(dump_buf + offset, │ offset += qed_dump_section_hdr(dump_buf + offset,
dump, "reg_fifo_data", 1); │ dump, "igu_fifo_data", 1);
size_param_offset = offset; │ size_param_offset = offset;
offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); │ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
│
if (!dump) { │ if (!dump) {
/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to │ /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
* test how much data is available, except for reading it. │ * test how much data is available, except for reading it.
*/ │ */
offset += REG_FIFO_DEPTH_DWORDS; │ offset += IGU_FIFO_DEPTH_DWORDS;
goto out; │ goto out;
} │ }
│
fifo_has_data = qed_rd(p_hwfn, p_ptt, │ fifo_has_data = qed_rd(p_hwfn, p_ptt,
GRC_REG_TRACE_FIFO_VALID_DATA) > 0; │ IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
│
/* Pull available data from fifo. Use DMAE since this is widebus memory │ /* Pull available data from fifo. Use DMAE since this is widebus memory
* and must be accessed atomically. Test for dwords_read not passing │ * and must be accessed atomically. Test for dwords_read not passing
* buffer size since more entries could be added to the buffer as we are │ * buffer size since more entries could be added to the buffer as we are
* emptying it. │ * emptying it.
*/ │ */
addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO); │ addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
len = REG_FIFO_ELEMENT_DWORDS; │ len = IGU_FIFO_ELEMENT_DWORDS;
for (dwords_read = 0; │ for (dwords_read = 0;
fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; │ fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
dwords_read += REG_FIFO_ELEMENT_DWORDS) { │ dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
offset += qed_grc_dump_addr_range(p_hwfn, │ offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt, │ p_ptt,
dump_buf + offset, │ dump_buf + offset,
true, │ true,
addr, │ addr,
len, │ len,
true, SPLIT_TYPE_NONE, │ true, SPLIT_TYPE_NONE,
0); │ 0);
fifo_has_data = qed_rd(p_hwfn, p_ptt, │ fifo_has_data = qed_rd(p_hwfn, p_ptt,
GRC_REG_TRACE_FIFO_VALID_DATA) > 0; │ IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
} │ }
│
qed_dump_num_param(dump_buf + size_param_offset, dump, "size", │ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
dwords_read); │ dwords_read);
out: │ out:
/* Dump last section */ │ /* Dump last section */
offset += qed_dump_last_section(dump_buf, offset, dump); │ offset += qed_dump_last_section(dump_buf, offset, dump);
│
*num_dumped_dwords = offset; │ *num_dumped_dwords = offset;
│
return DBG_STATUS_OK; │ return DBG_STATUS_OK;
} │
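
The dump writes its size parameter twice: first as a 0 placeholder, then, after draining the FIFO element by element while the hardware still reports valid data, patched in place with the real dword count. A standalone sketch of that placeholder-then-patch flow; fifo_pop()/fifo_has_valid() stand in for the DMAE read and the VALID_DATA register:

/* Sketch: reserve a size slot, drain while the FIFO has data,
 * then overwrite the placeholder with the actual count. */
#include <stdio.h>

#define FIFO_ELEMENT_DWORDS 4

static unsigned int fifo_level = 10;    /* fake hardware depth */
static int fifo_has_valid(void) { return fifo_level > 0; }
static void fifo_pop(void) { if (fifo_level) fifo_level--; }

int main(void)
{
        unsigned int buf[64] = { 0 };
        unsigned int size_param_offset, offset = 0, dwords_read;

        size_param_offset = offset;
        buf[offset++] = 0;              /* size param: placeholder */

        for (dwords_read = 0; fifo_has_valid() && dwords_read < 40;
             dwords_read += FIFO_ELEMENT_DWORDS) {
                fifo_pop();             /* stand-in for the DMAE read */
                offset += FIFO_ELEMENT_DWORDS;
        }
        buf[size_param_offset] = dwords_read;   /* patch the real size */
        printf("dumped %u dwords\n", buf[size_param_offset]);
        return 0;
}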
next prev up linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:32 │ linux/drivers/net/ethernet/intel/igc/igc_ethtool.c:25
│
IGB_STAT("rx_packets", stats.gprc), │ IGC_STAT("rx_packets", stats.gprc),
IGB_STAT("tx_packets", stats.gptc), │ IGC_STAT("tx_packets", stats.gptc),
IGB_STAT("rx_bytes", stats.gorc), │ IGC_STAT("rx_bytes", stats.gorc),
IGB_STAT("tx_bytes", stats.gotc), │ IGC_STAT("tx_bytes", stats.gotc),
IGB_STAT("rx_broadcast", stats.bprc), │ IGC_STAT("rx_broadcast", stats.bprc),
IGB_STAT("tx_broadcast", stats.bptc), │ IGC_STAT("tx_broadcast", stats.bptc),
IGB_STAT("rx_multicast", stats.mprc), │ IGC_STAT("rx_multicast", stats.mprc),
IGB_STAT("tx_multicast", stats.mptc), │ IGC_STAT("tx_multicast", stats.mptc),
IGB_STAT("multicast", stats.mprc), │ IGC_STAT("multicast", stats.mprc),
IGB_STAT("collisions", stats.colc), │ IGC_STAT("collisions", stats.colc),
IGB_STAT("rx_crc_errors", stats.crcerrs), │ IGC_STAT("rx_crc_errors", stats.crcerrs),
IGB_STAT("rx_no_buffer_count", stats.rnbc), │ IGC_STAT("rx_no_buffer_count", stats.rnbc),
IGB_STAT("rx_missed_errors", stats.mpc), │ IGC_STAT("rx_missed_errors", stats.mpc),
IGB_STAT("tx_aborted_errors", stats.ecol), │ IGC_STAT("tx_aborted_errors", stats.ecol),
IGB_STAT("tx_carrier_errors", stats.tncrs), │ IGC_STAT("tx_carrier_errors", stats.tncrs),
IGB_STAT("tx_window_errors", stats.latecol), │ IGC_STAT("tx_window_errors", stats.latecol),
IGB_STAT("tx_abort_late_coll", stats.latecol), │ IGC_STAT("tx_abort_late_coll", stats.latecol),
IGB_STAT("tx_deferred_ok", stats.dc), │ IGC_STAT("tx_deferred_ok", stats.dc),
IGB_STAT("tx_single_coll_ok", stats.scc), │ IGC_STAT("tx_single_coll_ok", stats.scc),
IGB_STAT("tx_multi_coll_ok", stats.mcc), │ IGC_STAT("tx_multi_coll_ok", stats.mcc),
IGB_STAT("tx_timeout_count", tx_timeout_count), │ IGC_STAT("tx_timeout_count", tx_timeout_count),
IGB_STAT("rx_long_length_errors", stats.roc), │ IGC_STAT("rx_long_length_errors", stats.roc),
IGB_STAT("rx_short_length_errors", stats.ruc), │ IGC_STAT("rx_short_length_errors", stats.ruc),
IGB_STAT("rx_align_errors", stats.algnerrc), │ IGC_STAT("rx_align_errors", stats.algnerrc),
IGB_STAT("tx_tcp_seg_good", stats.tsctc), │ IGC_STAT("tx_tcp_seg_good", stats.tsctc),
IGB_STAT("tx_tcp_seg_failed", stats.tsctfc), │ IGC_STAT("tx_tcp_seg_failed", stats.tsctfc),
IGB_STAT("rx_flow_control_xon", stats.xonrxc), │ IGC_STAT("rx_flow_control_xon", stats.xonrxc),
IGB_STAT("rx_flow_control_xoff", stats.xoffrxc), │ IGC_STAT("rx_flow_control_xoff", stats.xoffrxc),
IGB_STAT("tx_flow_control_xon", stats.xontxc), │ IGC_STAT("tx_flow_control_xon", stats.xontxc),
IGB_STAT("tx_flow_control_xoff", stats.xofftxc), │ IGC_STAT("tx_flow_control_xoff", stats.xofftxc),
IGB_STAT("rx_long_byte_count", stats.gorc), │ IGC_STAT("rx_long_byte_count", stats.gorc),
IGB_STAT("tx_dma_out_of_sync", stats.doosync), │ IGC_STAT("tx_dma_out_of_sync", stats.doosync),
IGB_STAT("tx_smbus", stats.mgptc), │ IGC_STAT("tx_smbus", stats.mgptc),
IGB_STAT("rx_smbus", stats.mgprc), │ IGC_STAT("rx_smbus", stats.mgprc),
IGB_STAT("dropped_smbus", stats.mgpdc), │ IGC_STAT("dropped_smbus", stats.mgpdc),
IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), │ IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), │ IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), │ IGC_STAT("os2bmc_tx_by_host", stats.o2bspc),
IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), │ IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc),
IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), │ IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
IGB_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), │ IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), │ IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
│ IGC_STAT("tx_lpi_counter", stats.tlpic),
│ IGC_STAT("rx_lpi_counter", stats.rlpic),
} │
next prev up linux/drivers/net/ethernet/intel/e1000e/ich8lan.c:3615 │ linux/drivers/net/ethernet/intel/e1000e/ich8lan.c:3691
│
union ich8_hws_flash_status hsfsts; │ union ich8_hws_flash_status hsfsts;
union ich8_hws_flash_ctrl hsflctl; │ union ich8_hws_flash_ctrl hsflctl;
u32 flash_linear_addr; │ u32 flash_linear_addr;
u32 flash_data = 0; │
s32 ret_val = -E1000_ERR_NVM; │ s32 ret_val = -E1000_ERR_NVM;
u8 count = 0; │ u8 count = 0;
│
if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) │ if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt)
return -E1000_ERR_NVM; │ return -E1000_ERR_NVM;
│
flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + │ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
hw->nvm.flash_base_addr); │ hw->nvm.flash_base_addr);
│
do { │ do {
udelay(1); │ udelay(1);
/* Steps */ │ /* Steps */
ret_val = e1000_flash_cycle_init_ich8lan(hw); │ ret_val = e1000_flash_cycle_init_ich8lan(hw);
if (ret_val) │ if (ret_val)
break; │ break;
                                                                 │		/* In SPT, this register is in LAN memory space, not flash.
                                                                 │		 * Therefore, only 32-bit access is supported
│ */
│ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
│
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); │
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */ │ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
hsflctl.hsf_ctrl.fldbcount = size - 1; │ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; │ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);	 │		/* In SPT, this register is in LAN memory space, not flash.
                                                                 │		 * Therefore, only 32-bit access is supported
│ */
│ ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16);
ew32flash(ICH_FLASH_FADDR, flash_linear_addr); │ ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
│
ret_val = │ ret_val =
e1000_flash_cycle_ich8lan(hw, │ e1000_flash_cycle_ich8lan(hw,
ICH_FLASH_READ_COMMAND_TIMEOUT); │ ICH_FLASH_READ_COMMAND_TIMEOUT);
│
		/* Check if FCERR is set to 1; if so, clear it   │		/* Check if FCERR is set to 1; if so, clear it
		 * and try the whole sequence a few more times,  │		 * and try the whole sequence a few more times,
		 * else read in (shift in) the Flash Data0; the  │		 * else read in (shift in) the Flash Data0; the
		 * order is least significant byte first, msb    │		 * order is least significant byte first, msb
		 * to lsb */                                     │		 * to lsb */
if (!ret_val) { │ if (!ret_val) {
flash_data = er32flash(ICH_FLASH_FDATA0); │ *data = er32flash(ICH_FLASH_FDATA0);
if (size == 1) │
*data = (u8)(flash_data & 0x000000FF); │
else if (size == 2) │
*data = (u16)(flash_data & 0x0000FFFF); │
break; │ break;
} else { │ } else {
/* If we've gotten here, then things are probably │ /* If we've gotten here, then things are probably
* completely hosed, but if the error condition is │ * completely hosed, but if the error condition is
* detected, it won't hurt to give it another try... │ * detected, it won't hurt to give it another try...
* ICH_FLASH_CYCLE_REPEAT_COUNT times. │ * ICH_FLASH_CYCLE_REPEAT_COUNT times.
*/ │ */
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); │ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcerr) { │ if (hsfsts.hsf_status.flcerr) {
/* Repeat for some time before giving up. */ │ /* Repeat for some time before giving up. */
continue; │ continue;
} else if (!hsfsts.hsf_status.flcdone) { │ } else if (!hsfsts.hsf_status.flcdone) {
				e_dbg("Timeout error - flash cycle did not complete.\n"); │				e_dbg("Timeout error - flash cycle did not complete.\n");
break; │ break;
} │ }
} │ }
} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); │ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
│
return ret_val; │ return ret_val;
} │
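
The retry policy in this loop distinguishes two failure modes: a cycle that ends with FCERR set is a transient error worth retrying up to ICH_FLASH_CYCLE_REPEAT_COUNT times, while a cycle that never sets FLCDONE is treated as a hang and aborted at once. A condensed standalone sketch of that policy; the fake cycle fails twice, then succeeds:

/* Sketch: retry on FCERR, bail out if FLCDONE never appears. */
#include <stdio.h>

#define CYCLE_REPEAT_COUNT 10

struct status { int flcerr; int flcdone; };

static int attempts;            /* fake cycle: two FCERRs, then ok */
static struct status run_cycle(void)
{
        struct status s = { .flcerr = attempts < 2, .flcdone = 1 };

        attempts++;
        return s;
}

int main(void)
{
        int count = 0, ret = -1;

        do {
                struct status s = run_cycle();

                if (!s.flcerr) {        /* success: data is valid */
                        ret = 0;
                        break;
                }
                if (!s.flcdone)         /* hang: give up immediately */
                        break;
                /* FCERR with FLCDONE set: transient, retry the cycle */
        } while (count++ < CYCLE_REPEAT_COUNT);

        printf("ret=%d after %d attempts\n", ret, attempts);
        return 0;
}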
next prev up linux/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c:1456 │ linux/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c:1517
│
.core_init = dwxgmac2_core_init, │ .core_init = dwxgmac2_core_init,
.set_mac = dwxgmac2_set_mac, │ .set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc, │ .rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable, │ .rx_queue_enable = dwxlgmac2_rx_queue_enable,
.rx_queue_prio = dwxgmac2_rx_queue_prio, │ .rx_queue_prio = dwxgmac2_rx_queue_prio,
.tx_queue_prio = dwxgmac2_tx_queue_prio, │ .tx_queue_prio = dwxgmac2_tx_queue_prio,
.rx_queue_routing = NULL, │ .rx_queue_routing = NULL,
.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms, │ .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms, │ .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight, │ .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma, │ .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
.config_cbs = dwxgmac2_config_cbs, │ .config_cbs = dwxgmac2_config_cbs,
.dump_regs = dwxgmac2_dump_regs, │ .dump_regs = dwxgmac2_dump_regs,
.host_irq_status = dwxgmac2_host_irq_status, │ .host_irq_status = dwxgmac2_host_irq_status,
.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status, │ .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
.flow_ctrl = dwxgmac2_flow_ctrl, │ .flow_ctrl = dwxgmac2_flow_ctrl,
.pmt = dwxgmac2_pmt, │ .pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr, │ .set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr, │ .get_umac_addr = dwxgmac2_get_umac_addr,
.set_eee_mode = dwxgmac2_set_eee_mode, │ .set_eee_mode = dwxgmac2_set_eee_mode,
.reset_eee_mode = dwxgmac2_reset_eee_mode, │ .reset_eee_mode = dwxgmac2_reset_eee_mode,
.set_eee_timer = dwxgmac2_set_eee_timer, │ .set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls, │ .set_eee_pls = dwxgmac2_set_eee_pls,
.pcs_ctrl_ane = NULL, │ .pcs_ctrl_ane = NULL,
.pcs_rane = NULL, │ .pcs_rane = NULL,
.pcs_get_adv_lp = NULL, │ .pcs_get_adv_lp = NULL,
.debug = NULL, │ .debug = NULL,
.set_filter = dwxgmac2_set_filter, │ .set_filter = dwxgmac2_set_filter,
.safety_feat_config = dwxgmac3_safety_feat_config, │ .safety_feat_config = dwxgmac3_safety_feat_config,
.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status, │ .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
.safety_feat_dump = dwxgmac3_safety_feat_dump, │ .safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback, │ .set_mac_loopback = dwxgmac2_set_mac_loopback,
.rss_configure = dwxgmac2_rss_configure, │ .rss_configure = dwxgmac2_rss_configure,
.update_vlan_hash = dwxgmac2_update_vlan_hash, │ .update_vlan_hash = dwxgmac2_update_vlan_hash,
.rxp_config = dwxgmac3_rxp_config, │ .rxp_config = dwxgmac3_rxp_config,
.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp, │ .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
.flex_pps_config = dwxgmac2_flex_pps_config, │ .flex_pps_config = dwxgmac2_flex_pps_config,
.sarc_configure = dwxgmac2_sarc_configure, │ .sarc_configure = dwxgmac2_sarc_configure,
.enable_vlan = dwxgmac2_enable_vlan, │ .enable_vlan = dwxgmac2_enable_vlan,
.config_l3_filter = dwxgmac2_config_l3_filter, │ .config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter, │ .config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload, │ .set_arp_offload = dwxgmac2_set_arp_offload,
.est_configure = dwxgmac3_est_configure, │ .est_configure = dwxgmac3_est_configure,
.fpe_configure = dwxgmac3_fpe_configure, │ .fpe_configure = dwxgmac3_fpe_configure,
} │
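Note: both columns above fill in a table of function pointers (the stmmac MAC ops), leaving NULL for hooks a given core does not implement, so callers must test before invoking. A minimal C sketch of that pattern, with hypothetical names:

#include <stdio.h>

/* Hypothetical ops table: optional hooks may be NULL. */
struct mac_ops {
	void (*set_mac)(int enable);
	void (*pcs_ctrl_ane)(int ane);	/* optional hook */
};

static void demo_set_mac(int enable)
{
	printf("set_mac(%d)\n", enable);
}

static const struct mac_ops demo_ops = {
	.set_mac	= demo_set_mac,
	.pcs_ctrl_ane	= NULL,		/* not implemented, like above */
};

int main(void)
{
	demo_ops.set_mac(1);
	if (demo_ops.pcs_ctrl_ane)	/* guard every optional hook */
		demo_ops.pcs_ctrl_ane(1);
	return 0;
}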
linux/drivers/net/ethernet/amd/sunlance.c:563 │ linux/drivers/net/ethernet/amd/sunlance.c:731
│
struct lance_private *lp = netdev_priv(dev); │ struct lance_private *lp = netdev_priv(dev);
struct lance_init_block *ib = lp->init_block_mem; │ struct lance_init_block __iomem *ib = lp->init_block_iomem;
int i, j; │ int i, j;
│
spin_lock(&lp->lock); │ spin_lock(&lp->lock);
│
j = lp->tx_old; │ j = lp->tx_old;
for (i = j; i != lp->tx_new; i = j) { │ for (i = j; i != lp->tx_new; i = j) {
struct lance_tx_desc *td = &ib->btx_ring [i]; │ struct lance_tx_desc __iomem *td = &ib->btx_ring [i];
u8 bits = td->tmd1_bits; │ u8 bits = sbus_readb(&td->tmd1_bits);
│
/* If we hit a packet not owned by us, stop */ │ /* If we hit a packet not owned by us, stop */
if (bits & LE_T1_OWN) │ if (bits & LE_T1_OWN)
break; │ break;
│
if (bits & LE_T1_ERR) { │ if (bits & LE_T1_ERR) {
u16 status = td->misc; │ u16 status = sbus_readw(&td->misc);
│
dev->stats.tx_errors++; │ dev->stats.tx_errors++;
if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; │ if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; │ if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
│
if (status & LE_T3_CLOS) { │ if (status & LE_T3_CLOS) {
dev->stats.tx_carrier_errors++; │ dev->stats.tx_carrier_errors++;
if (lp->auto_select) { │ if (lp->auto_select) {
lp->tpe = 1 - lp->tpe; │ lp->tpe = 1 - lp->tpe;
printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", │ printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
dev->name, lp->tpe?"TPE":"AUI"); │ dev->name, lp->tpe?"TPE":"AUI");
STOP_LANCE(lp); │ STOP_LANCE(lp);
lp->init_ring(dev); │ lp->init_ring(dev);
load_csrs(lp); │ load_csrs(lp);
init_restart_lance(lp); │ init_restart_lance(lp);
goto out; │ goto out;
} │ }
} │ }
│
/* Buffer errors and underflows turn off the │ /* Buffer errors and underflows turn off the
* transmitter, restart the adapter. │ * transmitter, restart the adapter.
*/ │ */
if (status & (LE_T3_BUF|LE_T3_UFL)) { │ if (status & (LE_T3_BUF|LE_T3_UFL)) {
dev->stats.tx_fifo_errors++; │ dev->stats.tx_fifo_errors++;
│
printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", │ printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
dev->name); │ dev->name);
STOP_LANCE(lp); │ STOP_LANCE(lp);
lp->init_ring(dev); │ lp->init_ring(dev);
load_csrs(lp); │ load_csrs(lp);
init_restart_lance(lp); │ init_restart_lance(lp);
goto out; │ goto out;
} │ }
} else if ((bits & LE_T1_POK) == LE_T1_POK) { │ } else if ((bits & LE_T1_POK) == LE_T1_POK) {
/* │ /*
* So we don't count the packet more than once. │ * So we don't count the packet more than once.
*/ │ */
td->tmd1_bits = bits & ~(LE_T1_POK); │ sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits);
│
/* One collision before packet was sent. */ │ /* One collision before packet was sent. */
if (bits & LE_T1_EONE) │ if (bits & LE_T1_EONE)
dev->stats.collisions++; │ dev->stats.collisions++;
│
/* More than one collision, be optimistic. */ │ /* More than one collision, be optimistic. */
if (bits & LE_T1_EMORE) │ if (bits & LE_T1_EMORE)
dev->stats.collisions += 2; │ dev->stats.collisions += 2;
│
dev->stats.tx_packets++; │ dev->stats.tx_packets++;
} │ }
│
j = TX_NEXT(j); │ j = TX_NEXT(j);
} │ }
lp->tx_old = j; │ lp->tx_old = j;
out: │
if (netif_queue_stopped(dev) && │ if (netif_queue_stopped(dev) &&
TX_BUFFS_AVAIL > 0) │ TX_BUFFS_AVAIL > 0)
netif_wake_queue(dev); │ netif_wake_queue(dev);
│ out:
spin_unlock(&lp->lock); │ spin_unlock(&lp->lock);
} │
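Note: the right-hand column routes every init-block access through sbus_readb()/sbus_writeb() on an __iomem pointer instead of dereferencing host memory. A simplified userspace stand-in for that accessor discipline (a sketch under that assumption, not the kernel API):

#include <stdint.h>

/* volatile stands in for the kernel's __iomem accessors: every access
 * is a forced, ordered load or store the compiler cannot cache. */
static inline uint8_t mmio_read8(volatile const uint8_t *addr)
{
	return *addr;
}

static inline void mmio_write8(uint8_t val, volatile uint8_t *addr)
{
	*addr = val;
}

/* Clearing a descriptor bit the way the driver clears LE_T1_POK: */
static inline void mmio_clear8(volatile uint8_t *reg, uint8_t bit)
{
	mmio_write8(mmio_read8(reg) & (uint8_t)~bit, reg);
}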
linux/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c:1953 │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c:2025
│
struct ixgbe_mac_info *mac = &hw->mac; │ struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val; │ u32 lval, sval, flx_val;
s32 rc; │ s32 rc;
│
rc = mac->ops.read_iosf_sb_reg(hw, │ rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), │ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); │ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
if (rc) │ if (rc)
return rc; │ return rc;
│
lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; │ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; │ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; │ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; │ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; │ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
rc = mac->ops.write_iosf_sb_reg(hw, │ rc = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), │ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, lval); │ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
if (rc) │ if (rc)
return rc; │ return rc;
│
rc = mac->ops.read_iosf_sb_reg(hw, │ rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), │ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); │ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
if (rc) │ if (rc)
return rc; │ return rc;
│
sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; │ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; │ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
rc = mac->ops.write_iosf_sb_reg(hw, │ rc = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), │ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, sval); │ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
if (rc) │ if (rc)
return rc; │ return rc;
│
rc = mac->ops.read_iosf_sb_reg(hw, │ rc = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), │ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); │ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
if (rc) │ if (rc)
return rc; │ return rc;
│
rc = mac->ops.read_iosf_sb_reg(hw, │ rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), │ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); │ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
if (rc) │ if (rc)
return rc; │ return rc;
│
flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; │ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; │ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; │ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; │ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; │ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
│
rc = mac->ops.write_iosf_sb_reg(hw, │ rc = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), │ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); │ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
if (rc) │ if (rc)
return rc; │ return rc;
│
rc = ixgbe_restart_an_internal_phy_x550em(hw); │ ixgbe_restart_an_internal_phy_x550em(hw);
return rc; │
│ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
} │
linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c:1659 │ linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c:1583
│
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); │ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u16 local_port, last_local_port; │ u16 local_port, local_port_1, last_local_port;
│ struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
│ u8 masked_count, current_page = 0;
│ unsigned long cb_priv = 0;
LIST_HEAD(bulk_list); │ LIST_HEAD(bulk_list);
unsigned int masked_count; │
u8 current_page = 0; │
char *sbsr_pl; │ char *sbsr_pl;
int i; │ int i;
int err; │ int err;
int err2; │ int err2;
│
sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL); │ sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
if (!sbsr_pl) │ if (!sbsr_pl)
return -ENOMEM; │ return -ENOMEM;
│
local_port = MLXSW_PORT_CPU_PORT; │ local_port = MLXSW_PORT_CPU_PORT;
next_batch: │ next_batch:
│ local_port_1 = local_port;
masked_count = 0; │ masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, true); │ mlxsw_reg_sbsr_pack(sbsr_pl, false);
mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page); │ mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE + │ last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1; │ MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
│
for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) │ for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); │ mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) │ for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); │ mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { │ for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port]) │ if (!mlxsw_sp->ports[local_port])
continue; │ continue;
if (local_port > last_local_port) { │ if (local_port > last_local_port) {
current_page++; │ current_page++;
goto do_query; │ goto do_query;
} │ }
if (local_port != MLXSW_PORT_CPU_PORT) { │ if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */ │ /* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, │ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
local_port, 1); │ local_port, 1);
} │ }
mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); │ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { │ for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, │ err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
&bulk_list); │ &bulk_list);
if (err) │ if (err)
goto out; │ goto out;
} │ }
if (++masked_count == MASKED_COUNT_MAX) │ if (++masked_count == MASKED_COUNT_MAX)
goto do_query; │ goto do_query;
} │ }
│
do_query: │ do_query:
│ cb_ctx.masked_count = masked_count;
│ cb_ctx.local_port_1 = local_port_1;
│ memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl, │ err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
&bulk_list, NULL, 0); │ &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
│ cb_priv);
if (err) │ if (err)
goto out; │ goto out;
if (local_port < mlxsw_core_max_ports(mlxsw_core)) { │ if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
local_port++; │ local_port++;
goto next_batch; │ goto next_batch;
} │ }
│
out: │ out:
err2 = mlxsw_reg_trans_bulk_wait(&bulk_list); │ err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
if (!err) │ if (!err)
err = err2; │ err = err2;
kfree(sbsr_pl); │ kfree(sbsr_pl);
return err; │ return err;
} │
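Note: the right-hand column carries a small callback context through the unsigned long cb_priv cookie with memcpy() and recovers it in the callback. A sketch of that idiom with a hypothetical struct; the size assertion is what makes the trick safe:

#include <string.h>

struct occ_ctx {			/* hypothetical context */
	unsigned char masked_count;
	unsigned char local_port_1;
};

_Static_assert(sizeof(struct occ_ctx) <= sizeof(unsigned long),
	       "context must fit inside the cookie");

static unsigned long pack_ctx(struct occ_ctx ctx)
{
	unsigned long priv = 0;

	memcpy(&priv, &ctx, sizeof(ctx));	/* struct -> cookie */
	return priv;
}

static struct occ_ctx unpack_ctx(unsigned long priv)
{
	struct occ_ctx ctx;

	memcpy(&ctx, &priv, sizeof(ctx));	/* cookie -> struct */
	return ctx;
}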
linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c:883 │ linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c:1081
│
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer; │ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc_mask = &value->misc; │ struct mlx5dr_match_misc *misc_mask = &value->misc;
│
DR_STE_SET_TAG(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid); │ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
DR_STE_SET_TAG(eth_l2_src, bit_mask, first_cfi, mask, first_cfi); │ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
DR_STE_SET_TAG(eth_l2_src, bit_mask, first_priority, mask, first_prio); │ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
DR_STE_SET_TAG(eth_l2_src, bit_mask, ip_fragmented, mask, frag); │ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
DR_STE_SET_TAG(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype); │ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
DR_STE_SET_ONES(eth_l2_src, bit_mask, l3_type, mask, ip_version); │ DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);
│
if (mask->svlan_tag || mask->cvlan_tag) { │ if (mask->svlan_tag || mask->cvlan_tag) {
MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1); │ MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1);
mask->cvlan_tag = 0; │ mask->cvlan_tag = 0;
mask->svlan_tag = 0; │ mask->svlan_tag = 0;
} │ }
│
if (inner) { │ if (inner) {
if (misc_mask->inner_second_cvlan_tag || │ if (misc_mask->inner_second_cvlan_tag ||
misc_mask->inner_second_svlan_tag) { │ misc_mask->inner_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1); │ MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
misc_mask->inner_second_cvlan_tag = 0; │ misc_mask->inner_second_cvlan_tag = 0;
misc_mask->inner_second_svlan_tag = 0; │ misc_mask->inner_second_svlan_tag = 0;
} │ }
│
DR_STE_SET_TAG(eth_l2_src, bit_mask, │ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
second_vlan_id, misc_mask, inner_second_vid); │ second_vlan_id, misc_mask, inner_second_vid);
DR_STE_SET_TAG(eth_l2_src, bit_mask, │ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
second_cfi, misc_mask, inner_second_cfi); │ second_cfi, misc_mask, inner_second_cfi);
DR_STE_SET_TAG(eth_l2_src, bit_mask, │ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
second_priority, misc_mask, inner_second_prio); │ second_priority, misc_mask, inner_second_prio);
} else { │ } else {
if (misc_mask->outer_second_cvlan_tag || │ if (misc_mask->outer_second_cvlan_tag ||
misc_mask->outer_second_svlan_tag) { │ misc_mask->outer_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1); │ MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
misc_mask->outer_second_cvlan_tag = 0; │ misc_mask->outer_second_cvlan_tag = 0;
misc_mask->outer_second_svlan_tag = 0; │ misc_mask->outer_second_svlan_tag = 0;
} │ }
│
DR_STE_SET_TAG(eth_l2_src, bit_mask, │ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
second_vlan_id, misc_mask, outer_second_vid); │ second_vlan_id, misc_mask, outer_second_vid);
DR_STE_SET_TAG(eth_l2_src, bit_mask, │ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
second_cfi, misc_mask, outer_second_cfi); │ second_cfi, misc_mask, outer_second_cfi);
DR_STE_SET_TAG(eth_l2_src, bit_mask, │ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
second_priority, misc_mask, outer_second_prio); │ second_priority, misc_mask, outer_second_prio);
} │ }
} │
linux/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c:324 │ linux/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c:262
│
struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr; │ struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
int phymode = socfpga_get_plat_phymode(dwmac); │ int phymode = socfpga_get_plat_phymode(dwmac);
u32 reg_offset = dwmac->reg_offset; │ u32 reg_offset = dwmac->reg_offset;
u32 reg_shift = dwmac->reg_shift; │ u32 reg_shift = dwmac->reg_shift;
u32 ctrl, val, module; │ u32 ctrl, val, module;
│
if (socfpga_set_phy_mode_common(phymode, &val)) │ if (socfpga_set_phy_mode_common(phymode, &val)) {
│ dev_err(dwmac->dev, "bad phy mode %d\n", phymode);
return -EINVAL; │ return -EINVAL;
│ }
│
/* Overwrite val to GMII if splitter core is enabled. The phymode here │ /* Overwrite val to GMII if splitter core is enabled. The phymode here
* is the actual phy mode on phy hardware, but phy interface from │ * is the actual phy mode on phy hardware, but phy interface from
* EMAC core is GMII. │ * EMAC core is GMII.
*/ │ */
if (dwmac->splitter_base) │ if (dwmac->splitter_base)
val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII; │ val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
│
/* Assert reset to the enet controller before changing the phy mode */ │ /* Assert reset to the enet controller before changing the phy mode */
reset_control_assert(dwmac->stmmac_ocp_rst); │ reset_control_assert(dwmac->stmmac_ocp_rst);
reset_control_assert(dwmac->stmmac_rst); │ reset_control_assert(dwmac->stmmac_rst);
│
regmap_read(sys_mgr_base_addr, reg_offset, &ctrl); │ regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK); │ ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
ctrl |= val; │ ctrl |= val << reg_shift;
│
if (dwmac->f2h_ptp_ref_clk || │ if (dwmac->f2h_ptp_ref_clk ||
phymode == PHY_INTERFACE_MODE_MII || │ phymode == PHY_INTERFACE_MODE_MII ||
phymode == PHY_INTERFACE_MODE_GMII || │ phymode == PHY_INTERFACE_MODE_GMII ||
phymode == PHY_INTERFACE_MODE_SGMII) { │ phymode == PHY_INTERFACE_MODE_SGMII) {
ctrl |= SYSMGR_GEN10_EMACGRP_CTRL_PTP_REF_CLK_MASK; │ regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
regmap_read(sys_mgr_base_addr, SYSMGR_FPGAINTF_EMAC_REG, │
&module); │ &module);
module |= (SYSMGR_FPGAINTF_EMAC_BIT << reg_shift); │ module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
regmap_write(sys_mgr_base_addr, SYSMGR_FPGAINTF_EMAC_REG, │ regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
module); │ module);
} else { │
ctrl &= ~SYSMGR_GEN10_EMACGRP_CTRL_PTP_REF_CLK_MASK; │
} │ }
│
│ if (dwmac->f2h_ptp_ref_clk)
│ ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
│ else
│ ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK <<
│ (reg_shift / 2));
│
regmap_write(sys_mgr_base_addr, reg_offset, ctrl); │ regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
│
/* Deassert reset for the phy configuration to be sampled by │ /* Deassert reset for the phy configuration to be sampled by
* the enet controller, and operation to start in requested mode │ * the enet controller, and operation to start in requested mode
*/ │ */
reset_control_deassert(dwmac->stmmac_ocp_rst); │ reset_control_deassert(dwmac->stmmac_ocp_rst);
reset_control_deassert(dwmac->stmmac_rst); │ reset_control_deassert(dwmac->stmmac_rst);
if (phymode == PHY_INTERFACE_MODE_SGMII) { │ if (phymode == PHY_INTERFACE_MODE_SGMII) {
if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) { │ if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
dev_err(dwmac->dev, "Unable to initialize TSE PCS"); │ dev_err(dwmac->dev, "Unable to initialize TSE PCS");
return -EINVAL; │ return -EINVAL;
} │ }
} │ }
│
return 0; │ return 0;
} │
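Note: the right-hand column shifts both the PHYSEL mask and the new value by reg_shift before the read-modify-write on the system-manager register. The field update, pulled out as a sketch:

#include <stdint.h>

/* Clear a field at its shifted position, then install the new value. */
static inline uint32_t field_update(uint32_t reg, uint32_t mask,
				    uint32_t val, unsigned int shift)
{
	reg &= ~(mask << shift);
	reg |= (val & mask) << shift;
	return reg;
}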
linux/drivers/net/ethernet/netronome/nfp/nfd3/dp.c:1249 │ linux/drivers/net/ethernet/netronome/nfp/nfdk/dp.c:1423
│
unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off; │ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
struct nfp_net_rx_buf *rxbuf; │ struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd; │ struct nfp_net_rx_desc *rxd;
dma_addr_t new_dma_addr; │ dma_addr_t new_dma_addr;
struct sk_buff *skb; │ struct sk_buff *skb;
void *new_frag; │ void *new_frag;
int idx; │ int idx;
│
idx = D_IDX(rx_ring, rx_ring->rd_p); │ idx = D_IDX(rx_ring, rx_ring->rd_p);
│
rxd = &rx_ring->rxds[idx]; │ rxd = &rx_ring->rxds[idx];
if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) │ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
return false; │ return false;
│
/* Memory barrier to ensure that we won't do other reads │ /* Memory barrier to ensure that we won't do other reads
* before the DD bit. │ * before the DD bit.
*/ │ */
dma_rmb(); │ dma_rmb();
│
rx_ring->rd_p++; │ rx_ring->rd_p++;
│
rxbuf = &rx_ring->rxbufs[idx]; │ rxbuf = &rx_ring->rxbufs[idx];
meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK; │ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
data_len = le16_to_cpu(rxd->rxd.data_len); │ data_len = le16_to_cpu(rxd->rxd.data_len);
pkt_len = data_len - meta_len; │ pkt_len = data_len - meta_len;
│
pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off; │ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) │ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
pkt_off += meta_len; │ pkt_off += meta_len;
else │ else
pkt_off += dp->rx_offset; │ pkt_off += dp->rx_offset;
meta_off = pkt_off - meta_len; │ meta_off = pkt_off - meta_len;
│
/* Stats update */ │ /* Stats update */
u64_stats_update_begin(&r_vec->rx_sync); │ u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++; │ r_vec->rx_pkts++;
r_vec->rx_bytes += pkt_len; │ r_vec->rx_bytes += pkt_len;
u64_stats_update_end(&r_vec->rx_sync); │ u64_stats_update_end(&r_vec->rx_sync);
│
nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len); │ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
│
if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) { │ if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n", │ nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
meta_len); │ meta_len);
nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); │ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
return true; │ return true;
} │ }
│
skb = build_skb(rxbuf->frag, dp->fl_bufsz); │ skb = build_skb(rxbuf->frag, dp->fl_bufsz);
if (unlikely(!skb)) { │ if (unlikely(!skb)) {
nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); │ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
return true; │ return true;
} │ }
new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr); │ new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
if (unlikely(!new_frag)) { │ if (unlikely(!new_frag)) {
nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); │ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
return true; │ return true;
} │ }
│
nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); │ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
│
nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); │ nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
│
skb_reserve(skb, pkt_off); │ skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len); │ skb_put(skb, pkt_len);
│
nfp_app_ctrl_rx(nn->app, skb); │ nfp_app_ctrl_rx(nn->app, skb);
│
return true; │ return true;
} │
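Note: both columns check the descriptor-done (DD) bit and only then issue dma_rmb(), so no other descriptor field is read before ownership is confirmed. A userspace analogue, assuming a C11 acquire fence as a stand-in for dma_rmb():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define DESC_RX_DD	0x80	/* hypothetical DD bit */

struct rx_desc {
	_Atomic uint8_t meta_len_dd;	/* device sets DD last */
	uint16_t data_len;
};

static bool desc_ready(struct rx_desc *rxd, uint16_t *len)
{
	if (!(atomic_load_explicit(&rxd->meta_len_dd,
				   memory_order_relaxed) & DESC_RX_DD))
		return false;

	/* Same role as dma_rmb(): later reads of the descriptor must
	 * not be hoisted above the DD-bit check. */
	atomic_thread_fence(memory_order_acquire);

	*len = rxd->data_len;
	return true;
}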
linux/drivers/net/ethernet/intel/igb/e1000_phy.c:2206 │ linux/drivers/net/ethernet/intel/igb/e1000_phy.c:2290
│
struct e1000_phy_info *phy = &hw->phy; │ struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = 0; │ s32 ret_val = 0;
│
/* Switch to PHY page 0xFF. */ │ /* Switch to PHY page 0xFF. */
ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
/* Switch to PHY page 0xFB. */ │ /* Switch to PHY page 0xFB. */
ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x0C0D);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
/* Switch to PHY page 0x12. */ │ /* Switch to PHY page 0x12. */
ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
/* Change mode to SGMII-to-Copper */ │ /* Change mode to SGMII-to-Copper */
ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
│ if (ret_val)
│ goto out;
│
│ /* Switch to PHY page 1. */
│ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1);
│ if (ret_val)
│ goto out;
│
│ /* Change mode to 1000BASE-X/SGMII and autoneg enable */
│ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
/* Return the PHY to page 0. */ │ /* Return the PHY to page 0. */
ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); │ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
ret_val = igb_phy_sw_reset(hw); │ ret_val = igb_phy_sw_reset(hw);
if (ret_val) { │ if (ret_val) {
hw_dbg("Error committing the PHY changes\n"); │ hw_dbg("Error committing the PHY changes\n");
return ret_val; │ return ret_val;
} │ }
│
/* msec_delay(1000); */ │ /* msec_delay(1000); */
usleep_range(1000, 2000); │ usleep_range(1000, 2000);
out: │ out:
return ret_val; │ return ret_val;
} │
linux/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c:728 │ linux/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c:574
│
struct netlink_ext_ack *extack = cls->common.extack; │ struct netlink_ext_ack *extack = cls->common.extack;
struct dpaa2_switch_mirror_entry *mirror_entry; │ struct dpaa2_switch_mirror_entry *mirror_entry;
struct ethsw_core *ethsw = block->ethsw; │ struct ethsw_core *ethsw = block->ethsw;
struct dpaa2_switch_mirror_entry *tmp; │ struct dpaa2_switch_mirror_entry *tmp;
struct flow_action_entry *cls_act; │ struct flow_action_entry *cls_act;
struct list_head *pos, *n; │ struct list_head *pos, *n;
bool mirror_port_enabled; │ bool mirror_port_enabled;
u16 if_id; │ u16 if_id, vlan;
│ int err;
│
mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs); │ mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
cls_act = &cls->rule->action.entries[0]; │ cls_act = &cls->rule->action.entries[0];
│
/* Offload rules only when the destination is a DPAA2 switch port */ │ /* Offload rules only when the destination is a DPAA2 switch port */
if (!dpaa2_switch_port_dev_check(cls_act->dev)) { │ if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
NL_SET_ERR_MSG_MOD(extack, │ NL_SET_ERR_MSG_MOD(extack,
"Destination not a DPAA2 switch port"); │ "Destination not a DPAA2 switch port");
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
} │ }
if_id = dpaa2_switch_get_index(ethsw, cls_act->dev); │ if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
│
/* We have a single mirror port but can configure egress mirroring on │ /* We have a single mirror port but can configure egress mirroring on
* all the other switch ports. We need to allow mirroring rules only │ * all the other switch ports. We need to allow mirroring rules only
* when the destination port is the same. │ * when the destination port is the same.
*/ │ */
if (mirror_port_enabled && ethsw->mirror_port != if_id) { │ if (mirror_port_enabled && ethsw->mirror_port != if_id) {
NL_SET_ERR_MSG_MOD(extack, │ NL_SET_ERR_MSG_MOD(extack,
"Multiple mirror ports not supported"); │ "Multiple mirror ports not supported");
return -EBUSY; │ return -EBUSY;
} │ }
│
│ /* Parse the key */
│ err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
│ if (err)
│ return err;
│
/* Make sure that we don't already have a mirror rule with the same │ /* Make sure that we don't already have a mirror rule with the same
* configuration. One matchall rule per block is the maximum. │ * configuration.
*/ │ */
list_for_each_safe(pos, n, &block->mirror_entries) { │ list_for_each_safe(pos, n, &block->mirror_entries) {
tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list); │ tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);
│
if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) { │ if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
│ tmp->cfg.vlan_id == vlan) {
NL_SET_ERR_MSG_MOD(extack, │ NL_SET_ERR_MSG_MOD(extack,
"Matchall mirror filter already installed"); │ "VLAN mirror filter already installed");
return -EBUSY; │ return -EBUSY;
} │ }
} │ }
│
mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL); │ mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
if (!mirror_entry) │ if (!mirror_entry)
return -ENOMEM; │ return -ENOMEM;
│
mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL; │ mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
│ mirror_entry->cfg.vlan_id = vlan;
mirror_entry->cookie = cls->cookie; │ mirror_entry->cookie = cls->cookie;
│
return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id, │ return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
extack); │ extack);
} │
linux/drivers/net/ethernet/intel/i40e/i40e_dcb.c:1062 │ linux/drivers/net/ethernet/intel/i40e/i40e_dcb.c:987
│
struct i40e_dcb_ets_config *etsrec; │ u8 priority0, priority1, maxtcwilling = 0;
│ struct i40e_dcb_ets_config *etscfg;
u16 offset = 0, typelength, i; │ u16 offset = 0, typelength, i;
u8 priority0, priority1; │
u8 *buf = tlv->tlvinfo; │ u8 *buf = tlv->tlvinfo;
u32 ouisubtype; │ u32 ouisubtype;
│
typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | │ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
I40E_IEEE_ETS_TLV_LENGTH); │ I40E_IEEE_ETS_TLV_LENGTH);
tlv->typelength = htons(typelength); │ tlv->typelength = htons(typelength);
│
ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | │ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
I40E_IEEE_SUBTYPE_ETS_REC); │ I40E_IEEE_SUBTYPE_ETS_CFG);
tlv->ouisubtype = htonl(ouisubtype); │ tlv->ouisubtype = htonl(ouisubtype);
│
etsrec = &dcbcfg->etsrec; │ /* First Octet post subtype
/* First Octet is reserved */ │ * --------------------------
│ * |will-|CBS | Re- | Max |
│ * |ing | |served| TCs |
│ * --------------------------
│ * |1bit | 1bit|3 bits|3bits|
│ */
│ etscfg = &dcbcfg->etscfg;
│ if (etscfg->willing)
│ maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
│ maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
│ buf[offset] = maxtcwilling;
│
/* Move offset to Priority Assignment Table */ │ /* Move offset to Priority Assignment Table */
offset++; │ offset++;
│
/* Priority Assignment Table (4 octets) │ /* Priority Assignment Table (4 octets)
* Octets:| 1 | 2 | 3 | 4 | │ * Octets:| 1 | 2 | 3 | 4 |
* ----------------------------------------- │ * -----------------------------------------
* |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| │ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
* ----------------------------------------- │ * -----------------------------------------
* Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| │ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
* ----------------------------------------- │ * -----------------------------------------
*/ │ */
for (i = 0; i < 4; i++) { │ for (i = 0; i < 4; i++) {
priority0 = etsrec->prioritytable[i * 2] & 0xF; │ priority0 = etscfg->prioritytable[i * 2] & 0xF;
priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF; │ priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) | │ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
priority1; │ priority1;
offset++; │ offset++;
} │ }
│
/* TC Bandwidth Table (8 octets) │ /* TC Bandwidth Table (8 octets)
* Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | │ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
* --------------------------------- │ * ---------------------------------
* |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| │ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
* --------------------------------- │ * ---------------------------------
*/ │ */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) │ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
buf[offset++] = etsrec->tcbwtable[i]; │ buf[offset++] = etscfg->tcbwtable[i];
│
/* TSA Assignment Table (8 octets) │ /* TSA Assignment Table (8 octets)
* Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | │ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
* --------------------------------- │ * ---------------------------------
* |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| │ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
* --------------------------------- │ * ---------------------------------
*/ │ */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) │ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
buf[offset++] = etsrec->tsatable[i]; │ buf[offset++] = etscfg->tsatable[i];
} │
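Note: the Priority Assignment Table packs two 4-bit priorities per octet, the even-indexed one in the high nibble, matching the bit diagram in the comments (the shift is taken to be 4 here). A sketch of the packing loop:

#include <stdint.h>

#define ETS_PRIO_1_SHIFT	4	/* high-nibble position */

static void pack_prio_table(const uint8_t prio[8], uint8_t out[4])
{
	for (int i = 0; i < 4; i++) {
		uint8_t hi = prio[i * 2] & 0xF;
		uint8_t lo = prio[i * 2 + 1] & 0xF;

		out[i] = (uint8_t)(hi << ETS_PRIO_1_SHIFT) | lo;
	}
}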
linux/drivers/net/ethernet/intel/igb/igb_ptp.c:444 │ linux/drivers/net/ethernet/intel/igc/igc_ptp.c:145
│
static const u32 aux0_sel_sdp[IGB_N_SDP] = { │ static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = {
AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, │ IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3,
}; │ };
static const u32 aux1_sel_sdp[IGB_N_SDP] = { │ static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = {
AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, │ IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3,
}; │ };
static const u32 ts_sdp_en[IGB_N_SDP] = { │ static const u32 igc_ts_sdp_en[IGC_N_SDP] = {
TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, │ IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN,
}; │ };
static const u32 ts_sdp_sel_tt0[IGB_N_SDP] = { │ static const u32 igc_ts_sdp_sel_tt0[IGC_N_SDP] = {
TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0, │ IGC_TS_SDP0_SEL_TT0, IGC_TS_SDP1_SEL_TT0,
TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0, │ IGC_TS_SDP2_SEL_TT0, IGC_TS_SDP3_SEL_TT0,
}; │ };
static const u32 ts_sdp_sel_tt1[IGB_N_SDP] = { │ static const u32 igc_ts_sdp_sel_tt1[IGC_N_SDP] = {
TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1, │ IGC_TS_SDP0_SEL_TT1, IGC_TS_SDP1_SEL_TT1,
TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1, │ IGC_TS_SDP2_SEL_TT1, IGC_TS_SDP3_SEL_TT1,
}; │ };
static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = { │ static const u32 igc_ts_sdp_sel_fc0[IGC_N_SDP] = {
TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0, │ IGC_TS_SDP0_SEL_FC0, IGC_TS_SDP1_SEL_FC0,
TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0, │ IGC_TS_SDP2_SEL_FC0, IGC_TS_SDP3_SEL_FC0,
}; │ };
static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = { │ static const u32 igc_ts_sdp_sel_fc1[IGC_N_SDP] = {
TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, │ IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1,
TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, │ IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1,
}; │ };
static const u32 ts_sdp_sel_clr[IGB_N_SDP] = { │ static const u32 igc_ts_sdp_sel_clr[IGC_N_SDP] = {
TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, │ IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1,
TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, │ IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1,
}; │ };
struct e1000_hw *hw = &igb->hw; │ struct igc_hw *hw = &igc->hw;
u32 ctrl, ctrl_ext, tssdp = 0; │ u32 ctrl, ctrl_ext, tssdp = 0;
│
ctrl = rd32(E1000_CTRL); │ ctrl = rd32(IGC_CTRL);
ctrl_ext = rd32(E1000_CTRL_EXT); │ ctrl_ext = rd32(IGC_CTRL_EXT);
tssdp = rd32(E1000_TSSDP); │ tssdp = rd32(IGC_TSSDP);
│
igb_pin_direction(pin, 0, &ctrl, &ctrl_ext); │ igc_pin_direction(pin, 0, &ctrl, &ctrl_ext);
│
/* Make sure this pin is not enabled as an input. */ │ /* Make sure this pin is not enabled as an input. */
if ((tssdp & AUX0_SEL_SDP3) == aux0_sel_sdp[pin]) │ if ((tssdp & IGC_AUX0_SEL_SDP3) == igc_aux0_sel_sdp[pin])
tssdp &= ~AUX0_TS_SDP_EN; │ tssdp &= ~IGC_AUX0_TS_SDP_EN;
│
if ((tssdp & AUX1_SEL_SDP3) == aux1_sel_sdp[pin]) │ if ((tssdp & IGC_AUX1_SEL_SDP3) == igc_aux1_sel_sdp[pin])
tssdp &= ~AUX1_TS_SDP_EN; │ tssdp &= ~IGC_AUX1_TS_SDP_EN;
│
tssdp &= ~ts_sdp_sel_clr[pin]; │ tssdp &= ~igc_ts_sdp_sel_clr[pin];
if (freq) { │ if (freq) {
if (chan == 1) │ if (chan == 1)
tssdp |= ts_sdp_sel_fc1[pin]; │ tssdp |= igc_ts_sdp_sel_fc1[pin];
else │ else
tssdp |= ts_sdp_sel_fc0[pin]; │ tssdp |= igc_ts_sdp_sel_fc0[pin];
} else { │ } else {
if (chan == 1) │ if (chan == 1)
tssdp |= ts_sdp_sel_tt1[pin]; │ tssdp |= igc_ts_sdp_sel_tt1[pin];
else │ else
tssdp |= ts_sdp_sel_tt0[pin]; │ tssdp |= igc_ts_sdp_sel_tt0[pin];
} │ }
tssdp |= ts_sdp_en[pin]; │ tssdp |= igc_ts_sdp_en[pin];
│
wr32(E1000_TSSDP, tssdp); │ wr32(IGC_TSSDP, tssdp);
wr32(E1000_CTRL, ctrl); │ wr32(IGC_CTRL, ctrl);
wr32(E1000_CTRL_EXT, ctrl_ext); │ wr32(IGC_CTRL_EXT, ctrl_ext);
} │
linux/drivers/net/ethernet/intel/igb/igb_main.c:5788 │ linux/drivers/net/ethernet/intel/igc/igc_main.c:4006
│
struct igb_adapter *adapter = q_vector->adapter; │ struct igc_adapter *adapter = q_vector->adapter;
u32 new_itr = q_vector->itr_val; │ u32 new_itr = q_vector->itr_val;
u8 current_itr = 0; │ u8 current_itr = 0;
│
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */ │ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
if (adapter->link_speed != SPEED_1000) { │ switch (adapter->link_speed) {
│ case SPEED_10:
│ case SPEED_100:
current_itr = 0; │ current_itr = 0;
new_itr = IGB_4K_ITR; │ new_itr = IGC_4K_ITR;
goto set_itr_now; │ goto set_itr_now;
│ default:
│ break;
} │ }
│
igb_update_itr(q_vector, &q_vector->tx); │ igc_update_itr(q_vector, &q_vector->tx);
igb_update_itr(q_vector, &q_vector->rx); │ igc_update_itr(q_vector, &q_vector->rx);
│
current_itr = max(q_vector->rx.itr, q_vector->tx.itr); │ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
│
/* conservative mode (itr 3) eliminates the lowest_latency setting */ │ /* conservative mode (itr 3) eliminates the lowest_latency setting */
if (current_itr == lowest_latency && │ if (current_itr == lowest_latency &&
((q_vector->rx.ring && adapter->rx_itr_setting == 3) || │ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
(!q_vector->rx.ring && adapter->tx_itr_setting == 3))) │ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
current_itr = low_latency; │ current_itr = low_latency;
│
switch (current_itr) { │ switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */ │ /* counts and packets in update_itr are dependent on these numbers */
case lowest_latency: │ case lowest_latency:
new_itr = IGB_70K_ITR; /* 70,000 ints/sec */ │ new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
break; │ break;
case low_latency: │ case low_latency:
new_itr = IGB_20K_ITR; /* 20,000 ints/sec */ │ new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
break; │ break;
case bulk_latency: │ case bulk_latency:
new_itr = IGB_4K_ITR; /* 4,000 ints/sec */ │ new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
break; │ break;
default: │ default:
break; │ break;
} │ }
│
set_itr_now: │ set_itr_now:
if (new_itr != q_vector->itr_val) { │ if (new_itr != q_vector->itr_val) {
/* this attempts to bias the interrupt rate towards Bulk │ /* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is │ * by adding intermediate steps when interrupt rate is
* increasing │ * increasing
*/ │ */
new_itr = new_itr > q_vector->itr_val ? │ new_itr = new_itr > q_vector->itr_val ?
max((new_itr * q_vector->itr_val) / │ max((new_itr * q_vector->itr_val) /
(new_itr + (q_vector->itr_val >> 2)), │ (new_itr + (q_vector->itr_val >> 2)),
new_itr) : new_itr; │ new_itr) : new_itr;
/* Don't write the value here; it resets the adapter's │ /* Don't write the value here; it resets the adapter's
* internal timer, and causes us to delay far longer than │ * internal timer, and causes us to delay far longer than
* we should between interrupts. Instead, we write the ITR │ * we should between interrupts. Instead, we write the ITR
* value at the beginning of the next interrupt so the timing │ * value at the beginning of the next interrupt so the timing
* ends up being correct. │ * ends up being correct.
*/ │ */
q_vector->itr_val = new_itr; │ q_vector->itr_val = new_itr;
q_vector->set_itr = 1; │ q_vector->set_itr = 1;
} │ }
} │
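Note: the stepping arithmetic above, extracted as a worked check (not the driver itself): when the target interval exceeds the current one, (new * old) / (new + old/4) is computed and the larger of it and the target is kept.

#include <stdio.h>

static unsigned int itr_step(unsigned int cur, unsigned int target)
{
	if (target <= cur)
		return target;

	unsigned int interp = (target * cur) / (target + (cur >> 2));

	return interp > target ? interp : target;
}

int main(void)
{
	/* e.g. moving from the 20K setting toward the 4K setting */
	printf("%u\n", itr_step(196, 980));	/* prints 980 */
	return 0;
}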
linux/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c:281 │ linux/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c:451
│
/* RSS_IPV4_TCP_NODE */ │ /* RSS_IPV4_OTHERS_NODE */
.node_type = EWDN, │ .node_type = EWDN,
.last_node = 0, │ .last_node = 0,
.hdr_len_store = 1, │ .hdr_len_store = 1,
.hdr_extn = NO_BYTE, │ .hdr_extn = NO_BYTE,
.byte_store = NO_BYTE, │ .byte_store = NO_BYTE,
.search_byte_store = BOTH_BYTES, │ .search_byte_store = BOTH_BYTES,
.result_pointer = DB_RES_DROP, │ .result_pointer = DB_RES_DROP,
.num_branches = 6, │ .num_branches = 6,
.branch = { │ .branch = {
{ │ {
/* SRC IPV4 B01 */ │ /* SRC IPV4 B01 */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 28, │ .next_packet_pointer = 28,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_TCP_NODE, │ .next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 1, │ .next_branch = 1,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* SRC IPV4 B23 */ │ /* SRC IPV4 B23 */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 30, │ .next_packet_pointer = 30,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_TCP_NODE, │ .next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 2, │ .next_branch = 2,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* DST IPV4 B01 */ │ /* DST IPV4 B01 */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 32, │ .next_packet_pointer = 32,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_TCP_NODE, │ .next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 3, │ .next_branch = 3,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* DST IPV4 B23 */ │ /* DST IPV4 B23 */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 34, │ .next_packet_pointer = 34,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_TCP_NODE, │ .next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 4, │ .next_branch = 4,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* TCP SRC Port */ │ /* TCP SRC Port */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 36, │ .next_packet_pointer = 36,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_TCP_NODE, │ .next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 5, │ .next_branch = 5,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* TCP DST Port */ │ /* TCP DST Port */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 256, │ .next_packet_pointer = 260,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = LAST_NODE, │ .next_node = LAST_NODE,
.next_branch = 0, │ .next_branch = 0,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
} │ }
} │ }
} │
linux/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c:366 │ linux/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c:451
│
/* RSS_IPV4_UDP_NODE */ │ /* RSS_IPV4_OTHERS_NODE */
.node_type = EWDN, │ .node_type = EWDN,
.last_node = 0, │ .last_node = 0,
.hdr_len_store = 1, │ .hdr_len_store = 1,
.hdr_extn = NO_BYTE, │ .hdr_extn = NO_BYTE,
.byte_store = NO_BYTE, │ .byte_store = NO_BYTE,
.search_byte_store = BOTH_BYTES, │ .search_byte_store = BOTH_BYTES,
.result_pointer = DB_RES_DROP, │ .result_pointer = DB_RES_DROP,
.num_branches = 6, │ .num_branches = 6,
.branch = { │ .branch = {
{ │ {
/* SRC IPV4 B01 */ │ /* SRC IPV4 B01 */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 28, │ .next_packet_pointer = 28,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_UDP_NODE, │ .next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 1, │ .next_branch = 1,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* SRC IPV4 B23 */ │ /* SRC IPV4 B23 */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 30, │ .next_packet_pointer = 30,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_UDP_NODE, │ .next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 2, │ .next_branch = 2,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* DST IPV4 B01 */ │ /* DST IPV4 B01 */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 32, │ .next_packet_pointer = 32,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_UDP_NODE, │ .next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 3, │ .next_branch = 3,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* DST IPV4 B23 */ │ /* DST IPV4 B23 */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 34, │ .next_packet_pointer = 34,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_UDP_NODE, │ .next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 4, │ .next_branch = 4,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* TCP SRC Port */ │ /* TCP SRC Port */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 36, │ .next_packet_pointer = 36,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_UDP_NODE, │ .next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 5, │ .next_branch = 5,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* TCP DST Port */ │ /* TCP DST Port */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 258, │ .next_packet_pointer = 260,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = LAST_NODE, │ .next_node = LAST_NODE,
.next_branch = 0, │ .next_branch = 0,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
} │ }
} │ }
} │
linux/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c:366 │ linux/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c:281
│
/* RSS_IPV4_UDP_NODE */ │ /* RSS_IPV4_TCP_NODE */
.node_type = EWDN, │ .node_type = EWDN,
.last_node = 0, │ .last_node = 0,
.hdr_len_store = 1, │ .hdr_len_store = 1,
.hdr_extn = NO_BYTE, │ .hdr_extn = NO_BYTE,
.byte_store = NO_BYTE, │ .byte_store = NO_BYTE,
.search_byte_store = BOTH_BYTES, │ .search_byte_store = BOTH_BYTES,
.result_pointer = DB_RES_DROP, │ .result_pointer = DB_RES_DROP,
.num_branches = 6, │ .num_branches = 6,
.branch = { │ .branch = {
{ │ {
/* SRC IPV4 B01 */ │ /* SRC IPV4 B01 */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 28, │ .next_packet_pointer = 28,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_UDP_NODE, │ .next_node = RSS_IPV4_TCP_NODE,
.next_branch = 1, │ .next_branch = 1,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* SRC IPV4 B23 */ │ /* SRC IPV4 B23 */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 30, │ .next_packet_pointer = 30,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_UDP_NODE, │ .next_node = RSS_IPV4_TCP_NODE,
.next_branch = 2, │ .next_branch = 2,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* DST IPV4 B01 */ │ /* DST IPV4 B01 */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 32, │ .next_packet_pointer = 32,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_UDP_NODE, │ .next_node = RSS_IPV4_TCP_NODE,
.next_branch = 3, │ .next_branch = 3,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* DST IPV4 B23 */ │ /* DST IPV4 B23 */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 34, │ .next_packet_pointer = 34,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_UDP_NODE, │ .next_node = RSS_IPV4_TCP_NODE,
.next_branch = 4, │ .next_branch = 4,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* TCP SRC Port */ │ /* TCP SRC Port */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 36, │ .next_packet_pointer = 36,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = RSS_IPV4_UDP_NODE, │ .next_node = RSS_IPV4_TCP_NODE,
.next_branch = 5, │ .next_branch = 5,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
}, │ },
{ │ {
/* TCP DST Port */ │ /* TCP DST Port */
.valid = 0, │ .valid = 0,
.next_packet_pointer = 258, │ .next_packet_pointer = 256,
.jump_bw = JMP_FW, │ .jump_bw = JMP_FW,
.jump_rel = JMP_ABS, │ .jump_rel = JMP_ABS,
.operation = EQT, │ .operation = EQT,
.next_node = LAST_NODE, │ .next_node = LAST_NODE,
.next_branch = 0, │ .next_branch = 0,
.data = 0x0, │ .data = 0x0,
.mask = 0xffff │ .mask = 0xffff
} │ }
} │ }
} │
linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:2288 │ linux/drivers/net/ethernet/intel/igc/igc_ethtool.c:816
│
struct igb_adapter *adapter = netdev_priv(netdev); │ struct igc_adapter *adapter = netdev_priv(netdev);
struct rtnl_link_stats64 *net_stats = &adapter->stats64; │ struct rtnl_link_stats64 *net_stats = &adapter->stats64;
unsigned int start; │ unsigned int start;
struct igb_ring *ring; │ struct igc_ring *ring;
int i, j; │ int i, j;
char *p; │ char *p;
│
spin_lock(&adapter->stats64_lock); │ spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter); │ igc_update_stats(adapter);
│
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { │ for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
p = (char *)adapter + igb_gstrings_stats[i].stat_offset; │ p = (char *)adapter + igc_gstrings_stats[i].stat_offset;
data[i] = (igb_gstrings_stats[i].sizeof_stat == │ data[i] = (igc_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p; │ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
} │ }
for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) { │ for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) {
p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset; │ p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset;
data[i] = (igb_gstrings_net_stats[j].sizeof_stat == │ data[i] = (igc_gstrings_net_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p; │ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
} │ }
for (j = 0; j < adapter->num_tx_queues; j++) { │ for (j = 0; j < adapter->num_tx_queues; j++) {
u64 restart2; │ u64 restart2;
│
ring = adapter->tx_ring[j]; │ ring = adapter->tx_ring[j];
do { │ do {
start = u64_stats_fetch_begin_irq(&ring->tx_syncp); │ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
data[i] = ring->tx_stats.packets; │ data[i] = ring->tx_stats.packets;
data[i+1] = ring->tx_stats.bytes; │ data[i + 1] = ring->tx_stats.bytes;
data[i+2] = ring->tx_stats.restart_queue; │ data[i + 2] = ring->tx_stats.restart_queue;
} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); │ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
do { │ do {
start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); │ start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
restart2 = ring->tx_stats.restart_queue2; │ restart2 = ring->tx_stats.restart_queue2;
} while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); │ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
data[i+2] += restart2; │ data[i + 2] += restart2;
│
i += IGB_TX_QUEUE_STATS_LEN; │ i += IGC_TX_QUEUE_STATS_LEN;
} │ }
for (j = 0; j < adapter->num_rx_queues; j++) { │ for (j = 0; j < adapter->num_rx_queues; j++) {
ring = adapter->rx_ring[j]; │ ring = adapter->rx_ring[j];
do { │ do {
start = u64_stats_fetch_begin_irq(&ring->rx_syncp); │ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
data[i] = ring->rx_stats.packets; │ data[i] = ring->rx_stats.packets;
data[i+1] = ring->rx_stats.bytes; │ data[i + 1] = ring->rx_stats.bytes;
data[i+2] = ring->rx_stats.drops; │ data[i + 2] = ring->rx_stats.drops;
data[i+3] = ring->rx_stats.csum_err; │ data[i + 3] = ring->rx_stats.csum_err;
data[i+4] = ring->rx_stats.alloc_failed; │ data[i + 4] = ring->rx_stats.alloc_failed;
} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); │ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
i += IGB_RX_QUEUE_STATS_LEN; │ i += IGC_RX_QUEUE_STATS_LEN;
} │ }
spin_unlock(&adapter->stats64_lock); │ spin_unlock(&adapter->stats64_lock);
} │
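Note: the do/while pairs above re-read each ring's counters until the sequence cookie comes back unchanged, which is how 64-bit stats stay consistent against concurrent writers on 32-bit machines. A simplified seqcount-style userspace analogue (an assumption, not the kernel's u64_stats API):

#include <stdatomic.h>
#include <stdint.h>

struct ring_stats {
	_Atomic unsigned int seq;	/* odd while a writer is active */
	uint64_t packets, bytes;
};

static void stats_read(struct ring_stats *s,
		       uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		do {	/* wait for an even (quiescent) sequence */
			start = atomic_load_explicit(&s->seq,
						     memory_order_acquire);
		} while (start & 1);

		*pkts = s->packets;
		*bytes = s->bytes;

		atomic_thread_fence(memory_order_acquire);
	} while (atomic_load_explicit(&s->seq,
				      memory_order_relaxed) != start);
}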
linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c:2617 │ linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c:2681
│
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; │ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_bridge_device *bridge_device; │ struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port; │ struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port; │ struct mlxsw_sp_port *mlxsw_sp_port;
enum switchdev_notifier_type type; │ enum switchdev_notifier_type type;
char mac[ETH_ALEN]; │ char mac[ETH_ALEN];
u16 local_port; │ u16 lag_vid = 0;
│ u16 lag_id;
u16 vid, fid; │ u16 vid, fid;
bool do_notification = true; │ bool do_notification = true;
int err; │ int err;
│
mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port); │ mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
│ mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port))) │
return; │
mlxsw_sp_port = mlxsw_sp->ports[local_port]; │
if (!mlxsw_sp_port) { │ if (!mlxsw_sp_port) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n"); │ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
goto just_remove; │ goto just_remove;
} │ }
│
if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid)) │ if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
goto just_remove; │ goto just_remove;
│
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid); │ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
if (!mlxsw_sp_port_vlan) { │ if (!mlxsw_sp_port_vlan) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n"); │ netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
goto just_remove; │ goto just_remove;
} │ }
│
bridge_port = mlxsw_sp_port_vlan->bridge_port; │ bridge_port = mlxsw_sp_port_vlan->bridge_port;
if (!bridge_port) { │ if (!bridge_port) {
netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n"); │ netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
goto just_remove; │ goto just_remove;
} │ }
│
bridge_device = bridge_port->bridge_device; │ bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0; │ vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
│ lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
│ mlxsw_sp_port_vlan->vid : 0;
│
do_fdb_op: │ do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, │ err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
adding, true); │ adding, true);
if (err) { │ if (err) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n" │ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n"
return; │ return;
} │ }
│
if (!do_notification) │ if (!do_notification)
return; │ return;
type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE; │ type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding); │ mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
│
return; │ return;
│
just_remove: │ just_remove:
adding = false; │ adding = false;
do_notification = false; │ do_notification = false;
goto do_fdb_op; │ goto do_fdb_op;
} │
linux/drivers/net/ethernet/intel/e1000/e1000_main.c:1500 │ linux/drivers/net/ethernet/intel/e1000/e1000_main.c:1690
│
struct pci_dev *pdev = adapter->pdev; │ struct pci_dev *pdev = adapter->pdev;
int size; │ int size, desc_len;
│
size = sizeof(struct e1000_tx_buffer) * txdr->count; │ size = sizeof(struct e1000_rx_buffer) * rxdr->count;
txdr->buffer_info = vzalloc(size); │ rxdr->buffer_info = vzalloc(size);
if (!txdr->buffer_info) │ if (!rxdr->buffer_info)
return -ENOMEM; │ return -ENOMEM;
│
/* round up to nearest 4K */ │ desc_len = sizeof(struct e1000_rx_desc);
│
txdr->size = txdr->count * sizeof(struct e1000_tx_desc); │ /* Round up to nearest 4K */
txdr->size = ALIGN(txdr->size, 4096); │
│
txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, │ rxdr->size = rxdr->count * desc_len;
│ rxdr->size = ALIGN(rxdr->size, 4096);
│
│ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
GFP_KERNEL); │ GFP_KERNEL);
if (!txdr->desc) { │ if (!rxdr->desc) {
setup_tx_desc_die: │ setup_rx_desc_die:
vfree(txdr->buffer_info); │ vfree(rxdr->buffer_info);
return -ENOMEM; │ return -ENOMEM;
} │ }
│
/* Fix for errata 23, can't cross 64kB boundary */ │ /* Fix for errata 23, can't cross 64kB boundary */
if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { │ if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
void *olddesc = txdr->desc; │ void *olddesc = rxdr->desc;
dma_addr_t olddma = txdr->dma; │ dma_addr_t olddma = rxdr->dma;
e_err(tx_err, "txdr align check failed: %u bytes at %p\n", │ e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
txdr->size, txdr->desc); │ rxdr->size, rxdr->desc);
/* Try again, without freeing the previous */ │ /* Try again, without freeing the previous */
txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, │ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
&txdr->dma, GFP_KERNEL); │ &rxdr->dma, GFP_KERNEL);
/* Failed allocation, critical failure */ │ /* Failed allocation, critical failure */
if (!txdr->desc) { │ if (!rxdr->desc) {
dma_free_coherent(&pdev->dev, txdr->size, olddesc, │ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
olddma); │ olddma);
goto setup_tx_desc_die; │ goto setup_rx_desc_die;
} │ }
│
if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { │ if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
/* give up */ │ /* give up */
dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, │ dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
txdr->dma); │ rxdr->dma);
dma_free_coherent(&pdev->dev, txdr->size, olddesc, │ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
olddma); │ olddma);
e_err(probe, "Unable to allocate aligned memory " │ e_err(probe, "Unable to allocate aligned memory for "
"for the transmit descriptor ring\n"); │ "the Rx descriptor ring\n");
vfree(txdr->buffer_info); │ goto setup_rx_desc_die;
return -ENOMEM; │
} else { │ } else {
/* Free old allocation, new allocation was successful */ │ /* Free old allocation, new allocation was successful */
dma_free_coherent(&pdev->dev, txdr->size, olddesc, │ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
olddma); │ olddma);
} │ }
} │ }
memset(txdr->desc, 0, txdr->size); │ memset(rxdr->desc, 0, rxdr->size);
│
txdr->next_to_use = 0; │ rxdr->next_to_clean = 0;
txdr->next_to_clean = 0; │ rxdr->next_to_use = 0;
│ rxdr->rx_skb_top = NULL;
│
return 0; │ return 0;
} │
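
The errata-23 workaround above is worth isolating: if the freshly allocated descriptor ring straddles a 64 kB boundary, the driver allocates a replacement *before* freeing the first buffer, so the allocator cannot hand back the same misaligned region, and it retries exactly once before giving up. A userspace sketch of the same logic:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Does [buf, buf + len) cross a 64 kB boundary? */
    static int crosses_64k(const void *buf, size_t len)
    {
        uintptr_t begin = (uintptr_t)buf;
        uintptr_t end = begin + len - 1;

        return (begin >> 16) != (end >> 16);
    }

    int main(void)
    {
        size_t size = 4096;
        void *desc = malloc(size);

        if (desc && crosses_64k(desc, size)) {
            void *olddesc = desc;

            desc = malloc(size); /* retry without freeing the old block */
            free(olddesc);       /* release it only after the retry */
        }
        if (desc)
            printf("desc=%p aligned=%d\n", desc, !crosses_64k(desc, size));
        free(desc);
        return 0;
    }
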
linux/drivers/net/ethernet/sfc/selftest.c:466			      │ linux/drivers/net/ethernet/sfc/falcon/selftest.c:468
│
struct efx_nic *efx = tx_queue->efx; │ struct ef4_nic *efx = tx_queue->efx;
struct efx_loopback_state *state = efx->loopback_selftest; │ struct ef4_loopback_state *state = efx->loopback_selftest;
struct sk_buff *skb; │ struct sk_buff *skb;
int tx_done = 0, rx_good, rx_bad; │ int tx_done = 0, rx_good, rx_bad;
int i, rc = 0; │ int i, rc = 0;
│
netif_tx_lock_bh(efx->net_dev); │ netif_tx_lock_bh(efx->net_dev);
│
/* Count the number of tx completions, and decrement the refcnt. Any │ /* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be freed when the queue is flushed */ │	 * skbs not already completed will be freed when the queue is flushed */
for (i = 0; i < state->packet_count; i++) { │ for (i = 0; i < state->packet_count; i++) {
skb = state->skbs[i]; │ skb = state->skbs[i];
if (skb && !skb_shared(skb)) │ if (skb && !skb_shared(skb))
++tx_done; │ ++tx_done;
dev_kfree_skb(skb); │ dev_kfree_skb(skb);
} │ }
│
netif_tx_unlock_bh(efx->net_dev); │ netif_tx_unlock_bh(efx->net_dev);
│
/* Check TX completion and received packet counts */ │ /* Check TX completion and received packet counts */
rx_good = atomic_read(&state->rx_good); │ rx_good = atomic_read(&state->rx_good);
rx_bad = atomic_read(&state->rx_bad); │ rx_bad = atomic_read(&state->rx_bad);
if (tx_done != state->packet_count) { │ if (tx_done != state->packet_count) {
/* Don't free the skbs; they will be picked up on TX │ /* Don't free the skbs; they will be picked up on TX
* overflow or channel teardown. │ * overflow or channel teardown.
*/ │ */
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"TX queue %d saw only %d out of an expected %d " │ "TX queue %d saw only %d out of an expected %d "
"TX completion events in %s loopback test\n", │ "TX completion events in %s loopback test\n",
tx_queue->label, tx_done, state->packet_count, │ tx_queue->queue, tx_done, state->packet_count,
LOOPBACK_MODE(efx)); │ LOOPBACK_MODE(efx));
rc = -ETIMEDOUT; │ rc = -ETIMEDOUT;
/* Allow to fall through so we see the RX errors as well */ │ /* Allow to fall through so we see the RX errors as well */
} │ }
│
/* We may always be up to a flush away from our desired packet total */ │ /* We may always be up to a flush away from our desired packet total */
if (rx_good != state->packet_count) { │ if (rx_good != state->packet_count) {
netif_dbg(efx, drv, efx->net_dev, │ netif_dbg(efx, drv, efx->net_dev,
"TX queue %d saw only %d out of an expected %d " │ "TX queue %d saw only %d out of an expected %d "
"received packets in %s loopback test\n", │ "received packets in %s loopback test\n",
tx_queue->label, rx_good, state->packet_count, │ tx_queue->queue, rx_good, state->packet_count,
LOOPBACK_MODE(efx)); │ LOOPBACK_MODE(efx));
rc = -ETIMEDOUT; │ rc = -ETIMEDOUT;
/* Fall through */ │ /* Fall through */
} │ }
│
/* Update loopback test structure */ │ /* Update loopback test structure */
lb_tests->tx_sent[tx_queue->label] += state->packet_count; │ lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
lb_tests->tx_done[tx_queue->label] += tx_done; │ lb_tests->tx_done[tx_queue->queue] += tx_done;
lb_tests->rx_good += rx_good; │ lb_tests->rx_good += rx_good;
lb_tests->rx_bad += rx_bad; │ lb_tests->rx_bad += rx_bad;
│
return rc; │ return rc;
} │
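
The completion-counting trick here relies on reference counts: the selftest keeps its own reference to every skb it transmits, so once the TX path drops its copy the count falls to one and !skb_shared() becomes true. A userspace model with a plain refcount standing in for the skb users field:

    #include <stdio.h>

    struct pkt {
        int refcnt; /* 2 = still owned by the TX path, 1 = completed */
    };

    int main(void)
    {
        struct pkt pkts[4] = { { 1 }, { 2 }, { 1 }, { 1 } };
        int tx_done = 0;
        int i;

        for (i = 0; i < 4; i++)
            if (pkts[i].refcnt == 1) /* analogue of !skb_shared() */
                tx_done++;

        printf("tx_done=%d of %d\n", tx_done, 4);
        return 0;
    }
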
linux/drivers/net/ethernet/stmicro/stmmac/dwmac5.c:421		      │ linux/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c:1014
│
struct stmmac_tc_entry *entry, *frag; │ struct stmmac_tc_entry *entry, *frag;
int i, ret, nve = 0; │ int i, ret, nve = 0;
u32 curr_prio = 0; │ u32 curr_prio = 0;
u32 old_val, val; │ u32 old_val, val;
│
/* Force disable RX */ │ /* Force disable RX */
old_val = readl(ioaddr + GMAC_CONFIG); │ old_val = readl(ioaddr + XGMAC_RX_CONFIG);
val = old_val & ~GMAC_CONFIG_RE; │ val = old_val & ~XGMAC_CONFIG_RE;
writel(val, ioaddr + GMAC_CONFIG); │ writel(val, ioaddr + XGMAC_RX_CONFIG);
│
/* Disable RX Parser */ │ /* Disable RX Parser */
ret = dwmac5_rxp_disable(ioaddr); │ ret = dwxgmac3_rxp_disable(ioaddr);
if (ret) │ if (ret)
goto re_enable; │ goto re_enable;
│
/* Set all entries as NOT in HW */ │ /* Set all entries as NOT in HW */
for (i = 0; i < count; i++) { │ for (i = 0; i < count; i++) {
entry = &entries[i]; │ entry = &entries[i];
entry->in_hw = false; │ entry->in_hw = false;
} │ }
│
/* Update entries by reverse order */ │ /* Update entries by reverse order */
while (1) { │ while (1) {
entry = dwmac5_rxp_get_next_entry(entries, count, curr_prio); │ entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
if (!entry) │ if (!entry)
break; │ break;
│
curr_prio = entry->prio; │ curr_prio = entry->prio;
frag = entry->frag_ptr; │ frag = entry->frag_ptr;
│
/* Set special fragment requirements */ │ /* Set special fragment requirements */
if (frag) { │ if (frag) {
entry->val.af = 0; │ entry->val.af = 0;
entry->val.rf = 0; │ entry->val.rf = 0;
entry->val.nc = 1; │ entry->val.nc = 1;
entry->val.ok_index = nve + 2; │ entry->val.ok_index = nve + 2;
} │ }
│
ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve); │ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
if (ret) │ if (ret)
goto re_enable; │ goto re_enable;
│
entry->table_pos = nve++; │ entry->table_pos = nve++;
entry->in_hw = true; │ entry->in_hw = true;
│
if (frag && !frag->in_hw) { │ if (frag && !frag->in_hw) {
ret = dwmac5_rxp_update_single_entry(ioaddr, frag, nve); │ ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
if (ret) │ if (ret)
goto re_enable; │ goto re_enable;
frag->table_pos = nve++; │ frag->table_pos = nve++;
frag->in_hw = true; │ frag->in_hw = true;
} │ }
} │ }
│
if (!nve) │ if (!nve)
goto re_enable; │ goto re_enable;
│
/* Update all pass entry */ │ /* Update all pass entry */
for (i = 0; i < count; i++) { │ for (i = 0; i < count; i++) {
entry = &entries[i]; │ entry = &entries[i];
if (!entry->is_last) │ if (!entry->is_last)
continue; │ continue;
│
ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve); │ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
if (ret) │ if (ret)
goto re_enable; │ goto re_enable;
│
entry->table_pos = nve++; │ entry->table_pos = nve++;
} │ }
│
/* Assume n. of parsable entries == n. of valid entries */ │ /* Assume n. of parsable entries == n. of valid entries */
val = (nve << 16) & NPE; │ val = (nve << 16) & XGMAC_NPE;
val |= nve & NVE; │ val |= nve & XGMAC_NVE;
writel(val, ioaddr + MTL_RXP_CONTROL_STATUS); │ writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);
│
/* Enable RX Parser */ │ /* Enable RX Parser */
dwmac5_rxp_enable(ioaddr); │ dwxgmac3_rxp_enable(ioaddr);
│
re_enable: │ re_enable:
/* Re-enable RX */ │ /* Re-enable RX */
writel(old_val, ioaddr + GMAC_CONFIG); │ writel(old_val, ioaddr + XGMAC_RX_CONFIG);
return ret; │ return ret;
} │
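
The rewrite loop above quiesces RX, marks every entry not-in-hardware, and then places entries by ascending priority value, appending a fragment continuation (the ok_index target) directly behind its parent. A simplified standalone model of that ordering (entry layout and priorities invented):

    #include <stdbool.h>
    #include <stdio.h>

    struct entry {
        int prio;
        bool in_hw;
        bool is_frag;
        int frag; /* index of a fragment entry, or -1 */
    };

    /* Pick the pending non-fragment entry with the smallest priority. */
    static int next_pending(struct entry *e, int n)
    {
        int best = -1, i;

        for (i = 0; i < n; i++)
            if (!e[i].in_hw && !e[i].is_frag &&
                (best < 0 || e[i].prio < e[best].prio))
                best = i;
        return best;
    }

    int main(void)
    {
        struct entry e[4] = {
            { .prio = 2, .frag = -1 },
            { .prio = 0, .frag = 2 },       /* has a fragment entry */
            { .is_frag = true, .frag = -1 },
            { .prio = 1, .frag = -1 },
        };
        int nve = 0, i;

        while ((i = next_pending(e, 4)) >= 0) {
            printf("slot %d <- entry %d\n", nve++, i);
            e[i].in_hw = true;
            if (e[i].frag >= 0 && !e[e[i].frag].in_hw) {
                printf("slot %d <- fragment %d\n", nve++, e[i].frag);
                e[e[i].frag].in_hw = true;
            }
        }
        return 0;
    }
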
linux/drivers/net/ethernet/intel/ice/ice_virtchnl.c:3214	      │ linux/drivers/net/ethernet/intel/ice/ice_virtchnl.c:3140
│
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; │ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vlan_supported_caps *stripping_support; │ struct virtchnl_vlan_supported_caps *stripping_support;
struct virtchnl_vlan_setting *strip_msg = │ struct virtchnl_vlan_setting *strip_msg =
(struct virtchnl_vlan_setting *)msg; │ (struct virtchnl_vlan_setting *)msg;
u32 ethertype_setting; │ u32 ethertype_setting;
struct ice_vsi *vsi; │ struct ice_vsi *vsi;
│
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { │ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; │ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto out; │ goto out;
} │ }
│
if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) { │ if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; │ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto out; │ goto out;
} │ }
│
vsi = ice_get_vf_vsi(vf); │ vsi = ice_get_vf_vsi(vf);
if (!vsi) { │ if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; │ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto out; │ goto out;
} │ }
│
stripping_support = &vf->vlan_v2_caps.offloads.stripping_support; │ stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) { │ if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; │ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto out; │ goto out;
} │ }
│
ethertype_setting = strip_msg->outer_ethertype_setting; │ ethertype_setting = strip_msg->outer_ethertype_setting;
if (ethertype_setting) { │ if (ethertype_setting) {
if (vsi->outer_vlan_ops.dis_stripping(vsi)) { │ if (ice_vc_ena_vlan_offload(vsi,
│ vsi->outer_vlan_ops.ena_stripping,
│ ethertype_setting)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; │ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto out; │ goto out;
} else { │ } else {
enum ice_l2tsel l2tsel = │ enum ice_l2tsel l2tsel =
ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1; │ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
│
/* PF tells the VF that the outer VLAN tag is always │ /* PF tells the VF that the outer VLAN tag is always
* extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and │ * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
* inner is always extracted to │ * inner is always extracted to
* VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to │ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
* support inner stripping while outer stripping is │ * support outer stripping so the first tag always ends
* disabled so that the first and only tag is extracted │ * up in L2TAG2_2ND and the second/inner tag, if
* in L2TAG1. │ * enabled, is extracted in L2TAG1.
*/ │ */
ice_vsi_update_l2tsel(vsi, l2tsel); │ ice_vsi_update_l2tsel(vsi, l2tsel);
} │ }
} │ }
│
ethertype_setting = strip_msg->inner_ethertype_setting; │ ethertype_setting = strip_msg->inner_ethertype_setting;
if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) { │ if (ethertype_setting &&
│ ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
│ ethertype_setting)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; │ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto out; │ goto out;
} │ }
│
out: │ out:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2, │ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
v_ret, NULL, 0); │ v_ret, NULL, 0);
} │
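
The two columns differ only in direction (disable vs. enable outer stripping), and the L2TAG placement rule in their comments can be restated in a few lines: with outer stripping enabled the first (outer) tag is extracted into L2TAG2_2ND, with it disabled the single remaining tag lands in L2TAG1. The enum constant names below echo the driver's but are local to this sketch:

    #include <stdio.h>

    enum l2tsel { L2TSEL_L2TAG1, L2TSEL_L2TAG2_2ND };

    static enum l2tsel outer_tag_location(int outer_stripping_enabled)
    {
        return outer_stripping_enabled ? L2TSEL_L2TAG2_2ND : L2TSEL_L2TAG1;
    }

    int main(void)
    {
        printf("enabled -> %d, disabled -> %d\n",
               outer_tag_location(1), outer_tag_location(0));
        return 0;
    }
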
linux/drivers/net/ethernet/sfc/falcon/selftest.c:615		      │ linux/drivers/net/ethernet/sfc/selftest.c:613
│
enum ef4_loopback_mode mode; │ enum efx_loopback_mode mode;
struct ef4_loopback_state *state; │ struct efx_loopback_state *state;
struct ef4_channel *channel = │ struct efx_channel *channel =
ef4_get_channel(efx, efx->tx_channel_offset); │ efx_get_channel(efx, efx->tx_channel_offset);
struct ef4_tx_queue *tx_queue; │ struct efx_tx_queue *tx_queue;
int rc = 0; │ int rc = 0;
│
/* Set the port loopback_selftest member. From this point on │ /* Set the port loopback_selftest member. From this point on
* all received packets will be dropped. Mark the state as │ * all received packets will be dropped. Mark the state as
* "flushing" so all inflight packets are dropped */ │ * "flushing" so all inflight packets are dropped */
state = kzalloc(sizeof(*state), GFP_KERNEL); │ state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL) │ if (state == NULL)
return -ENOMEM; │ return -ENOMEM;
BUG_ON(efx->loopback_selftest); │ BUG_ON(efx->loopback_selftest);
state->flush = true; │ state->flush = true;
efx->loopback_selftest = state; │ efx->loopback_selftest = state;
│
/* Test all supported loopback modes */ │ /* Test all supported loopback modes */
for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { │ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
if (!(loopback_modes & (1 << mode))) │ if (!(loopback_modes & (1 << mode)))
continue; │ continue;
│
/* Move the port into the specified loopback mode. */ │ /* Move the port into the specified loopback mode. */
state->flush = true; │ state->flush = true;
mutex_lock(&efx->mac_lock); │ mutex_lock(&efx->mac_lock);
efx->loopback_mode = mode; │ efx->loopback_mode = mode;
rc = __ef4_reconfigure_port(efx); │ rc = __efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock); │ mutex_unlock(&efx->mac_lock);
if (rc) { │ if (rc) {
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"unable to move into %s loopback\n", │ "unable to move into %s loopback\n",
LOOPBACK_MODE(efx)); │ LOOPBACK_MODE(efx));
goto out; │ goto out;
} │ }
│
rc = ef4_wait_for_link(efx); │ rc = efx_wait_for_link(efx);
if (rc) { │ if (rc) {
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"loopback %s never came up\n", │ "loopback %s never came up\n",
LOOPBACK_MODE(efx)); │ LOOPBACK_MODE(efx));
goto out; │ goto out;
} │ }
│
/* Test all enabled types of TX queue */ │ /* Test all enabled types of TX queue */
ef4_for_each_channel_tx_queue(tx_queue, channel) { │ efx_for_each_channel_tx_queue(tx_queue, channel) {
state->offload_csum = (tx_queue->queue & │ state->offload_csum = (tx_queue->type &
EF4_TXQ_TYPE_OFFLOAD); │ EFX_TXQ_TYPE_OUTER_CSUM);
rc = ef4_test_loopback(tx_queue, │ rc = efx_test_loopback(tx_queue,
&tests->loopback[mode]); │ &tests->loopback[mode]);
if (rc) │ if (rc)
goto out; │ goto out;
} │ }
} │ }
│
out: │ out:
/* Remove the flush. The caller will remove the loopback setting */ │ /* Remove the flush. The caller will remove the loopback setting */
state->flush = true; │ state->flush = true;
efx->loopback_selftest = NULL; │ efx->loopback_selftest = NULL;
wmb(); │ wmb();
kfree(state); │ kfree(state);
│
if (rc == -EPERM) │ if (rc == -EPERM)
rc = 0; │ rc = 0;
│
return rc; │ return rc;
} │
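
The mode loop above is a plain walk over a capability bitmask, testing each mode index and skipping the ones whose bit is clear. A standalone version with invented mode numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int loopback_modes = (1u << 0) | (1u << 3) | (1u << 5);
        unsigned int mode;

        for (mode = 0; mode <= 5; mode++) {
            if (!(loopback_modes & (1u << mode)))
                continue; /* mode not supported: skip it */
            printf("testing loopback mode %u\n", mode);
        }
        return 0;
    }
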
linux/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c:1160	      │ linux/drivers/net/ethernet/intel/ixgbevf/ipsec.c:552
│
struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev); │ struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev);
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; │ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH | │ __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
IXGBE_RXDADV_PKTTYPE_IPSEC_ESP); │ IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
struct ixgbe_ipsec *ipsec = adapter->ipsec; │ struct ixgbevf_ipsec *ipsec = adapter->ipsec;
struct xfrm_offload *xo = NULL; │ struct xfrm_offload *xo = NULL;
struct xfrm_state *xs = NULL; │ struct xfrm_state *xs = NULL;
struct ipv6hdr *ip6 = NULL; │ struct ipv6hdr *ip6 = NULL;
struct iphdr *ip4 = NULL; │ struct iphdr *ip4 = NULL;
struct sec_path *sp; │ struct sec_path *sp;
void *daddr; │ void *daddr;
__be32 spi; │ __be32 spi;
u8 *c_hdr; │ u8 *c_hdr;
u8 proto; │ u8 proto;
│
/* Find the ip and crypto headers in the data. │ /* Find the IP and crypto headers in the data.
* We can assume no vlan header in the way, b/c the │ * We can assume no VLAN header in the way, b/c the
* hw won't recognize the IPsec packet and anyway the │ * hw won't recognize the IPsec packet and anyway the
	 * current VLAN device doesn't support xfrm offload.	      │	 * current VLAN device doesn't support xfrm offload.
*/ │ */
if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) { │ if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
ip4 = (struct iphdr *)(skb->data + ETH_HLEN); │ ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
daddr = &ip4->daddr; │ daddr = &ip4->daddr;
c_hdr = (u8 *)ip4 + ip4->ihl * 4; │ c_hdr = (u8 *)ip4 + ip4->ihl * 4;
} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) { │ } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); │ ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
daddr = &ip6->daddr; │ daddr = &ip6->daddr;
c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr); │ c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
} else { │ } else {
return; │ return;
} │ }
│
switch (pkt_info & ipsec_pkt_types) { │ switch (pkt_info & ipsec_pkt_types) {
case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH): │ case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
spi = ((struct ip_auth_hdr *)c_hdr)->spi; │ spi = ((struct ip_auth_hdr *)c_hdr)->spi;
proto = IPPROTO_AH; │ proto = IPPROTO_AH;
break; │ break;
case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP): │ case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
spi = ((struct ip_esp_hdr *)c_hdr)->spi; │ spi = ((struct ip_esp_hdr *)c_hdr)->spi;
proto = IPPROTO_ESP; │ proto = IPPROTO_ESP;
break; │ break;
default: │ default:
return; │ return;
} │ }
│
xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4); │ xs = ixgbevf_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
if (unlikely(!xs)) │ if (unlikely(!xs))
return; │ return;
│
sp = secpath_set(skb); │ sp = secpath_set(skb);
if (unlikely(!sp)) │ if (unlikely(!sp))
return; │ return;
│
sp->xvec[sp->len++] = xs; │ sp->xvec[sp->len++] = xs;
sp->olen++; │ sp->olen++;
xo = xfrm_offload(skb); │ xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE; │ xo->flags = CRYPTO_DONE;
xo->status = CRYPTO_SUCCESS; │ xo->status = CRYPTO_SUCCESS;
│
adapter->rx_ipsec++; │ adapter->rx_ipsec++;
} │
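
The header walk at the top of this function is the reusable part: the crypto header begins ihl*4 bytes into an IPv4 header (a fixed 40 bytes for IPv6), the ESP SPI is the first 32-bit word of that header, and the AH header keeps its SPI four bytes in. A fabricated IPv4/ESP example, starting at the IP header (i.e. the ETH_HLEN offset already applied):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint8_t pkt[64] = { 0 };
        uint32_t spi = htonl(0x1234);
        unsigned int ihl;
        uint32_t got;

        pkt[0] = 0x45;                /* IPv4, IHL = 5 words = 20 bytes */
        memcpy(&pkt[5 * 4], &spi, 4); /* ESP SPI right after the header */

        ihl = (pkt[0] & 0x0f) * 4;    /* crypto header offset */
        memcpy(&got, &pkt[ihl], 4);
        printf("spi=0x%x\n", ntohl(got));
        return 0;
    }
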
linux/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c:578		      │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c:477
│
u32 i, command; │ u32 i, data, command;
│
/* Put the data in the MDI single read and write data register*/ │
IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); │
│
/* Setup and write the address cycle command */ │ /* Setup and write the address cycle command */
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | │ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | │ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
(hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | │ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); │ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
│
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); │ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
│
/* │ /* Check every 10 usec to see if the address cycle completed.
* Check every 10 usec to see if the address cycle completed. │
* The MDI Command bit will clear when the operation is │ * The MDI Command bit will clear when the operation is
* complete │ * complete
*/ │ */
for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { │ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
udelay(10); │ udelay(10);
│
command = IXGBE_READ_REG(hw, IXGBE_MSCA); │ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) │ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
break; │ break;
} │ }
│
│
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { │ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY address cmd didn't complete\n"); │ hw_dbg(hw, "PHY address command did not complete.\n");
return IXGBE_ERR_PHY; │ return IXGBE_ERR_PHY;
} │ }
│
/* │ /* Address cycle complete, setup and write the read
* Address cycle complete, setup and write the write │
* command │ * command
*/ │ */
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | │ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | │ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
(hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | │ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); │ (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
│
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); │ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
│
/* Check every 10 usec to see if the address cycle │ /* Check every 10 usec to see if the address cycle
* completed. The MDI Command bit will clear when the │ * completed. The MDI Command bit will clear when the
* operation is complete │ * operation is complete
*/ │ */
for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { │ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
udelay(10); │ udelay(10);
│
command = IXGBE_READ_REG(hw, IXGBE_MSCA); │ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) │ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
break; │ break;
} │ }
│
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { │ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY write cmd didn't complete\n"); │ hw_dbg(hw, "PHY read command didn't complete\n");
return IXGBE_ERR_PHY; │ return IXGBE_ERR_PHY;
} │ }
│
│ /* Read operation is complete. Get the data
│ * from MSRWD
│ */
│ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
│ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
│ *phy_data = (u16)(data);
│
return 0; │ return 0;
} │
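
Both MDIO accesses are two instances of the same poll loop: issue a command, then spin (with a delay) until hardware clears the command bit or a fixed number of polls elapses. A self-contained model with a fake register that "completes" after three polls:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MDI_COMMAND  0x40000000u
    #define MDIO_TIMEOUT 100

    static uint32_t fake_msca = MDI_COMMAND;

    static uint32_t read_msca(void)
    {
        static int polls;

        if (++polls >= 3)             /* pretend the cycle finished */
            fake_msca &= ~MDI_COMMAND;
        return fake_msca;
    }

    static bool wait_mdi_idle(void)
    {
        int i;

        for (i = 0; i < MDIO_TIMEOUT; i++) {
            /* the driver waits udelay(10) between polls */
            if (!(read_msca() & MDI_COMMAND))
                return true;
        }
        return false;
    }

    int main(void)
    {
        printf("cycle %s\n", wait_mdi_idle() ? "completed" : "timed out");
        return 0;
    }
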
linux/drivers/net/ethernet/sfc/falcon/efx.c:2261		      │ linux/drivers/net/ethernet/sfc/efx.c:702
│
struct net_device *net_dev = efx->net_dev; │ struct net_device *net_dev = efx->net_dev;
struct ef4_channel *channel; │ struct efx_channel *channel;
int rc; │ int rc;
│
net_dev->watchdog_timeo = 5 * HZ; │ net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq; │ net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &ef4_netdev_ops; │ net_dev->netdev_ops = &efx_netdev_ops;
net_dev->ethtool_ops = &ef4_ethtool_ops; │ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
netif_set_gso_max_segs(net_dev, EF4_TSO_MAX_SEGS); │ net_dev->priv_flags |= IFF_UNICAST_FLT;
net_dev->min_mtu = EF4_MIN_MTU; │ net_dev->ethtool_ops = &efx_ethtool_ops;
net_dev->max_mtu = EF4_MAX_MTU; │ netif_set_gso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
│ net_dev->min_mtu = EFX_MIN_MTU;
│ net_dev->max_mtu = EFX_MAX_MTU;
│
rtnl_lock(); │ rtnl_lock();
│
/* Enable resets to be scheduled and check whether any were │ /* Enable resets to be scheduled and check whether any were
* already requested. If so, the NIC is probably hosed so we │ * already requested. If so, the NIC is probably hosed so we
* abort. │ * abort.
*/ │ */
efx->state = STATE_READY; │ efx->state = STATE_READY;
smp_mb(); /* ensure we change state before checking reset_pending */ │ smp_mb(); /* ensure we change state before checking reset_pending */
if (efx->reset_pending) { │ if (efx->reset_pending) {
netif_err(efx, probe, efx->net_dev, │ pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
"aborting probe due to scheduled reset\n"); │
rc = -EIO; │ rc = -EIO;
goto fail_locked; │ goto fail_locked;
} │ }
│
rc = dev_alloc_name(net_dev, net_dev->name); │ rc = dev_alloc_name(net_dev, net_dev->name);
if (rc < 0) │ if (rc < 0)
goto fail_locked; │ goto fail_locked;
ef4_update_name(efx); │ efx_update_name(efx);
│
/* Always start with carrier off; PHY events will detect the link */ │ /* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(net_dev); │ netif_carrier_off(net_dev);
│
rc = register_netdevice(net_dev); │ rc = register_netdevice(net_dev);
if (rc) │ if (rc)
goto fail_locked; │ goto fail_locked;
│
ef4_for_each_channel(channel, efx) { │ efx_for_each_channel(channel, efx) {
struct ef4_tx_queue *tx_queue; │ struct efx_tx_queue *tx_queue;
ef4_for_each_channel_tx_queue(tx_queue, channel) │ efx_for_each_channel_tx_queue(tx_queue, channel)
ef4_init_tx_queue_core_txq(tx_queue); │ efx_init_tx_queue_core_txq(tx_queue);
} │ }
│
ef4_associate(efx); │ efx_associate(efx);
│
rtnl_unlock(); │ rtnl_unlock();
│
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); │ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
if (rc) { │ if (rc) {
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"failed to init net dev attributes\n"); │ "failed to init net dev attributes\n");
goto fail_registered; │ goto fail_registered;
} │ }
│
│ efx_init_mcdi_logging(efx);
│
return 0; │ return 0;
│
fail_registered: │ fail_registered:
rtnl_lock(); │ rtnl_lock();
ef4_dissociate(efx); │ efx_dissociate(efx);
unregister_netdevice(net_dev); │ unregister_netdevice(net_dev);
fail_locked: │ fail_locked:
efx->state = STATE_UNINIT; │ efx->state = STATE_UNINIT;
rtnl_unlock(); │ rtnl_unlock();
netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); │ netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
return rc; │ return rc;
} │
linux/drivers/net/ethernet/intel/igc/igc_ethtool.c:505		      │ linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:782
│
struct igc_adapter *adapter = netdev_priv(netdev); │ struct igb_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw; │ struct e1000_hw *hw = &adapter->hw;
int max_len, first_word, last_word, ret_val = 0; │
u16 *eeprom_buff; │ u16 *eeprom_buff;
void *ptr; │ void *ptr;
│ int max_len, first_word, last_word, ret_val = 0;
u16 i; │ u16 i;
│
if (eeprom->len == 0) │ if (eeprom->len == 0)
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
│
if (hw->mac.type >= igc_i225 && │ if ((hw->mac.type >= e1000_i210) &&
!igc_get_flash_presence_i225(hw)) { │ !igb_get_flash_presence_i210(hw)) {
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
} │ }
│
if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) │ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EFAULT; │ return -EFAULT;
│
max_len = hw->nvm.word_size * 2; │ max_len = hw->nvm.word_size * 2;
│
first_word = eeprom->offset >> 1; │ first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1; │ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
eeprom_buff = kmalloc(max_len, GFP_KERNEL); │ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
if (!eeprom_buff) │ if (!eeprom_buff)
return -ENOMEM; │ return -ENOMEM;
│
ptr = (void *)eeprom_buff; │ ptr = (void *)eeprom_buff;
│
if (eeprom->offset & 1) { │ if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word │ /* need read/modify/write of first changed EEPROM word
* only the second byte of the word is being modified │ * only the second byte of the word is being modified
*/ │ */
ret_val = hw->nvm.ops.read(hw, first_word, 1, │ ret_val = hw->nvm.ops.read(hw, first_word, 1,
&eeprom_buff[0]); │ &eeprom_buff[0]);
ptr++; │ ptr++;
} │ }
if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) { │ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
/* need read/modify/write of last changed EEPROM word │ /* need read/modify/write of last changed EEPROM word
* only the first byte of the word is being modified │ * only the first byte of the word is being modified
*/ │ */
ret_val = hw->nvm.ops.read(hw, last_word, 1, │ ret_val = hw->nvm.ops.read(hw, last_word, 1,
&eeprom_buff[last_word - first_word]); │ &eeprom_buff[last_word - first_word]);
} │ }
│
/* Device's eeprom is always little-endian, word addressable */ │ /* Device's eeprom is always little-endian, word addressable */
for (i = 0; i < last_word - first_word + 1; i++) │ for (i = 0; i < last_word - first_word + 1; i++)
le16_to_cpus(&eeprom_buff[i]); │ le16_to_cpus(&eeprom_buff[i]);
│
memcpy(ptr, bytes, eeprom->len); │ memcpy(ptr, bytes, eeprom->len);
│
for (i = 0; i < last_word - first_word + 1; i++) │ for (i = 0; i < last_word - first_word + 1; i++)
cpu_to_le16s(&eeprom_buff[i]); │ cpu_to_le16s(&eeprom_buff[i]);
│
ret_val = hw->nvm.ops.write(hw, first_word, │ ret_val = hw->nvm.ops.write(hw, first_word,
last_word - first_word + 1, eeprom_buff); │ last_word - first_word + 1, eeprom_buff);
│
/* Update the checksum if nvm write succeeded */ │ /* Update the checksum if nvm write succeeded */
if (ret_val == 0) │ if (ret_val == 0)
hw->nvm.ops.update(hw); │ hw->nvm.ops.update(hw);
│
│ igb_set_fw_version(adapter);
kfree(eeprom_buff); │ kfree(eeprom_buff);
return ret_val; │ return ret_val;
} │
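
The partial-word handling deserves a worked example: the NVM is 16-bit word addressable, so a byte write with an odd start or odd end must read-modify-write the edge words. The sketch below assumes a little-endian host; the driver additionally byte-swaps via le16_to_cpus()/cpu_to_le16s() so the same splice works on big-endian machines:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint16_t nvm[4] = { 0x1122, 0x3344, 0x5566, 0x7788 };
        uint8_t bytes[3] = { 0xaa, 0xbb, 0xcc };
        unsigned int offset = 1, len = 3;   /* odd start and odd end */
        unsigned int first_word = offset >> 1;
        unsigned int last_word = (offset + len - 1) >> 1;
        unsigned int nwords = last_word - first_word + 1;
        uint16_t buf[4];
        unsigned int i;

        memcpy(buf, &nvm[first_word], nwords * 2);         /* read edges */
        memcpy((uint8_t *)buf + (offset & 1), bytes, len); /* splice */
        memcpy(&nvm[first_word], buf, nwords * 2);         /* write back */

        for (i = 0; i < 4; i++)
            printf("word %u = 0x%04x\n", i, nvm[i]);
        return 0;
    }
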
linux/drivers/net/ethernet/intel/igc/igc_ethtool.c:896		      │ linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:2188
│
struct igc_adapter *adapter = netdev_priv(netdev); │ struct igb_adapter *adapter = netdev_priv(netdev);
int i; │ int i;
│
if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS || │ if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
(ec->rx_coalesce_usecs > 3 && │ ((ec->rx_coalesce_usecs > 3) &&
ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) || │ (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
ec->rx_coalesce_usecs == 2) │ (ec->rx_coalesce_usecs == 2))
return -EINVAL; │ return -EINVAL;
│
if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS || │ if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
(ec->tx_coalesce_usecs > 3 && │ ((ec->tx_coalesce_usecs > 3) &&
ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) || │ (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
ec->tx_coalesce_usecs == 2) │ (ec->tx_coalesce_usecs == 2))
return -EINVAL; │ return -EINVAL;
│
if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) │ if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
return -EINVAL; │ return -EINVAL;
│
/* If ITR is disabled, disable DMAC */ │ /* If ITR is disabled, disable DMAC */
if (ec->rx_coalesce_usecs == 0) { │ if (ec->rx_coalesce_usecs == 0) {
if (adapter->flags & IGC_FLAG_DMAC) │ if (adapter->flags & IGB_FLAG_DMAC)
adapter->flags &= ~IGC_FLAG_DMAC; │ adapter->flags &= ~IGB_FLAG_DMAC;
} │ }
│
	/* convert to rate of IRQs per second */		      │	/* convert to rate of IRQs per second */
if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) │ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
adapter->rx_itr_setting = ec->rx_coalesce_usecs; │ adapter->rx_itr_setting = ec->rx_coalesce_usecs;
else │ else
adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; │ adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
│
	/* convert to rate of IRQs per second */		      │	/* convert to rate of IRQs per second */
if (adapter->flags & IGC_FLAG_QUEUE_PAIRS) │ if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
adapter->tx_itr_setting = adapter->rx_itr_setting; │ adapter->tx_itr_setting = adapter->rx_itr_setting;
else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) │ else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
adapter->tx_itr_setting = ec->tx_coalesce_usecs; │ adapter->tx_itr_setting = ec->tx_coalesce_usecs;
else │ else
adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; │ adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
│
for (i = 0; i < adapter->num_q_vectors; i++) { │ for (i = 0; i < adapter->num_q_vectors; i++) {
struct igc_q_vector *q_vector = adapter->q_vector[i]; │ struct igb_q_vector *q_vector = adapter->q_vector[i];
│
q_vector->tx.work_limit = adapter->tx_work_limit; │ q_vector->tx.work_limit = adapter->tx_work_limit;
if (q_vector->rx.ring) │ if (q_vector->rx.ring)
q_vector->itr_val = adapter->rx_itr_setting; │ q_vector->itr_val = adapter->rx_itr_setting;
else │ else
q_vector->itr_val = adapter->tx_itr_setting; │ q_vector->itr_val = adapter->tx_itr_setting;
if (q_vector->itr_val && q_vector->itr_val <= 3) │ if (q_vector->itr_val && q_vector->itr_val <= 3)
q_vector->itr_val = IGC_START_ITR; │ q_vector->itr_val = IGB_START_ITR;
q_vector->set_itr = 1; │ q_vector->set_itr = 1;
} │ }
│
return 0; │ return 0;
} │
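
The conversion both drivers apply is easy to miss in the branching: 0 disables interrupt moderation, 1-3 are opaque mode codes stored verbatim, and anything larger is the microsecond value shifted into the adapter's internal ITR representation. A sketch of just that mapping:

    #include <stdio.h>

    static unsigned int itr_setting(unsigned int usecs)
    {
        if (usecs && usecs <= 3)
            return usecs;   /* special mode code, kept as-is */
        return usecs << 2;  /* internal ITR units (0 stays 0) */
    }

    int main(void)
    {
        printf("%u %u %u\n", itr_setting(3), itr_setting(100),
               itr_setting(0));
        return 0;
    }
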
linux/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:1171	      │ linux/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:1261
│
.core_init = dwmac4_core_init, │ .core_init = dwmac4_core_init,
.set_mac = stmmac_set_mac, │ .set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable, │ .rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable, │ .rx_queue_enable = dwmac4_rx_queue_enable,
.rx_queue_prio = dwmac4_rx_queue_priority, │ .rx_queue_prio = dwmac4_rx_queue_priority,
.tx_queue_prio = dwmac4_tx_queue_priority, │ .tx_queue_prio = dwmac4_tx_queue_priority,
.rx_queue_routing = dwmac4_rx_queue_routing, │ .rx_queue_routing = dwmac4_rx_queue_routing,
.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, │ .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, │ .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, │ .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
.map_mtl_to_dma = dwmac4_map_mtl_dma, │ .map_mtl_to_dma = dwmac4_map_mtl_dma,
.config_cbs = dwmac4_config_cbs, │ .config_cbs = dwmac4_config_cbs,
.dump_regs = dwmac4_dump_regs, │ .dump_regs = dwmac4_dump_regs,
.host_irq_status = dwmac4_irq_status, │ .host_irq_status = dwmac4_irq_status,
.host_mtl_irq_status = dwmac4_irq_mtl_status, │ .host_mtl_irq_status = dwmac4_irq_mtl_status,
.flow_ctrl = dwmac4_flow_ctrl, │ .flow_ctrl = dwmac4_flow_ctrl,
.pmt = dwmac4_pmt, │ .pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr, │ .set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr, │ .get_umac_addr = dwmac4_get_umac_addr,
.set_eee_mode = dwmac4_set_eee_mode, │ .set_eee_mode = dwmac4_set_eee_mode,
.reset_eee_mode = dwmac4_reset_eee_mode, │ .reset_eee_mode = dwmac4_reset_eee_mode,
.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, │ .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
.set_eee_timer = dwmac4_set_eee_timer, │ .set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls, │ .set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane, │ .pcs_ctrl_ane = dwmac4_ctrl_ane,
.pcs_rane = dwmac4_rane, │ .pcs_rane = dwmac4_rane,
.pcs_get_adv_lp = dwmac4_get_adv_lp, │ .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug, │ .debug = dwmac4_debug,
.set_filter = dwmac4_set_filter, │ .set_filter = dwmac4_set_filter,
│ .safety_feat_config = dwmac5_safety_feat_config,
│ .safety_feat_irq_status = dwmac5_safety_feat_irq_status,
│ .safety_feat_dump = dwmac5_safety_feat_dump,
│ .rxp_config = dwmac5_rxp_config,
│ .flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback, │ .set_mac_loopback = dwmac4_set_mac_loopback,
.update_vlan_hash = dwmac4_update_vlan_hash, │ .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure, │ .sarc_configure = dwmac4_sarc_configure,
.enable_vlan = dwmac4_enable_vlan, │ .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload, │ .set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter, │ .config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter, │ .config_l4_filter = dwmac4_config_l4_filter,
│ .est_configure = dwmac5_est_configure,
│ .est_irq_status = dwmac5_est_irq_status,
│ .fpe_configure = dwmac5_fpe_configure,
│ .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
│ .fpe_irq_status = dwmac5_fpe_irq_status,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, │ .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, │ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, │ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
} │
linux/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:1171	      │ linux/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:1213
│
.core_init = dwmac4_core_init, │ .core_init = dwmac4_core_init,
.set_mac = stmmac_set_mac, │ .set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable, │ .rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable, │ .rx_queue_enable = dwmac4_rx_queue_enable,
.rx_queue_prio = dwmac4_rx_queue_priority, │ .rx_queue_prio = dwmac4_rx_queue_priority,
.tx_queue_prio = dwmac4_tx_queue_priority, │ .tx_queue_prio = dwmac4_tx_queue_priority,
.rx_queue_routing = dwmac4_rx_queue_routing, │ .rx_queue_routing = dwmac4_rx_queue_routing,
.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, │ .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, │ .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, │ .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
.map_mtl_to_dma = dwmac4_map_mtl_dma, │ .map_mtl_to_dma = dwmac4_map_mtl_dma,
.config_cbs = dwmac4_config_cbs, │ .config_cbs = dwmac4_config_cbs,
.dump_regs = dwmac4_dump_regs, │ .dump_regs = dwmac4_dump_regs,
.host_irq_status = dwmac4_irq_status, │ .host_irq_status = dwmac4_irq_status,
.host_mtl_irq_status = dwmac4_irq_mtl_status, │ .host_mtl_irq_status = dwmac4_irq_mtl_status,
.flow_ctrl = dwmac4_flow_ctrl, │ .flow_ctrl = dwmac4_flow_ctrl,
.pmt = dwmac4_pmt, │ .pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr, │ .set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr, │ .get_umac_addr = dwmac4_get_umac_addr,
.set_eee_mode = dwmac4_set_eee_mode, │ .set_eee_mode = dwmac4_set_eee_mode,
.reset_eee_mode = dwmac4_reset_eee_mode, │ .reset_eee_mode = dwmac4_reset_eee_mode,
.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, │ .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
.set_eee_timer = dwmac4_set_eee_timer, │ .set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls, │ .set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane, │ .pcs_ctrl_ane = dwmac4_ctrl_ane,
.pcs_rane = dwmac4_rane, │ .pcs_rane = dwmac4_rane,
.pcs_get_adv_lp = dwmac4_get_adv_lp, │ .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug, │ .debug = dwmac4_debug,
.set_filter = dwmac4_set_filter, │ .set_filter = dwmac4_set_filter,
│ .flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback, │ .set_mac_loopback = dwmac4_set_mac_loopback,
.update_vlan_hash = dwmac4_update_vlan_hash, │ .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure, │ .sarc_configure = dwmac4_sarc_configure,
.enable_vlan = dwmac4_enable_vlan, │ .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload, │ .set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter, │ .config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter, │ .config_l4_filter = dwmac4_config_l4_filter,
│ .est_configure = dwmac5_est_configure,
│ .est_irq_status = dwmac5_est_irq_status,
│ .fpe_configure = dwmac5_fpe_configure,
│ .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
│ .fpe_irq_status = dwmac5_fpe_irq_status,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, │ .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, │ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, │ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
} │
linux/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c:453	      │ linux/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c:524
│
struct virtchnl_ether_addr_list *veal; │ struct virtchnl_ether_addr_list *veal;
struct iavf_mac_filter *f; │ struct iavf_mac_filter *f, *ftmp;
int i = 0, count = 0; │ int i = 0, count = 0;
bool more = false; │ bool more = false;
size_t len; │ size_t len;
│
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { │ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ │ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", │ dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\
adapter->current_op); │ adapter->current_op);
return; │ return;
} │ }
│
spin_lock_bh(&adapter->mac_vlan_list_lock); │ spin_lock_bh(&adapter->mac_vlan_list_lock);
│
list_for_each_entry(f, &adapter->mac_filter_list, list) { │ list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add) │ if (f->remove)
count++; │ count++;
} │ }
if (!count) { │ if (!count) {
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER; │ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock); │ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return; │ return;
} │ }
adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR; │ adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
│
len = struct_size(veal, list, count); │ len = struct_size(veal, list, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) { │ if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n │ dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one reques
count = (IAVF_MAX_AQ_BUF_SIZE - │ count = (IAVF_MAX_AQ_BUF_SIZE -
sizeof(struct virtchnl_ether_addr_list)) / │ sizeof(struct virtchnl_ether_addr_list)) /
sizeof(struct virtchnl_ether_addr); │ sizeof(struct virtchnl_ether_addr);
len = struct_size(veal, list, count); │ len = struct_size(veal, list, count);
more = true; │ more = true;
} │ }
│
veal = kzalloc(len, GFP_ATOMIC); │ veal = kzalloc(len, GFP_ATOMIC);
if (!veal) { │ if (!veal) {
spin_unlock_bh(&adapter->mac_vlan_list_lock); │ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return; │ return;
} │ }
│
veal->vsi_id = adapter->vsi_res->vsi_id; │ veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count; │ veal->num_elements = count;
list_for_each_entry(f, &adapter->mac_filter_list, list) { │ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
if (f->add) { │ if (f->remove) {
ether_addr_copy(veal->list[i].addr, f->macaddr); │ ether_addr_copy(veal->list[i].addr, f->macaddr);
iavf_set_mac_addr_type(&veal->list[i], f); │ iavf_set_mac_addr_type(&veal->list[i], f);
i++; │ i++;
f->add = false; │ list_del(&f->list);
│ kfree(f);
if (i == count) │ if (i == count)
break; │ break;
} │ }
} │ }
if (!more) │ if (!more)
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER; │ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
│
spin_unlock_bh(&adapter->mac_vlan_list_lock); │ spin_unlock_bh(&adapter->mac_vlan_list_lock);
│
iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len); │ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
kfree(veal); │ kfree(veal);
} │
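
The right-hand (delete) variant shows the full pattern: count the candidates, cap the batch at the message-buffer limit (setting `more` so the aq_required flag stays armed for a follow-up request), and unlink entries while walking, which is what list_for_each_entry_safe() permits in the driver. A userspace model with a hypothetical singly linked list:

    #include <stdio.h>
    #include <stdlib.h>

    struct filter {
        int remove;
        struct filter *next;
    };

    int main(void)
    {
        struct filter *head = NULL, **pp;
        int count = 0, cap = 3, more = 0, sent = 0;
        int i;

        for (i = 0; i < 5; i++) {
            struct filter *f = calloc(1, sizeof(*f));

            f->remove = (i != 2);
            f->next = head;
            head = f;
        }

        for (pp = &head; *pp; pp = &(*pp)->next)
            count += (*pp)->remove;
        if (count > cap) {
            count = cap;
            more = 1; /* another request will follow */
        }

        pp = &head;
        while (*pp && sent < count) {
            struct filter *f = *pp;

            if (f->remove) { /* unlink and free mid-iteration */
                *pp = f->next;
                free(f);
                sent++;
                continue;
            }
            pp = &f->next;
        }
        printf("sent=%d more=%d\n", sent, more);

        while (head) {
            struct filter *n = head->next;

            free(head);
            head = n;
        }
        return 0;
    }
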
linux/drivers/net/ethernet/sfc/falcon/farch.c:1256		      │ linux/drivers/net/ethernet/sfc/farch.c:1262
│
struct ef4_nic *efx = channel->efx; │ struct efx_nic *efx = channel->efx;
unsigned int read_ptr; │ unsigned int read_ptr;
ef4_qword_t event, *p_event; │ efx_qword_t event, *p_event;
int ev_code; │ int ev_code;
int tx_packets = 0; │
int spent = 0; │ int spent = 0;
│
if (budget <= 0) │ if (budget <= 0)
return spent; │ return spent;
│
read_ptr = channel->eventq_read_ptr; │ read_ptr = channel->eventq_read_ptr;
│
for (;;) { │ for (;;) {
p_event = ef4_event(channel, read_ptr); │ p_event = efx_event(channel, read_ptr);
event = *p_event; │ event = *p_event;
│
if (!ef4_event_present(&event)) │ if (!efx_event_present(&event))
/* End of events */ │ /* End of events */
break; │ break;
│
netif_vdbg(channel->efx, intr, channel->efx->net_dev, │ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
"channel %d event is "EF4_QWORD_FMT"\n", │ "channel %d event is "EFX_QWORD_FMT"\n",
channel->channel, EF4_QWORD_VAL(event)); │ channel->channel, EFX_QWORD_VAL(event));
│
/* Clear this event by marking it all ones */ │ /* Clear this event by marking it all ones */
EF4_SET_QWORD(*p_event); │ EFX_SET_QWORD(*p_event);
│
++read_ptr; │ ++read_ptr;
│
ev_code = EF4_QWORD_FIELD(event, FSF_AZ_EV_CODE); │ ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
│
switch (ev_code) { │ switch (ev_code) {
case FSE_AZ_EV_CODE_RX_EV: │ case FSE_AZ_EV_CODE_RX_EV:
ef4_farch_handle_rx_event(channel, &event); │ efx_farch_handle_rx_event(channel, &event);
if (++spent == budget) │ if (++spent == budget)
goto out; │ goto out;
break; │ break;
case FSE_AZ_EV_CODE_TX_EV: │ case FSE_AZ_EV_CODE_TX_EV:
tx_packets += ef4_farch_handle_tx_event(channel, │ efx_farch_handle_tx_event(channel, &event);
&event); │
if (tx_packets > efx->txq_entries) { │
spent = budget; │
goto out; │
} │
break; │ break;
case FSE_AZ_EV_CODE_DRV_GEN_EV: │ case FSE_AZ_EV_CODE_DRV_GEN_EV:
ef4_farch_handle_generated_event(channel, &event); │ efx_farch_handle_generated_event(channel, &event);
break; │ break;
case FSE_AZ_EV_CODE_DRIVER_EV: │ case FSE_AZ_EV_CODE_DRIVER_EV:
ef4_farch_handle_driver_event(channel, &event); │ efx_farch_handle_driver_event(channel, &event);
│ break;
│ #ifdef CONFIG_SFC_SRIOV
│ case FSE_CZ_EV_CODE_USER_EV:
│ efx_siena_sriov_event(channel, &event);
│ break;
│ #endif
│ case FSE_CZ_EV_CODE_MCDI_EV:
│ efx_mcdi_process_event(channel, &event);
break; │ break;
case FSE_AZ_EV_CODE_GLOBAL_EV: │ case FSE_AZ_EV_CODE_GLOBAL_EV:
if (efx->type->handle_global_event && │ if (efx->type->handle_global_event &&
efx->type->handle_global_event(channel, &event)) │ efx->type->handle_global_event(channel, &event))
break; │ break;
fallthrough; │ fallthrough;
default: │ default:
netif_err(channel->efx, hw, channel->efx->net_dev, │ netif_err(channel->efx, hw, channel->efx->net_dev,
"channel %d unknown event type %d (data " │ "channel %d unknown event type %d (data "
EF4_QWORD_FMT ")\n", channel->channel, │ EFX_QWORD_FMT ")\n", channel->channel,
ev_code, EF4_QWORD_VAL(event)); │ ev_code, EFX_QWORD_VAL(event));
} │ }
} │ }
│
out: │ out:
channel->eventq_read_ptr = read_ptr; │ channel->eventq_read_ptr = read_ptr;
return spent; │ return spent;
} │
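
Stripped of the per-event dispatch, the loop above is a classic ring consumer: take slots until an empty one marks the end of events or the NAPI-style budget runs out, clear each slot as it is handled, and publish the read pointer once at the end. A simplified model (the driver only charges RX events against the budget; here every event counts):

    #include <stdio.h>

    #define QSIZE 8

    int main(void)
    {
        int ring[QSIZE] = { 5, 7, 9 }; /* 0 == event not present */
        unsigned int read_ptr = 0;
        int budget = 2, spent = 0;

        for (;;) {
            int *slot = &ring[read_ptr & (QSIZE - 1)];

            if (!*slot)
                break;      /* end of events */
            printf("handling event %d\n", *slot);
            *slot = 0;      /* clear the slot */
            ++read_ptr;
            if (++spent == budget)
                break;
        }
        printf("spent=%d read_ptr=%u\n", spent, read_ptr);
        return 0;
    }
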
linux/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c:432	      │ linux/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c:293
│
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); │ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int ix = 0; │ int ix = 0;
u32 *in; │ u32 *in;
int err; │ int err;
u8 *mc; │ u8 *mc;
│
ttc->g = kcalloc(MLX5_INNER_TTC_NUM_GROUPS, sizeof(*ttc->g), │ ttc->g = kcalloc(MLX5_TTC_NUM_GROUPS, sizeof(*ttc->g), GFP_KERNEL);
GFP_KERNEL); │
if (!ttc->g) │ if (!ttc->g)
return -ENOMEM; │ return -ENOMEM;
in = kvzalloc(inlen, GFP_KERNEL); │ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) { │ if (!in) {
kfree(ttc->g); │ kfree(ttc->g);
ttc->g = NULL; │ ttc->g = NULL;
return -ENOMEM; │ return -ENOMEM;
} │ }
│
/* L4 Group */ │ /* L4 Group */
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); │ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); │ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version); │ if (use_ipv)
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); │ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
│ else
│ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
│ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix); │ MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5_INNER_TTC_GROUP1_SIZE; │ ix += MLX5_TTC_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1); │ MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); │ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups])) │ if (IS_ERR(ttc->g[ttc->num_groups]))
goto err; │ goto err;
ttc->num_groups++; │ ttc->num_groups++;
│
/* L3 Group */ │ /* L3 Group */
MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0); │ MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
MLX5_SET_CFG(in, start_flow_index, ix); │ MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5_INNER_TTC_GROUP2_SIZE; │ ix += MLX5_TTC_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1); │ MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); │ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups])) │ if (IS_ERR(ttc->g[ttc->num_groups]))
goto err; │ goto err;
ttc->num_groups++; │ ttc->num_groups++;
│
/* Any Group */ │ /* Any Group */
memset(in, 0, inlen); │ memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix); │ MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5_INNER_TTC_GROUP3_SIZE; │ ix += MLX5_TTC_GROUP3_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1); │ MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); │ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups])) │ if (IS_ERR(ttc->g[ttc->num_groups]))
goto err; │ goto err;
ttc->num_groups++; │ ttc->num_groups++;
│
kvfree(in); │ kvfree(in);
return 0; │ return 0;
│
err: │ err:
err = PTR_ERR(ttc->g[ttc->num_groups]); │ err = PTR_ERR(ttc->g[ttc->num_groups]);
ttc->g[ttc->num_groups] = NULL; │ ttc->g[ttc->num_groups] = NULL;
kvfree(in); │ kvfree(in);
│
return err; │ return err;
} │
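
Beneath the match-criteria details, the group setup carves one flow table into contiguous index ranges, one per group, by advancing a running cursor (ix). A sketch of just the carving, with invented group names and sizes:

    #include <stdio.h>

    int main(void)
    {
        const char *names[] = { "L4", "L3", "any" };
        int sizes[] = { 16, 8, 1 };
        int ix = 0, g;

        for (g = 0; g < 3; g++) {
            int start = ix;

            ix += sizes[g];
            printf("%s group: flow index %d..%d\n",
                   names[g], start, ix - 1);
        }
        return 0;
    }
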
linux/drivers/net/ethernet/chelsio/cxgb4/sge.c:840		      │ linux/drivers/net/ethernet/chelsio/cxgb4vf/sge.c:904
│
unsigned int i, len; │ unsigned int i, len;
struct ulptx_sge_pair *to; │ struct ulptx_sge_pair *to;
const struct skb_shared_info *si = skb_shinfo(skb); │ const struct skb_shared_info *si = skb_shinfo(skb);
unsigned int nfrags = si->nr_frags; │ unsigned int nfrags = si->nr_frags;
struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; │ struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
│
len = skb_headlen(skb) - start; │ len = skb_headlen(skb) - start;
if (likely(len)) { │ if (likely(len)) {
sgl->len0 = htonl(len); │ sgl->len0 = htonl(len);
sgl->addr0 = cpu_to_be64(addr[0] + start); │ sgl->addr0 = cpu_to_be64(addr[0] + start);
nfrags++; │ nfrags++;
} else { │ } else {
sgl->len0 = htonl(skb_frag_size(&si->frags[0])); │ sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
sgl->addr0 = cpu_to_be64(addr[1]); │ sgl->addr0 = cpu_to_be64(addr[1]);
} │ }
│
sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | │ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
ULPTX_NSGE_V(nfrags)); │ ULPTX_NSGE_V(nfrags));
if (likely(--nfrags == 0)) │ if (likely(--nfrags == 0))
return; │ return;
/* │ /*
* Most of the complexity below deals with the possibility we hit the │ * Most of the complexity below deals with the possibility we hit the
* end of the queue in the middle of writing the SGL. For this case │ * end of the queue in the middle of writing the SGL. For this case
* only we create the SGL in a temporary buffer and then copy it. │ * only we create the SGL in a temporary buffer and then copy it.
*/ │ */
to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; │ to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
│
for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { │ for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); │ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); │ to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
to->addr[0] = cpu_to_be64(addr[i]); │ to->addr[0] = cpu_to_be64(addr[i]);
to->addr[1] = cpu_to_be64(addr[++i]); │ to->addr[1] = cpu_to_be64(addr[++i]);
} │ }
if (nfrags) { │ if (nfrags) {
to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); │ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
to->len[1] = cpu_to_be32(0); │ to->len[1] = cpu_to_be32(0);
to->addr[0] = cpu_to_be64(addr[i + 1]); │ to->addr[0] = cpu_to_be64(addr[i + 1]);
} │ }
if (unlikely((u8 *)end > (u8 *)q->stat)) { │ if (unlikely((u8 *)end > (u8 *)tq->stat)) {
unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; │ unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
│
if (likely(part0)) │ if (likely(part0))
memcpy(sgl->sge, buf, part0); │ memcpy(sgl->sge, buf, part0);
part1 = (u8 *)end - (u8 *)q->stat; │ part1 = (u8 *)end - (u8 *)tq->stat;
memcpy(q->desc, (u8 *)buf + part0, part1); │ memcpy(tq->desc, (u8 *)buf + part0, part1);
end = (void *)q->desc + part1; │ end = (void *)tq->desc + part1;
} │ }
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ │ if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
*end = 0; │ *end = 0;
} │
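
The wrap handling the comment describes is worth a standalone demonstration: when a record would run past the end of the ring, build it in a bounce buffer, then copy the head of the record to the ring's tail and the remainder to its base:

    #include <stdio.h>
    #include <string.h>

    #define QBYTES 16

    int main(void)
    {
        char ring[QBYTES];
        const char rec[8] = "ABCDEFGH";  /* 8-byte record */
        unsigned int pos = 12;           /* only 4 bytes left at the end */
        unsigned int part0 = QBYTES - pos;
        unsigned int part1 = sizeof(rec) - part0;
        char buf[sizeof(rec)];

        memset(ring, '.', sizeof(ring));
        memcpy(buf, rec, sizeof(buf));    /* build in the bounce buffer */
        memcpy(&ring[pos], buf, part0);   /* head up to the ring end */
        memcpy(ring, buf + part0, part1); /* remainder wraps to the base */

        printf("%.*s\n", QBYTES, ring);   /* prints EFGH........ABCD */
        return 0;
    }
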
linux/drivers/net/ethernet/sfc/farch.c:2420			      │ linux/drivers/net/ethernet/sfc/falcon/farch.c:2351
│
/* Search concurrently for │ /* Search concurrently for
* (1) a filter to be replaced (rep_index): any filter │ * (1) a filter to be replaced (rep_index): any filter
* with the same match values, up to the current │ * with the same match values, up to the current
* search depth for this type, and │ * search depth for this type, and
* (2) the insertion point (ins_index): (1) or any │ * (2) the insertion point (ins_index): (1) or any
* free slot before it or up to the maximum search │ * free slot before it or up to the maximum search
* depth for this priority │ * depth for this priority
* We fail if we cannot find (2). │ * We fail if we cannot find (2).
* │ *
* We can stop once either │ * We can stop once either
* (a) we find (1), in which case we have definitely │ * (a) we find (1), in which case we have definitely
* found (2) as well; or │ * found (2) as well; or
* (b) we have searched exhaustively for (1), and have │ * (b) we have searched exhaustively for (1), and have
* either found (2) or searched exhaustively for it │ * either found (2) or searched exhaustively for it
*/ │ */
u32 key = efx_farch_filter_build(&filter, &spec); │ u32 key = ef4_farch_filter_build(&filter, &spec);
unsigned int hash = efx_farch_filter_hash(key); │ unsigned int hash = ef4_farch_filter_hash(key);
unsigned int incr = efx_farch_filter_increment(key); │ unsigned int incr = ef4_farch_filter_increment(key);
unsigned int max_rep_depth = table->search_limit[spec.type]; │ unsigned int max_rep_depth = table->search_limit[spec.type];
unsigned int max_ins_depth = │ unsigned int max_ins_depth =
spec.priority <= EFX_FILTER_PRI_HINT ? │ spec.priority <= EF4_FILTER_PRI_HINT ?
EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX : │ EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX :
EFX_FARCH_FILTER_CTL_SRCH_MAX; │ EF4_FARCH_FILTER_CTL_SRCH_MAX;
unsigned int i = hash & (table->size - 1); │ unsigned int i = hash & (table->size - 1);
│
ins_index = -1; │ ins_index = -1;
depth = 1; │ depth = 1;
│
│ spin_lock_bh(&efx->filter_lock);
│
for (;;) { │ for (;;) {
if (!test_bit(i, table->used_bitmap)) { │ if (!test_bit(i, table->used_bitmap)) {
if (ins_index < 0) │ if (ins_index < 0)
ins_index = i; │ ins_index = i;
} else if (efx_farch_filter_equal(&spec, │ } else if (ef4_farch_filter_equal(&spec,
&table->spec[i])) { │ &table->spec[i])) {
/* Case (a) */ │ /* Case (a) */
if (ins_index < 0) │ if (ins_index < 0)
ins_index = i; │ ins_index = i;
rep_index = i; │ rep_index = i;
break; │ break;
} │ }
│
if (depth >= max_rep_depth && │ if (depth >= max_rep_depth &&
(ins_index >= 0 || depth >= max_ins_depth)) { │ (ins_index >= 0 || depth >= max_ins_depth)) {
/* Case (b) */ │ /* Case (b) */
if (ins_index < 0) { │ if (ins_index < 0) {
rc = -EBUSY; │ rc = -EBUSY;
goto out_unlock; │ goto out;
} │ }
rep_index = -1; │ rep_index = -1;
break; │ break;
} │ }
│
i = (i + incr) & (table->size - 1); │ i = (i + incr) & (table->size - 1);
++depth; │ ++depth;
} │ }
} │
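
The loop above is open addressing with a key-derived probe increment: a single pass simultaneously looks for an equal filter to replace and remembers the first free slot for insertion, each bounded by its own search depth. A hedged, self-contained restatement of that dual search (the table layout and increment choice here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

#define TABLE_SIZE 256			/* power of two, illustrative */

struct table {
	bool     used[TABLE_SIZE];
	uint32_t key[TABLE_SIZE];
};

/* Probe for key: *rep gets a matching slot (or -1), *ins the first
 * usable insertion slot (or -1 if the search depth is exhausted).
 */
static void dual_search(const struct table *t, uint32_t key,
			unsigned int max_rep, unsigned int max_ins,
			int *rep, int *ins)
{
	unsigned int i = key & (TABLE_SIZE - 1);
	unsigned int incr = (key >> 8) | 1;	/* odd => visits every slot */
	unsigned int depth = 1;

	*rep = *ins = -1;
	for (;;) {
		if (!t->used[i]) {
			if (*ins < 0)
				*ins = i;	/* first free slot */
		} else if (t->key[i] == key) {
			if (*ins < 0)
				*ins = i;
			*rep = i;		/* found a match: done */
			return;
		}
		/* stop once both searches are exhausted */
		if (depth >= max_rep && (*ins >= 0 || depth >= max_ins))
			return;
		i = (i + incr) & (TABLE_SIZE - 1);
		++depth;
	}
}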
linux/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c:709 │ linux/drivers/net/ethernet/stmicro/stmmac/dwmac5.c:90
│
{ true, "TXCES", "MTL TX Memory Error" }, │ { true, "TXCES", "MTL TX Memory Error" },
{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" }, │ { true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
{ true, "TXUES", "MTL TX Memory Error" }, │ { true, "TXUES", "MTL TX Memory Error" },
{ false, "UNKNOWN", "Unknown Error" }, /* 3 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
{ true, "RXCES", "MTL RX Memory Error" }, │ { true, "RXCES", "MTL RX Memory Error" },
{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" }, │ { true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
{ true, "RXUES", "MTL RX Memory Error" }, │ { true, "RXUES", "MTL RX Memory Error" },
{ false, "UNKNOWN", "Unknown Error" }, /* 7 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
{ true, "ECES", "MTL EST Memory Error" }, │ { true, "ECES", "MTL EST Memory Error" },
{ true, "EAMS", "MTL EST Memory Address Mismatch Error" }, │ { true, "EAMS", "MTL EST Memory Address Mismatch Error" },
{ true, "EUES", "MTL EST Memory Error" }, │ { true, "EUES", "MTL EST Memory Error" },
{ false, "UNKNOWN", "Unknown Error" }, /* 11 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
{ true, "RPCES", "MTL RX Parser Memory Error" }, │ { true, "RPCES", "MTL RX Parser Memory Error" },
{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" }, │ { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
{ true, "RPUES", "MTL RX Parser Memory Error" }, │ { true, "RPUES", "MTL RX Parser Memory Error" },
{ false, "UNKNOWN", "Unknown Error" }, /* 15 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
{ false, "UNKNOWN", "Unknown Error" }, /* 16 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
{ false, "UNKNOWN", "Unknown Error" }, /* 17 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
{ false, "UNKNOWN", "Unknown Error" }, /* 18 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
{ false, "UNKNOWN", "Unknown Error" }, /* 19 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
{ false, "UNKNOWN", "Unknown Error" }, /* 20 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
{ false, "UNKNOWN", "Unknown Error" }, /* 21 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
{ false, "UNKNOWN", "Unknown Error" }, /* 22 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
{ false, "UNKNOWN", "Unknown Error" }, /* 23 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
{ false, "UNKNOWN", "Unknown Error" }, /* 24 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
{ false, "UNKNOWN", "Unknown Error" }, /* 25 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
{ false, "UNKNOWN", "Unknown Error" }, /* 26 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
{ false, "UNKNOWN", "Unknown Error" }, /* 27 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
{ false, "UNKNOWN", "Unknown Error" }, /* 28 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
{ false, "UNKNOWN", "Unknown Error" }, /* 29 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
{ false, "UNKNOWN", "Unknown Error" }, /* 30 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
{ false, "UNKNOWN", "Unknown Error" }, /* 31 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
} │
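
Tables like this map each bit of a hardware safety-error status register to an acronym and description, with UNKNOWN padding so the bit index doubles as the array index. A sketch of how such a table might be walked to report asserted bits (the decoding function and entry initializers here are assumptions, not the driver's code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct err_desc {
	bool        valid;
	const char *acronym;
	const char *desc;
};

static const struct err_desc errs[32] = {
	[0] = { true, "TXCES", "MTL TX Memory Error" },
	[1] = { true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	/* remaining entries default to { false, NULL, NULL } */
};

/* Report every asserted, known bit of a 32-bit error status word. */
static void report_errors(uint32_t status)
{
	for (int bit = 0; bit < 32; bit++) {
		if (!(status & (1u << bit)) || !errs[bit].valid)
			continue;
		printf("bit %d: %s (%s)\n", bit,
		       errs[bit].acronym, errs[bit].desc);
	}
}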
linux/drivers/net/ethernet/sfc/farch.c:2233 │ linux/drivers/net/ethernet/sfc/falcon/farch.c:2166
│
u32 data3; │ u32 data3;
│
switch (efx_farch_filter_spec_table_id(spec)) { │ switch (ef4_farch_filter_spec_table_id(spec)) {
case EFX_FARCH_FILTER_TABLE_RX_IP: { │ case EF4_FARCH_FILTER_TABLE_RX_IP: {
bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL || │ bool is_udp = (spec->type == EF4_FARCH_FILTER_UDP_FULL ||
spec->type == EFX_FARCH_FILTER_UDP_WILD); │ spec->type == EF4_FARCH_FILTER_UDP_WILD);
EFX_POPULATE_OWORD_7( │ EF4_POPULATE_OWORD_7(
*filter, │ *filter,
FRF_BZ_RSS_EN, │ FRF_BZ_RSS_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_RSS), │ !!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
FRF_BZ_SCATTER_EN, │ FRF_BZ_SCATTER_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), │ !!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
FRF_BZ_TCP_UDP, is_udp, │ FRF_BZ_TCP_UDP, is_udp,
FRF_BZ_RXQ_ID, spec->dmaq_id, │ FRF_BZ_RXQ_ID, spec->dmaq_id,
EFX_DWORD_2, spec->data[2], │ EF4_DWORD_2, spec->data[2],
EFX_DWORD_1, spec->data[1], │ EF4_DWORD_1, spec->data[1],
EFX_DWORD_0, spec->data[0]); │ EF4_DWORD_0, spec->data[0]);
data3 = is_udp; │ data3 = is_udp;
break; │ break;
} │ }
│
case EFX_FARCH_FILTER_TABLE_RX_MAC: { │ case EF4_FARCH_FILTER_TABLE_RX_MAC: {
bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; │ bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_7( │ EF4_POPULATE_OWORD_7(
*filter, │ *filter,
FRF_CZ_RMFT_RSS_EN, │ FRF_CZ_RMFT_RSS_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_RSS), │ !!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
FRF_CZ_RMFT_SCATTER_EN, │ FRF_CZ_RMFT_SCATTER_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), │ !!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, │ FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, │ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], │ FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], │ FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
FRF_CZ_RMFT_VLAN_ID, spec->data[0]); │ FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
data3 = is_wild; │ data3 = is_wild;
break; │ break;
} │ }
│
case EFX_FARCH_FILTER_TABLE_TX_MAC: { │ case EF4_FARCH_FILTER_TABLE_TX_MAC: {
bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; │ bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_5(*filter, │ EF4_POPULATE_OWORD_5(*filter,
FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, │ FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, │ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], │ FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], │ FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
FRF_CZ_TMFT_VLAN_ID, spec->data[0]); │ FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
data3 = is_wild | spec->dmaq_id << 1; │ data3 = is_wild | spec->dmaq_id << 1;
break; │ break;
} │ }
│
default: │ default:
BUG(); │ BUG();
} │ }
│
return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; │ return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
} │
linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c:560 │ linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c:595
│
struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats); │ struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
│
/* the macros below will use "bmac1_stats" type */ │ /* the macros below will use "bmac2_stats" type */
UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); │ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); │ UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); │ UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); │ UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); │ UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); │ UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); │ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); │ UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); │ UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
│
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); │ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); │ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); │ UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
UPDATE_STAT64(tx_stat_gt127, │ UPDATE_STAT64(tx_stat_gt127,
tx_stat_etherstatspkts65octetsto127octets); │ tx_stat_etherstatspkts65octetsto127octets);
UPDATE_STAT64(tx_stat_gt255, │ UPDATE_STAT64(tx_stat_gt255,
tx_stat_etherstatspkts128octetsto255octets); │ tx_stat_etherstatspkts128octetsto255octets);
UPDATE_STAT64(tx_stat_gt511, │ UPDATE_STAT64(tx_stat_gt511,
tx_stat_etherstatspkts256octetsto511octets); │ tx_stat_etherstatspkts256octetsto511octets);
UPDATE_STAT64(tx_stat_gt1023, │ UPDATE_STAT64(tx_stat_gt1023,
tx_stat_etherstatspkts512octetsto1023octets); │ tx_stat_etherstatspkts512octetsto1023octets);
UPDATE_STAT64(tx_stat_gt1518, │ UPDATE_STAT64(tx_stat_gt1518,
tx_stat_etherstatspkts1024octetsto1522octets); │ tx_stat_etherstatspkts1024octetsto1522octets);
UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); │ UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); │ UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); │ UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); │ UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
UPDATE_STAT64(tx_stat_gterr, │ UPDATE_STAT64(tx_stat_gterr,
tx_stat_dot3statsinternalmactransmiterrors); │ tx_stat_dot3statsinternalmactransmiterrors);
UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); │ UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
│
│ /* collect PFC stats */
│ pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
│ pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
│
│ pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
│ pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
} │
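
UPDATE_STAT64 folds a pair of hi/lo 32-bit hardware counter halves into a running 64-bit software total. A minimal sketch of that accumulation, under the simplifying assumption that the hardware exposes a free-running 64-bit counter as two 32-bit halves (names are illustrative):

#include <stdint.h>

struct stat64 {
	uint64_t hw_prev;	/* last raw hardware reading */
	uint64_t total;		/* accumulated software counter */
};

/* Combine the hi/lo halves and add the delta since the last read.
 * Unsigned subtraction keeps the delta correct across a wrap.
 */
static void update_stat64(struct stat64 *s, uint32_t hi, uint32_t lo)
{
	uint64_t now = ((uint64_t)hi << 32) | lo;

	s->total += now - s->hw_prev;
	s->hw_prev = now;
}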
linux/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c:698 │ linux/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c:7217
│
u32 sge_control, sge_control2; │ u32 sge_control, sge_control2;
unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift; │ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
│
sge_control = adapter->params.sge.sge_control; │ sge_control = t4_read_reg(adap, SGE_CONTROL_A);
│
/* T4 uses a single control field to specify both the PCIe Padding and │ /* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these │ * Packing Boundary. T5 introduced the ability to specify these
* separately. The actual Ingress Packet Data alignment boundary │ * separately. The actual Ingress Packet Data alignment boundary
* within Packed Buffer Mode is the maximum of these two │ * within Packed Buffer Mode is the maximum of these two
* specifications. (Note that it makes no real practical sense to │ * specifications. (Note that it makes no real practical sense to
* have the Padding Boundary be larger than the Packing Boundary but you │ * have the Padding Boundary be larger than the Packing Boundary but you
* could set the chip up that way and, in fact, legacy T4 code would │ * could set the chip up that way and, in fact, legacy T4 code would
* end up doing this because it would initialize the Padding Boundary and │ * end up doing this because it would initialize the Padding Boundary and
* leave the Packing Boundary initialized to 0 (16 bytes).) │ * leave the Packing Boundary initialized to 0 (16 bytes).)
* Padding Boundary values in T6 start from 8B, │ * Padding Boundary values in T6 start from 8B,
* whereas it is 32B for T4 and T5. │ * whereas it is 32B for T4 and T5.
*/ │ */
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) │ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
ingpad_shift = INGPADBOUNDARY_SHIFT_X; │ ingpad_shift = INGPADBOUNDARY_SHIFT_X;
else │ else
ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X; │ ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
│
ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift); │ ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
│
fl_align = ingpadboundary; │ fl_align = ingpadboundary;
if (!is_t4(adapter->params.chip)) { │ if (!is_t4(adap->params.chip)) {
/* T5 has a different interpretation of one of the PCIe Packing │ /* T5 has a weird interpretation of one of the PCIe Packing
* Boundary values. │ * Boundary values. No idea why ...
*/ │ */
sge_control2 = adapter->params.sge.sge_control2; │ sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
ingpackboundary = INGPACKBOUNDARY_G(sge_control2); │ ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
if (ingpackboundary == INGPACKBOUNDARY_16B_X) │ if (ingpackboundary == INGPACKBOUNDARY_16B_X)
ingpackboundary = 16; │ ingpackboundary = 16;
else │ else
ingpackboundary = 1 << (ingpackboundary + │ ingpackboundary = 1 << (ingpackboundary +
INGPACKBOUNDARY_SHIFT_X); │ INGPACKBOUNDARY_SHIFT_X);
│
fl_align = max(ingpadboundary, ingpackboundary); │ fl_align = max(ingpadboundary, ingpackboundary);
} │ }
return fl_align; │ return fl_align;
} │
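
As the comment explains, both boundaries are stored as log2 fields and the effective free-list alignment is the larger of the two once decoded. A small worked example (field values are made up):

#include <stdio.h>

/* Illustrative re-computation of the free-list alignment: both
 * boundaries are encoded as log2 fields, and the effective alignment
 * is the larger of the two decoded values.
 */
static unsigned int fl_align(unsigned int ingpad_log2,
			     unsigned int ingpack_log2)
{
	unsigned int pad  = 1u << ingpad_log2;
	unsigned int pack = 1u << ingpack_log2;

	return pad > pack ? pad : pack;
}

int main(void)
{
	/* e.g. 32-byte padding boundary, 64-byte packing boundary */
	printf("fl_align = %u\n", fl_align(5, 6));	/* -> 64 */
	return 0;
}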
linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c:4799 │ linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c:4734
│
struct mlx4_priv *priv = mlx4_priv(dev); │ struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; │ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *cq_list = │ struct list_head *srq_list =
&tracker->slave_list[slave].res_list[RES_CQ]; │ &tracker->slave_list[slave].res_list[RES_SRQ];
struct res_cq *cq; │ struct res_srq *srq;
struct res_cq *tmp; │ struct res_srq *tmp;
int state; │ int state;
u64 in_param; │ u64 in_param;
int cqn; │ int srqn;
int err; │ int err;
│
err = move_all_busy(dev, slave, RES_CQ); │ err = move_all_busy(dev, slave, RES_SRQ);
if (err) │ if (err)
mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for sla │ mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for s
slave); │ slave);
│
spin_lock_irq(mlx4_tlock(dev)); │ spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(cq, tmp, cq_list, com.list) { │ list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev)); │ spin_unlock_irq(mlx4_tlock(dev));
if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) { │ if (srq->com.owner == slave) {
cqn = cq->com.res_id; │ srqn = srq->com.res_id;
state = cq->com.from_state; │ state = srq->com.from_state;
while (state != 0) { │ while (state != 0) {
switch (state) { │ switch (state) {
case RES_CQ_ALLOCATED: │ case RES_SRQ_ALLOCATED:
__mlx4_cq_free_icm(dev, cqn); │ __mlx4_srq_free_icm(dev, srqn);
spin_lock_irq(mlx4_tlock(dev)); │ spin_lock_irq(mlx4_tlock(dev));
rb_erase(&cq->com.node, │ rb_erase(&srq->com.node,
&tracker->res_tree[RES_CQ]); │ &tracker->res_tree[RES_SRQ]);
list_del(&cq->com.list); │ list_del(&srq->com.list);
spin_unlock_irq(mlx4_tlock(dev)); │ spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave, │ mlx4_release_resource(dev, slave,
RES_CQ, 1, 0); │ RES_SRQ, 1, 0);
kfree(cq); │ kfree(srq);
state = 0; │ state = 0;
break; │ break;
│
case RES_CQ_HW: │ case RES_SRQ_HW:
in_param = slave; │ in_param = slave;
err = mlx4_cmd(dev, in_param, cqn, 1, │ err = mlx4_cmd(dev, in_param, srqn, 1,
MLX4_CMD_HW2SW_CQ, │ MLX4_CMD_HW2SW_SRQ,
MLX4_CMD_TIME_CLASS_A, │ MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE); │ MLX4_CMD_NATIVE);
if (err) │ if (err)
mlx4_dbg(dev, "rem_slave_cqs: failed to │ mlx4_dbg(dev, "rem_slave_srqs: failed to
slave, cqn); │ slave, srqn);
atomic_dec(&cq->mtt->ref_count); │
state = RES_CQ_ALLOCATED; │ atomic_dec(&srq->mtt->ref_count);
│ if (srq->cq)
│ atomic_dec(&srq->cq->ref_count);
│ state = RES_SRQ_ALLOCATED;
break; │ break;
│
default: │ default:
state = 0; │ state = 0;
} │ }
} │ }
} │ }
spin_lock_irq(mlx4_tlock(dev)); │ spin_lock_irq(mlx4_tlock(dev));
} │ }
spin_unlock_irq(mlx4_tlock(dev)); │ spin_unlock_irq(mlx4_tlock(dev));
} │
linux/drivers/net/ethernet/intel/igb/igb_main.c:931 │ linux/drivers/net/ethernet/intel/igc/igc_main.c:5153
│
unsigned int num_q_vectors = adapter->num_q_vectors; │ unsigned int num_q_vectors = adapter->num_q_vectors;
│ int i = 0, err = 0, vector = 0, free_vector = 0;
struct net_device *netdev = adapter->netdev; │ struct net_device *netdev = adapter->netdev;
int i, err = 0, vector = 0, free_vector = 0; │
│
err = request_irq(adapter->msix_entries[vector].vector, │ err = request_irq(adapter->msix_entries[vector].vector,
igb_msix_other, 0, netdev->name, adapter); │ &igc_msix_other, 0, netdev->name, adapter);
if (err) │ if (err)
goto err_out; │ goto err_out;
│
if (num_q_vectors > MAX_Q_VECTORS) { │ if (num_q_vectors > MAX_Q_VECTORS) {
num_q_vectors = MAX_Q_VECTORS; │ num_q_vectors = MAX_Q_VECTORS;
dev_warn(&adapter->pdev->dev, │ dev_warn(&adapter->pdev->dev,
"The number of queue vectors (%d) is higher than max allowed (% │ "The number of queue vectors (%d) is higher than max allowed (%
adapter->num_q_vectors, MAX_Q_VECTORS); │ adapter->num_q_vectors, MAX_Q_VECTORS);
} │ }
for (i = 0; i < num_q_vectors; i++) { │ for (i = 0; i < num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i]; │ struct igc_q_vector *q_vector = adapter->q_vector[i];
│
vector++; │ vector++;
│
q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); │ q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
│
if (q_vector->rx.ring && q_vector->tx.ring) │ if (q_vector->rx.ring && q_vector->tx.ring)
sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, │ sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
q_vector->rx.ring->queue_index); │ q_vector->rx.ring->queue_index);
else if (q_vector->tx.ring) │ else if (q_vector->tx.ring)
sprintf(q_vector->name, "%s-tx-%u", netdev->name, │ sprintf(q_vector->name, "%s-tx-%u", netdev->name,
q_vector->tx.ring->queue_index); │ q_vector->tx.ring->queue_index);
else if (q_vector->rx.ring) │ else if (q_vector->rx.ring)
sprintf(q_vector->name, "%s-rx-%u", netdev->name, │ sprintf(q_vector->name, "%s-rx-%u", netdev->name,
q_vector->rx.ring->queue_index); │ q_vector->rx.ring->queue_index);
else │ else
sprintf(q_vector->name, "%s-unused", netdev->name); │ sprintf(q_vector->name, "%s-unused", netdev->name);
│
err = request_irq(adapter->msix_entries[vector].vector, │ err = request_irq(adapter->msix_entries[vector].vector,
igb_msix_ring, 0, q_vector->name, │ igc_msix_ring, 0, q_vector->name,
q_vector); │ q_vector);
if (err) │ if (err)
goto err_free; │ goto err_free;
} │ }
│
igb_configure_msix(adapter); │ igc_configure_msix(adapter);
return 0; │ return 0;
│
err_free: │ err_free:
/* free already assigned IRQs */ │ /* free already assigned IRQs */
free_irq(adapter->msix_entries[free_vector++].vector, adapter); │ free_irq(adapter->msix_entries[free_vector++].vector, adapter);
│
vector--; │ vector--;
for (i = 0; i < vector; i++) { │ for (i = 0; i < vector; i++) {
free_irq(adapter->msix_entries[free_vector++].vector, │ free_irq(adapter->msix_entries[free_vector++].vector,
adapter->q_vector[i]); │ adapter->q_vector[i]);
} │ }
err_out: │ err_out:
return err; │ return err;
} │
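
The err_free path illustrates a common acquire-then-unwind pattern: request vectors in order and, on the first failure, free exactly the ones already requested. A self-contained sketch of the same shape, with acquire()/release() standing in for request_irq()/free_irq() (the stub fails on the third resource so the unwind path runs):

#include <stdio.h>

static int acquire(int i)
{
	if (i == 2)
		return -1;		/* simulated failure */
	printf("acquired %d\n", i);
	return 0;
}

static void release(int i)
{
	printf("released %d\n", i);
}

/* Take n resources in order; on failure release exactly the ones
 * already taken, in reverse order, and propagate the error.
 */
static int acquire_all(int n)
{
	int i, err = 0;

	for (i = 0; i < n; i++) {
		err = acquire(i);
		if (err)
			break;
	}
	if (!err)
		return 0;
	while (--i >= 0)
		release(i);
	return err;
}

int main(void)
{
	return acquire_all(4) ? 1 : 0;
}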
linux/drivers/net/ethernet/ti/cpsw.c:456 │ linux/drivers/net/ethernet/ti/cpsw_new.c:648
│
struct phy_device *phy = slave->phy; │ struct cpsw_priv *priv = netdev_priv(ndev);
u32 mac_control = 0; │
u32 slave_port; │
struct cpsw_common *cpsw = priv->cpsw; │ struct cpsw_common *cpsw = priv->cpsw;
│ struct cpsw_slave *slave;
│ struct phy_device *phy;
│ u32 mac_control = 0;
│
│ slave = &cpsw->slaves[priv->emac_port - 1];
│ phy = slave->phy;
│
if (!phy) │ if (!phy)
return; │ return;
│
slave_port = cpsw_get_slave_port(slave->slave_num); │
│
if (phy->link) { │ if (phy->link) {
mac_control = CPSW_SL_CTL_GMII_EN; │ mac_control = CPSW_SL_CTL_GMII_EN;
│
if (phy->speed == 1000) │ if (phy->speed == 1000)
mac_control |= CPSW_SL_CTL_GIG; │ mac_control |= CPSW_SL_CTL_GIG;
if (phy->duplex) │ if (phy->duplex)
mac_control |= CPSW_SL_CTL_FULLDUPLEX; │ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
│
/* set speed_in input in case RMII mode is used in 100Mbps */ │ /* set speed_in input in case RMII mode is used in 100Mbps */
if (phy->speed == 100) │ if (phy->speed == 100)
mac_control |= CPSW_SL_CTL_IFCTL_A; │ mac_control |= CPSW_SL_CTL_IFCTL_A;
/* in band mode only works in 10Mbps RGMII mode */ │ /* in band mode only works in 10Mbps RGMII mode */
else if ((phy->speed == 10) && phy_interface_is_rgmii(phy)) │ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */ │ mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
│
if (priv->rx_pause) │ if (priv->rx_pause)
mac_control |= CPSW_SL_CTL_RX_FLOW_EN; │ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
│
if (priv->tx_pause) │ if (priv->tx_pause)
mac_control |= CPSW_SL_CTL_TX_FLOW_EN; │ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
│
if (mac_control != slave->mac_control) │ if (mac_control != slave->mac_control)
cpsw_sl_ctl_set(slave->mac_sl, mac_control); │ cpsw_sl_ctl_set(slave->mac_sl, mac_control);
│
/* enable forwarding */ │ /* enable forwarding */
cpsw_ale_control_set(cpsw->ale, slave_port, │ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); │ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
│
*link = true; │ netif_tx_wake_all_queues(ndev);
│
if (priv->shp_cfg_speed && │ if (priv->shp_cfg_speed &&
priv->shp_cfg_speed != slave->phy->speed && │ priv->shp_cfg_speed != slave->phy->speed &&
!cpsw_shp_is_off(priv)) │ !cpsw_shp_is_off(priv))
dev_warn(priv->dev, │ dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are ch
"Speed was changed, CBS shaper speeds are changed!"); │
} else { │ } else {
│ netif_tx_stop_all_queues(ndev);
│
mac_control = 0; │ mac_control = 0;
/* disable forwarding */ │ /* disable forwarding */
cpsw_ale_control_set(cpsw->ale, slave_port, │ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); │ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
│
cpsw_sl_wait_for_idle(slave->mac_sl, 100); │ cpsw_sl_wait_for_idle(slave->mac_sl, 100);
│
cpsw_sl_ctl_reset(slave->mac_sl); │ cpsw_sl_ctl_reset(slave->mac_sl);
} │ }
│
if (mac_control != slave->mac_control) │ if (mac_control != slave->mac_control)
phy_print_status(phy); │ phy_print_status(phy);
│
slave->mac_control = mac_control; │ slave->mac_control = mac_control;
│
│ if (phy->link && cpsw_need_resplit(cpsw))
│ cpsw_split_res(cpsw);
} │
linux/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c:1696 │ linux/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c:233
│
struct ethtool_link_ksettings *lks = &pdata->phy.lks; │ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
│ struct xgbe_phy_data *phy_data = pdata->phy_data;
enum xgbe_mode mode; │ enum xgbe_mode mode;
unsigned int ad_reg, lp_reg; │ unsigned int ad_reg, lp_reg;
│
XGBE_SET_LP_ADV(lks, Autoneg); │ XGBE_SET_LP_ADV(lks, Autoneg);
XGBE_SET_LP_ADV(lks, Backplane); │ XGBE_SET_LP_ADV(lks, Backplane);
│
/* Compare Advertisement and Link Partner register 1 */ │ /* Compare Advertisement and Link Partner register 1 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); │ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); │ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
if (lp_reg & 0x400) │ if (lp_reg & 0x400)
XGBE_SET_LP_ADV(lks, Pause); │ XGBE_SET_LP_ADV(lks, Pause);
if (lp_reg & 0x800) │ if (lp_reg & 0x800)
XGBE_SET_LP_ADV(lks, Asym_Pause); │ XGBE_SET_LP_ADV(lks, Asym_Pause);
│
if (pdata->phy.pause_autoneg) { │ if (pdata->phy.pause_autoneg) {
/* Set flow control based on auto-negotiation result */ │ /* Set flow control based on auto-negotiation result */
pdata->phy.tx_pause = 0; │ pdata->phy.tx_pause = 0;
pdata->phy.rx_pause = 0; │ pdata->phy.rx_pause = 0;
│
if (ad_reg & lp_reg & 0x400) { │ if (ad_reg & lp_reg & 0x400) {
pdata->phy.tx_pause = 1; │ pdata->phy.tx_pause = 1;
pdata->phy.rx_pause = 1; │ pdata->phy.rx_pause = 1;
} else if (ad_reg & lp_reg & 0x800) { │ } else if (ad_reg & lp_reg & 0x800) {
if (ad_reg & 0x400) │ if (ad_reg & 0x400)
pdata->phy.rx_pause = 1; │ pdata->phy.rx_pause = 1;
else if (lp_reg & 0x400) │ else if (lp_reg & 0x400)
pdata->phy.tx_pause = 1; │ pdata->phy.tx_pause = 1;
} │ }
} │ }
│
/* Compare Advertisement and Link Partner register 2 */ │ /* Compare Advertisement and Link Partner register 2 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); │ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); │ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
if (lp_reg & 0x80) │ if (lp_reg & 0x80)
XGBE_SET_LP_ADV(lks, 10000baseKR_Full); │ XGBE_SET_LP_ADV(lks, 10000baseKR_Full);
if (lp_reg & 0x20) │ if (lp_reg & 0x20) {
XGBE_SET_LP_ADV(lks, 1000baseKX_Full); │ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
│ XGBE_SET_LP_ADV(lks, 2500baseX_Full);
│ else
│ XGBE_SET_LP_ADV(lks, 1000baseKX_Full);
│ }
│
ad_reg &= lp_reg; │ ad_reg &= lp_reg;
if (ad_reg & 0x80) │ if (ad_reg & 0x80) {
mode = XGBE_MODE_KR; │ mode = XGBE_MODE_KR;
else if (ad_reg & 0x20) │ } else if (ad_reg & 0x20) {
mode = XGBE_MODE_KX_1000; │ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
else │ mode = XGBE_MODE_KX_2500;
│ else
│ mode = XGBE_MODE_KX_1000;
│ } else {
mode = XGBE_MODE_UNKNOWN; │ mode = XGBE_MODE_UNKNOWN;
│ }
│
/* Compare Advertisement and Link Partner register 3 */ │ /* Compare Advertisement and Link Partner register 3 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); │ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); │ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
if (lp_reg & 0xc000) │ if (lp_reg & 0xc000)
XGBE_SET_LP_ADV(lks, 10000baseR_FEC); │ XGBE_SET_LP_ADV(lks, 10000baseR_FEC);
│
return mode; │ return mode;
} │
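
The pause handling above is the standard IEEE 802.3 flow-control resolution: symmetric pause when both sides advertise bit 0x400, otherwise asymmetric pause (bit 0x800) enables a single direction depending on which side also set the symmetric bit. A stand-alone restatement (the constant names are invented):

#include <stdbool.h>
#include <stdint.h>

#define ADV_PAUSE	0x400	/* symmetric pause bit */
#define ADV_ASYM_PAUSE	0x800	/* asymmetric pause bit */

/* Resolve TX/RX pause from the local (ad) and link-partner (lp)
 * advertisement words, mirroring the branch structure above.
 */
static void resolve_pause(uint16_t ad, uint16_t lp,
			  bool *tx_pause, bool *rx_pause)
{
	*tx_pause = false;
	*rx_pause = false;

	if (ad & lp & ADV_PAUSE) {		/* both symmetric */
		*tx_pause = true;
		*rx_pause = true;
	} else if (ad & lp & ADV_ASYM_PAUSE) {	/* both asymmetric */
		if (ad & ADV_PAUSE)		/* we asked for pause */
			*rx_pause = true;
		else if (lp & ADV_PAUSE)	/* partner asked */
			*tx_pause = true;
	}
}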
linux/drivers/net/ethernet/stmicro/stmmac/dwmac5.c:138 │ linux/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c:757
│
{ true, "TCES", "DMA TSO Memory Error" }, │ { true, "TCES", "DMA TSO Memory Error" },
{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" }, │ { true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
{ true, "TUES", "DMA TSO Memory Error" }, │ { true, "TUES", "DMA TSO Memory Error" },
{ false, "UNKNOWN", "Unknown Error" }, /* 3 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
{ false, "UNKNOWN", "Unknown Error" }, /* 4 */ │ { true, "DCES", "DMA DCACHE Memory Error" },
{ false, "UNKNOWN", "Unknown Error" }, /* 5 */ │ { true, "DAMS", "DMA DCACHE Address Mismatch Error" },
{ false, "UNKNOWN", "Unknown Error" }, /* 6 */ │ { true, "DUES", "DMA DCACHE Memory Error" },
{ false, "UNKNOWN", "Unknown Error" }, /* 7 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
{ false, "UNKNOWN", "Unknown Error" }, /* 8 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 8 */
{ false, "UNKNOWN", "Unknown Error" }, /* 9 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 9 */
{ false, "UNKNOWN", "Unknown Error" }, /* 10 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 10 */
{ false, "UNKNOWN", "Unknown Error" }, /* 11 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
{ false, "UNKNOWN", "Unknown Error" }, /* 12 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 12 */
{ false, "UNKNOWN", "Unknown Error" }, /* 13 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 13 */
{ false, "UNKNOWN", "Unknown Error" }, /* 14 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 14 */
{ false, "UNKNOWN", "Unknown Error" }, /* 15 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
{ false, "UNKNOWN", "Unknown Error" }, /* 16 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
{ false, "UNKNOWN", "Unknown Error" }, /* 17 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
{ false, "UNKNOWN", "Unknown Error" }, /* 18 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
{ false, "UNKNOWN", "Unknown Error" }, /* 19 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
{ false, "UNKNOWN", "Unknown Error" }, /* 20 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
{ false, "UNKNOWN", "Unknown Error" }, /* 21 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
{ false, "UNKNOWN", "Unknown Error" }, /* 22 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
{ false, "UNKNOWN", "Unknown Error" }, /* 23 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
{ false, "UNKNOWN", "Unknown Error" }, /* 24 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
{ false, "UNKNOWN", "Unknown Error" }, /* 25 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
{ false, "UNKNOWN", "Unknown Error" }, /* 26 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
{ false, "UNKNOWN", "Unknown Error" }, /* 27 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
{ false, "UNKNOWN", "Unknown Error" }, /* 28 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
{ false, "UNKNOWN", "Unknown Error" }, /* 29 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
{ false, "UNKNOWN", "Unknown Error" }, /* 30 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
{ false, "UNKNOWN", "Unknown Error" }, /* 31 */ │ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
} │
linux/drivers/net/ethernet/sfc/farch.c:1460 │ linux/drivers/net/ethernet/sfc/falcon/farch.c:1445
│
efx_oword_t *int_ker = efx->irq_status.addr; │ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t fatal_intr; │ ef4_oword_t *int_ker = efx->irq_status.addr;
│ ef4_oword_t fatal_intr;
int error, mem_perr; │ int error, mem_perr;
│
efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); │ ef4_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); │ error = EF4_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
│
netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " │ netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EF4_OWORD_FMT" status "
EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), │ EF4_OWORD_FMT ": %s\n", EF4_OWORD_VAL(*int_ker),
EFX_OWORD_VAL(fatal_intr), │ EF4_OWORD_VAL(fatal_intr),
error ? "disabling bus mastering" : "no recognised error"); │ error ? "disabling bus mastering" : "no recognised error");
│
/* If this is a memory parity error, dump which blocks are offending */ │ /* If this is a memory parity error, dump which blocks are offending */
mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || │ mem_perr = (EF4_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); │ EF4_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
if (mem_perr) { │ if (mem_perr) {
efx_oword_t reg; │ ef4_oword_t reg;
efx_reado(efx, ®, FR_AZ_MEM_STAT); │ ef4_reado(efx, ®, FR_AZ_MEM_STAT);
netif_err(efx, hw, efx->net_dev, │ netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", │ "SYSTEM ERROR: memory parity error "EF4_OWORD_FMT"\n",
EFX_OWORD_VAL(reg)); │ EF4_OWORD_VAL(reg));
} │ }
│
/* Disable both devices */ │ /* Disable both devices */
pci_clear_master(efx->pci_dev); │ pci_clear_master(efx->pci_dev);
efx_farch_irq_disable_master(efx); │ if (ef4_nic_is_dual_func(efx))
│ pci_clear_master(nic_data->pci_dev2);
│ ef4_farch_irq_disable_master(efx);
│
/* Count errors and reset or disable the NIC accordingly */ │ /* Count errors and reset or disable the NIC accordingly */
if (efx->int_error_count == 0 || │ if (efx->int_error_count == 0 ||
time_after(jiffies, efx->int_error_expire)) { │ time_after(jiffies, efx->int_error_expire)) {
efx->int_error_count = 0; │ efx->int_error_count = 0;
efx->int_error_expire = │ efx->int_error_expire =
jiffies + EFX_INT_ERROR_EXPIRE * HZ; │ jiffies + EF4_INT_ERROR_EXPIRE * HZ;
} │ }
if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { │ if (++efx->int_error_count < EF4_MAX_INT_ERRORS) {
netif_err(efx, hw, efx->net_dev, │ netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - reset scheduled\n"); │ "SYSTEM ERROR - reset scheduled\n");
efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); │ ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR);
} else { │ } else {
netif_err(efx, hw, efx->net_dev, │ netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - max number of errors seen." │ "SYSTEM ERROR - max number of errors seen."
"NIC will be disabled\n"); │ "NIC will be disabled\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE); │ ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
} │ }
│
return IRQ_HANDLED; │ return IRQ_HANDLED;
} │
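
The error accounting resets its counter once the expiry window has passed, schedules a recoverable reset while the count stays under the limit, and disables the NIC otherwise. A sketch of that windowed escalation using wall-clock time instead of jiffies (names and limits are illustrative):

#include <stdbool.h>
#include <time.h>

#define ERR_EXPIRE_SECS	3600	/* window length, illustrative */
#define MAX_ERRORS	5

struct err_state {
	unsigned int count;
	time_t       expire;
};

/* Count an error within a sliding window; returns true while a
 * recoverable reset is still appropriate, false once the device
 * should be disabled.
 */
static bool count_error(struct err_state *s)
{
	time_t now = time(NULL);

	if (s->count == 0 || now > s->expire) {
		s->count = 0;			/* window elapsed: restart */
		s->expire = now + ERR_EXPIRE_SECS;
	}
	return ++s->count < MAX_ERRORS;
}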
linux/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c:151 │ linux/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c:83
│
struct net_device *ndev = bus->priv; │ struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev); │ struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr; │ unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data; │ unsigned int mii_data = priv->hw->mii.data;
u32 addr, tmp, value = MII_XGMAC_BUSY; │ u32 tmp, addr, value = MII_XGMAC_BUSY;
int ret; │ int ret;
│
ret = pm_runtime_get_sync(priv->device); │ ret = pm_runtime_get_sync(priv->device);
if (ret < 0) { │ if (ret < 0) {
pm_runtime_put_noidle(priv->device); │ pm_runtime_put_noidle(priv->device);
return ret; │ return ret;
} │ }
│
/* Wait until any existing MII operation is complete */ │ /* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, │ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
!(tmp & MII_XGMAC_BUSY), 100, 10000)) { │ !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
ret = -EBUSY; │ ret = -EBUSY;
goto err_disable_clks; │ goto err_disable_clks;
} │ }
│
if (phyreg & MII_ADDR_C45) { │ if (phyreg & MII_ADDR_C45) {
phyreg &= ~MII_ADDR_C45; │ phyreg &= ~MII_ADDR_C45;
│
ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); │ ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
if (ret) │ if (ret)
goto err_disable_clks; │ goto err_disable_clks;
} else { │ } else {
ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); │ ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
if (ret) │ if (ret)
goto err_disable_clks; │ goto err_disable_clks;
│
value |= MII_XGMAC_SADDR; │ value |= MII_XGMAC_SADDR;
} │ }
│
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) │ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask; │ & priv->hw->mii.clk_csr_mask;
value |= phydata; │ value |= MII_XGMAC_READ;
value |= MII_XGMAC_WRITE; │
│
/* Wait until any existing MII operation is complete */ │ /* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, │ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
!(tmp & MII_XGMAC_BUSY), 100, 10000)) { │ !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
ret = -EBUSY; │ ret = -EBUSY;
goto err_disable_clks; │ goto err_disable_clks;
} │ }
│
/* Set the MII address register to write */ │ /* Set the MII address register to read */
writel(addr, priv->ioaddr + mii_address); │ writel(addr, priv->ioaddr + mii_address);
writel(value, priv->ioaddr + mii_data); │ writel(value, priv->ioaddr + mii_data);
│
/* Wait until any existing MII operation is complete */ │ /* Wait until any existing MII operation is complete */
ret = readl_poll_timeout(priv->ioaddr + mii_data, tmp, │ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
!(tmp & MII_XGMAC_BUSY), 100, 10000); │ !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
│ ret = -EBUSY;
│ goto err_disable_clks;
│ }
│
│ /* Read the data from the MII data register */
│ ret = (int)readl(priv->ioaddr + mii_data) & GENMASK(15, 0);
│
err_disable_clks: │ err_disable_clks:
pm_runtime_put(priv->device); │ pm_runtime_put(priv->device);
│
return ret; │ return ret;
} │
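
Both MDIO paths share one shape: poll the BUSY flag until the bus is idle, program the address and command registers, poll BUSY again, and for a read fetch the low 16 data bits. A compact sketch against a hypothetical register block (the layout and bit positions are assumptions, not the XGMAC's):

#include <stdint.h>

/* Hypothetical memory-mapped MDIO block; BUSY lives in the data reg. */
struct mdio_regs {
	volatile uint32_t addr;
	volatile uint32_t data;
};

#define MDIO_BUSY	(1u << 22)
#define MDIO_READ_CMD	(3u << 18)

static int mdio_wait_idle(struct mdio_regs *r)
{
	for (int i = 0; i < 10000; i++)		/* bounded busy-wait */
		if (!(r->data & MDIO_BUSY))
			return 0;
	return -1;				/* timed out */
}

/* poll-idle / program / poll-idle / fetch, as in the driver above */
static int mdio_read(struct mdio_regs *r, uint32_t addr)
{
	if (mdio_wait_idle(r))
		return -1;
	r->addr = addr;
	r->data = MDIO_BUSY | MDIO_READ_CMD;	/* kick off the cycle */
	if (mdio_wait_idle(r))
		return -1;
	return (int)(r->data & 0xffff);		/* low 16 bits hold data */
}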
linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3676 │ linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3639
│
.kind = mlxsw_sp3_driver_name, │ .kind = mlxsw_sp2_driver_name,
.priv_size = sizeof(struct mlxsw_sp), │ .priv_size = sizeof(struct mlxsw_sp),
.fw_req_rev = &mlxsw_sp3_fw_rev, │ .fw_req_rev = &mlxsw_sp2_fw_rev,
.fw_filename = MLXSW_SP3_FW_FILENAME, │ .fw_filename = MLXSW_SP2_FW_FILENAME,
.init = mlxsw_sp3_init, │ .init = mlxsw_sp2_init,
.fini = mlxsw_sp_fini, │ .fini = mlxsw_sp_fini,
.port_split = mlxsw_sp_port_split, │ .port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit, │ .port_unsplit = mlxsw_sp_port_unsplit,
.sb_pool_get = mlxsw_sp_sb_pool_get, │ .sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set, │ .sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get, │ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
.sb_port_pool_set = mlxsw_sp_sb_port_pool_set, │ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, │ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, │ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, │ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, │ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, │ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, │ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.trap_init = mlxsw_sp_trap_init, │ .trap_init = mlxsw_sp_trap_init,
.trap_fini = mlxsw_sp_trap_fini, │ .trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set, │ .trap_action_set = mlxsw_sp_trap_action_set,
.trap_group_init = mlxsw_sp_trap_group_init, │ .trap_group_init = mlxsw_sp_trap_group_init,
.trap_group_set = mlxsw_sp_trap_group_set, │ .trap_group_set = mlxsw_sp_trap_group_set,
.trap_policer_init = mlxsw_sp_trap_policer_init, │ .trap_policer_init = mlxsw_sp_trap_policer_init,
.trap_policer_fini = mlxsw_sp_trap_policer_fini, │ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set, │ .trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, │ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct, │ .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register, │ .resources_register = mlxsw_sp2_resources_register,
.params_register = mlxsw_sp2_params_register, │ .params_register = mlxsw_sp2_params_register,
.params_unregister = mlxsw_sp2_params_unregister, │ .params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted, │ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN, │ .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile, │ .profile = &mlxsw_sp2_config_profile,
} │
linux/drivers/net/ethernet/intel/e1000e/ich8lan.c:4314 │ linux/drivers/net/ethernet/intel/e1000e/ich8lan.c:4229
│
udelay(1); │ udelay(1);
/* Steps */ │ /* Steps */
ret_val = e1000_flash_cycle_init_ich8lan(hw); │ ret_val = e1000_flash_cycle_init_ich8lan(hw);
if (ret_val) │ if (ret_val)
break; │ break;
│
/* In SPT, this register is in LAN memory space, not │ /* In SPT, this register is in LAN memory space, not
* flash. Therefore, only 32-bit access is supported │ * flash. Therefore, only 32-bit access is supported
*/ │ */
if (hw->mac.type >= e1000_pch_spt) │ if (hw->mac.type >= e1000_pch_spt)
hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) │ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
>> 16; │
else │ else
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); │ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
│
hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; │ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
│ hsflctl.hsf_ctrl.fldbcount = size - 1;
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; │ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
│
/* In SPT, this register is in LAN memory space, │ /* In SPT, this register is in LAN memory space,
* not flash. Therefore, only 32-bit access is │ * not flash. Therefore, only 32-bit access is
* supported │ * supported
*/ │ */
if (hw->mac.type >= e1000_pch_spt) │ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); │ ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
else │ else
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); │ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
│
ew32flash(ICH_FLASH_FADDR, flash_linear_addr); │ ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
│
ew32flash(ICH_FLASH_FDATA0, data); │ if (size == 1)
│ flash_data = (u32)data & 0x00FF;
│ else
│ flash_data = (u32)data;
│
│ ew32flash(ICH_FLASH_FDATA0, flash_data);
│
/* check if FCERR is set to 1; if so, clear it │ /* check if FCERR is set to 1; if so, clear it
* and try the whole sequence a few more times, else we're done │ * and try the whole sequence a few more times, else we're done
*/ │ */
ret_val = │ ret_val =
e1000_flash_cycle_ich8lan(hw, │ e1000_flash_cycle_ich8lan(hw,
ICH_FLASH_WRITE_COMMAND_TIMEOUT); │ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
│
if (!ret_val) │ if (!ret_val)
break; │ break;
│
/* If we're here, then things are most likely │ /* If we're here, then things are most likely
* completely hosed, but if the error condition │ * completely hosed, but if the error condition
* is detected, it won't hurt to give it another │ * is detected, it won't hurt to give it another
* try...ICH_FLASH_CYCLE_REPEAT_COUNT times. │ * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
*/ │ */
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); │ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
│
if (hsfsts.hsf_status.flcerr) │ if (hsfsts.hsf_status.flcerr)
/* Repeat for some time before giving up. */ │ /* Repeat for some time before giving up. */
continue; │ continue;
if (!hsfsts.hsf_status.flcdone) { │ if (!hsfsts.hsf_status.flcdone) {
e_dbg("Timeout error - flash cycle did not complete.\n"); │ e_dbg("Timeout error - flash cycle did not complete.\n");
break; │ break;
} │ }
} │
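
The write loop retries the full flash cycle while FCERR reports a transient error, up to a repeat count, and gives up on a timeout. A sketch of that bounded retry (issue_cycle() here is a stub that fails twice before succeeding; nothing in it comes from the driver):

#include <stdbool.h>

#define CYCLE_REPEAT_COUNT 10	/* illustrative retry budget */

enum cycle_rc { CYCLE_OK, CYCLE_RETRY, CYCLE_TIMEOUT };

/* Stub for one programming attempt: transient failure twice, then ok. */
static enum cycle_rc issue_cycle(void)
{
	static int calls;

	return ++calls < 3 ? CYCLE_RETRY : CYCLE_OK;
}

/* Retry transient (FCERR-style) errors up to the budget; a timeout
 * means the cycle never completed, so bail out immediately.
 */
static bool program_word(void)
{
	for (int i = 0; i < CYCLE_REPEAT_COUNT; i++) {
		switch (issue_cycle()) {
		case CYCLE_OK:
			return true;
		case CYCLE_RETRY:	/* transient: try again */
			continue;
		case CYCLE_TIMEOUT:	/* fatal: give up */
			return false;
		}
	}
	return false;			/* retry budget exhausted */
}

int main(void)
{
	return program_word() ? 0 : 1;
}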
linux/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c:1159 │ linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:8708
│
struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; │ struct hclge_mac_node *mac_node, *tmp, *new_node;
struct list_head tmp_add_list, tmp_del_list; │ struct list_head tmp_add_list, tmp_del_list;
struct list_head *list; │ struct list_head *list;
│ bool all_added;
│
INIT_LIST_HEAD(&tmp_add_list); │ INIT_LIST_HEAD(&tmp_add_list);
INIT_LIST_HEAD(&tmp_del_list); │ INIT_LIST_HEAD(&tmp_del_list);
│
/* move the mac addrs to the tmp_add_list and tmp_del_list so │ /* move the mac addrs to the tmp_add_list and tmp_del_list so
* we can add/delete them outside the spin lock │ * we can add/delete them outside the spin lock
*/ │ */
list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? │ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
&hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; │ &vport->uc_mac_list : &vport->mc_mac_list;
│
spin_lock_bh(&hdev->mac_table.mac_list_lock); │ spin_lock_bh(&vport->mac_list_lock);
│
list_for_each_entry_safe(mac_node, tmp, list, node) { │ list_for_each_entry_safe(mac_node, tmp, list, node) {
switch (mac_node->state) { │ switch (mac_node->state) {
case HCLGEVF_MAC_TO_DEL: │ case HCLGE_MAC_TO_DEL:
list_move_tail(&mac_node->node, &tmp_del_list); │ list_move_tail(&mac_node->node, &tmp_del_list);
break; │ break;
case HCLGEVF_MAC_TO_ADD: │ case HCLGE_MAC_TO_ADD:
new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); │ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
if (!new_node) │ if (!new_node)
goto stop_traverse; │ goto stop_traverse;
│
ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); │ ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
new_node->state = mac_node->state; │ new_node->state = mac_node->state;
list_add_tail(&new_node->node, &tmp_add_list); │ list_add_tail(&new_node->node, &tmp_add_list);
break; │ break;
default: │ default:
break; │ break;
} │ }
} │ }
│
stop_traverse: │ stop_traverse:
spin_unlock_bh(&hdev->mac_table.mac_list_lock); │ spin_unlock_bh(&vport->mac_list_lock);
│
/* delete first, in order to get max mac table space for adding */ │ /* delete first, in order to get max mac table space for adding */
hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); │ hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); │ hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);
│
/* if adding/deleting some mac addresses failed, move them back to │ /* if adding/deleting some mac addresses failed, move them back to
* the mac_list and retry next time. │ * the mac_list and retry next time.
*/ │ */
spin_lock_bh(&hdev->mac_table.mac_list_lock); │ spin_lock_bh(&vport->mac_list_lock);
│
│ hclge_sync_from_del_list(&tmp_del_list, list);
│ all_added = hclge_sync_from_add_list(&tmp_add_list, list);
│
hclgevf_sync_from_del_list(&tmp_del_list, list); │ spin_unlock_bh(&vport->mac_list_lock);
hclgevf_sync_from_add_list(&tmp_add_list, list); │
│
spin_unlock_bh(&hdev->mac_table.mac_list_lock); │ hclge_update_overflow_flags(vport, mac_type, all_added);
} │
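
The pattern above snapshots pending work into temporary lists while holding the lock, performs the slow device updates with the lock dropped, and merges the results back under the lock. A minimal sketch of that split using a plain linked list and a pthread mutex (all names are illustrative):

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; int state; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;		/* guarded by list_lock */

static void do_hw_update(struct node *n)
{
	(void)n;			/* slow device I/O would go here */
}

static void sync_pending(void)
{
	struct node *work, *n;

	pthread_mutex_lock(&list_lock);
	work = pending;			/* snapshot the whole list */
	pending = NULL;
	pthread_mutex_unlock(&list_lock);

	for (n = work; n; n = n->next)	/* no lock held during I/O */
		do_hw_update(n);

	pthread_mutex_lock(&list_lock);
	/* failed entries would be re-queued onto `pending` here */
	pthread_mutex_unlock(&list_lock);
}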
linux/drivers/net/ethernet/brocade/bna/bna_tx_rx.c:776 │ linux/drivers/net/ethernet/brocade/bna/bna_tx_rx.c:834
│
struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod; │ struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
struct bna_rxf *rxf = &rx->rxf; │ struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head; │ struct list_head list_head;
const u8 *mcaddr; │ const u8 *mcaddr;
struct bna_mac *mac, *del_mac; │ struct bna_mac *mac, *del_mac;
int i; │ int i;
│
/* Purge the pending_add_q */ │ /* Purge the pending_add_q */
while (!list_empty(&rxf->ucast_pending_add_q)) { │ while (!list_empty(&rxf->mcast_pending_add_q)) {
mac = list_first_entry(&rxf->ucast_pending_add_q, │ mac = list_first_entry(&rxf->mcast_pending_add_q,
struct bna_mac, qe); │ struct bna_mac, qe);
list_move_tail(&mac->qe, &ucam_mod->free_q); │ list_move_tail(&mac->qe, &mcam_mod->free_q);
} │ }
│
/* Schedule active_q entries for deletion */ │ /* Schedule active_q entries for deletion */
while (!list_empty(&rxf->ucast_active_q)) { │ while (!list_empty(&rxf->mcast_active_q)) {
mac = list_first_entry(&rxf->ucast_active_q, │ mac = list_first_entry(&rxf->mcast_active_q,
struct bna_mac, qe); │ struct bna_mac, qe);
del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q); │ del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
ether_addr_copy(del_mac->addr, mac->addr); │ ether_addr_copy(del_mac->addr, mac->addr);
del_mac->handle = mac->handle; │ del_mac->handle = mac->handle;
list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q); │ list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
list_move_tail(&mac->qe, &ucam_mod->free_q); │ mac->handle = NULL;
│ list_move_tail(&mac->qe, &mcam_mod->free_q);
} │ }
│
/* Allocate nodes */ │ /* Allocate nodes */
INIT_LIST_HEAD(&list_head); │ INIT_LIST_HEAD(&list_head);
for (i = 0, mcaddr = uclist; i < count; i++) { │ for (i = 0, mcaddr = mclist; i < count; i++) {
mac = bna_cam_mod_mac_get(&ucam_mod->free_q); │ mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
if (mac == NULL) │ if (mac == NULL)
goto err_return; │ goto err_return;
ether_addr_copy(mac->addr, mcaddr); │ ether_addr_copy(mac->addr, mcaddr);
list_add_tail(&mac->qe, &list_head); │ list_add_tail(&mac->qe, &list_head);
│
mcaddr += ETH_ALEN; │ mcaddr += ETH_ALEN;
} │ }
│
/* Add the new entries */ │ /* Add the new entries */
while (!list_empty(&list_head)) { │ while (!list_empty(&list_head)) {
mac = list_first_entry(&list_head, struct bna_mac, qe); │ mac = list_first_entry(&list_head, struct bna_mac, qe);
list_move_tail(&mac->qe, &rxf->ucast_pending_add_q); │ list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
} │ }
│
bfa_fsm_send_event(rxf, RXF_E_CONFIG); │ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
│
return BNA_CB_SUCCESS; │ return BNA_CB_SUCCESS;
│
err_return: │ err_return:
while (!list_empty(&list_head)) { │ while (!list_empty(&list_head)) {
mac = list_first_entry(&list_head, struct bna_mac, qe); │ mac = list_first_entry(&list_head, struct bna_mac, qe);
list_move_tail(&mac->qe, &ucam_mod->free_q); │ list_move_tail(&mac->qe, &mcam_mod->free_q);
} │ }
│
return BNA_CB_UCAST_CAM_FULL; │ return BNA_CB_MCAST_LIST_FULL;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8638 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8682
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
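
Each _MASK/_SHIFT pair describes one sub-field of a flags byte, which drivers then access through generic get/set helpers. A sketch of that convention (the macro names and field layout here are invented for illustration):

#include <stdint.h>

/* Two fields packed into one byte: a 1-bit flag at bit 0 and a
 * 2-bit counter-flag at bits 2..3, as in the defines above.
 */
#define F_BIT0_MASK	0x1
#define F_BIT0_SHIFT	0
#define F_CF0_MASK	0x3
#define F_CF0_SHIFT	2

#define GET_FIELD(val, name) \
	(((val) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(val, name, field) \
	((val) = ((val) & ~(name##_MASK << name##_SHIFT)) | \
		 (((field) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint8_t flags0 = 0;

	SET_FIELD(flags0, F_CF0, 2);		/* flags0 == 0x08 */
	return GET_FIELD(flags0, F_CF0) == 2 ? 0 : 1;
}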
linux/drivers/net/ethernet/intel/igb/igb_main.c:5662 │ linux/drivers/net/ethernet/intel/igc/igc_main.c:4170
│
│ struct igc_adapter *adapter = q_vector->adapter;
int new_val = q_vector->itr_val; │ int new_val = q_vector->itr_val;
int avg_wire_size = 0; │ int avg_wire_size = 0;
struct igb_adapter *adapter = q_vector->adapter; │
unsigned int packets; │ unsigned int packets;
│
/* For non-gigabit speeds, just fix the interrupt rate at 4000 │ /* For non-gigabit speeds, just fix the interrupt rate at 4000
* ints/sec - ITR timer value of 120 ticks. │ * ints/sec - ITR timer value of 120 ticks.
*/ │ */
if (adapter->link_speed != SPEED_1000) { │ switch (adapter->link_speed) {
new_val = IGB_4K_ITR; │ case SPEED_10:
│ case SPEED_100:
│ new_val = IGC_4K_ITR;
goto set_itr_val; │ goto set_itr_val;
│ default:
│ break;
} │ }
│
packets = q_vector->rx.total_packets; │ packets = q_vector->rx.total_packets;
if (packets) │ if (packets)
avg_wire_size = q_vector->rx.total_bytes / packets; │ avg_wire_size = q_vector->rx.total_bytes / packets;
│
packets = q_vector->tx.total_packets; │ packets = q_vector->tx.total_packets;
if (packets) │ if (packets)
avg_wire_size = max_t(u32, avg_wire_size, │ avg_wire_size = max_t(u32, avg_wire_size,
q_vector->tx.total_bytes / packets); │ q_vector->tx.total_bytes / packets);
│
/* if avg_wire_size isn't set, no work was done */ │ /* if avg_wire_size isn't set, no work was done */
if (!avg_wire_size) │ if (!avg_wire_size)
goto clear_counts; │ goto clear_counts;
│
/* Add 24 bytes to size to account for CRC, preamble, and gap */ │ /* Add 24 bytes to size to account for CRC, preamble, and gap */
avg_wire_size += 24; │ avg_wire_size += 24;
│
/* Don't starve jumbo frames */ │ /* Don't starve jumbo frames */
avg_wire_size = min(avg_wire_size, 3000); │ avg_wire_size = min(avg_wire_size, 3000);
│
/* Give a little boost to mid-size frames */ │ /* Give a little boost to mid-size frames */
if ((avg_wire_size > 300) && (avg_wire_size < 1200)) │ if (avg_wire_size > 300 && avg_wire_size < 1200)
new_val = avg_wire_size / 3; │ new_val = avg_wire_size / 3;
else │ else
new_val = avg_wire_size / 2; │ new_val = avg_wire_size / 2;
│
/* conservative mode (itr 3) eliminates the lowest_latency setting */ │ /* conservative mode (itr 3) eliminates the lowest_latency setting */
if (new_val < IGB_20K_ITR && │ if (new_val < IGC_20K_ITR &&
((q_vector->rx.ring && adapter->rx_itr_setting == 3) || │ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
(!q_vector->rx.ring && adapter->tx_itr_setting == 3))) │ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
new_val = IGB_20K_ITR; │ new_val = IGC_20K_ITR;
│
set_itr_val: │ set_itr_val:
if (new_val != q_vector->itr_val) { │ if (new_val != q_vector->itr_val) {
q_vector->itr_val = new_val; │ q_vector->itr_val = new_val;
q_vector->set_itr = 1; │ q_vector->set_itr = 1;
} │ }
clear_counts: │ clear_counts:
q_vector->rx.total_bytes = 0; │ q_vector->rx.total_bytes = 0;
q_vector->rx.total_packets = 0; │ q_vector->rx.total_packets = 0;
q_vector->tx.total_bytes = 0; │ q_vector->tx.total_bytes = 0;
q_vector->tx.total_packets = 0; │ q_vector->tx.total_packets = 0;
} │
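
The heuristic converts the observed average frame size into an interrupt-throttle value: add 24 bytes of framing overhead, clamp at 3000 to avoid starving jumbo frames, then divide by 3 for mid-size frames or by 2 otherwise. A stand-alone restatement with a worked number:

#include <stdio.h>

/* Returns the new ITR value for a given byte/packet ratio; 0 means
 * "no traffic, keep the old value".
 */
static unsigned int itr_from_avg(unsigned int total_bytes,
				 unsigned int total_packets)
{
	unsigned int avg;

	if (!total_packets)
		return 0;		/* no work was done */
	avg = total_bytes / total_packets;
	avg += 24;			/* CRC + preamble + inter-frame gap */
	if (avg > 3000)
		avg = 3000;		/* don't starve jumbo frames */
	return (avg > 300 && avg < 1200) ? avg / 3 : avg / 2;
}

int main(void)
{
	/* e.g. 512-byte average frames -> (512 + 24) / 3 = 178 */
	printf("%u\n", itr_from_avg(512000, 1000));
	return 0;
}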
linux/drivers/net/ethernet/sfc/falcon/nic.c:76 │ linux/drivers/net/ethernet/sfc/nic.c:78
│
struct ef4_channel *channel; │ struct efx_channel *channel;
unsigned int n_irqs; │ unsigned int n_irqs;
int rc; │ int rc;
│
if (!EF4_INT_MODE_USE_MSI(efx)) { │ if (!EFX_INT_MODE_USE_MSI(efx)) {
rc = request_irq(efx->legacy_irq, │ rc = request_irq(efx->legacy_irq,
efx->type->irq_handle_legacy, IRQF_SHARED, │ efx->type->irq_handle_legacy, IRQF_SHARED,
efx->name, efx); │ efx->name, efx);
if (rc) { │ if (rc) {
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"failed to hook legacy IRQ %d\n", │ "failed to hook legacy IRQ %d\n",
efx->pci_dev->irq); │ efx->pci_dev->irq);
goto fail1; │ goto fail1;
} │ }
│ efx->irqs_hooked = true;
return 0; │ return 0;
} │ }
│
#ifdef CONFIG_RFS_ACCEL │ #ifdef CONFIG_RFS_ACCEL
if (efx->interrupt_mode == EF4_INT_MODE_MSIX) { │ if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
efx->net_dev->rx_cpu_rmap = │ efx->net_dev->rx_cpu_rmap =
alloc_irq_cpu_rmap(efx->n_rx_channels); │ alloc_irq_cpu_rmap(efx->n_rx_channels);
if (!efx->net_dev->rx_cpu_rmap) { │ if (!efx->net_dev->rx_cpu_rmap) {
rc = -ENOMEM; │ rc = -ENOMEM;
goto fail1; │ goto fail1;
} │ }
} │ }
#endif │ #endif
│
/* Hook MSI or MSI-X interrupt */ │ /* Hook MSI or MSI-X interrupt */
n_irqs = 0; │ n_irqs = 0;
ef4_for_each_channel(channel, efx) { │ efx_for_each_channel(channel, efx) {
rc = request_irq(channel->irq, efx->type->irq_handle_msi, │ rc = request_irq(channel->irq, efx->type->irq_handle_msi,
IRQF_PROBE_SHARED, /* Not shared */ │ IRQF_PROBE_SHARED, /* Not shared */
efx->msi_context[channel->channel].name, │ efx->msi_context[channel->channel].name,
&efx->msi_context[channel->channel]); │ &efx->msi_context[channel->channel]);
if (rc) { │ if (rc) {
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq); │ "failed to hook IRQ %d\n", channel->irq);
goto fail2; │ goto fail2;
} │ }
++n_irqs; │ ++n_irqs;
│
#ifdef CONFIG_RFS_ACCEL │ #ifdef CONFIG_RFS_ACCEL
if (efx->interrupt_mode == EF4_INT_MODE_MSIX && │ if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
channel->channel < efx->n_rx_channels) { │ channel->channel < efx->n_rx_channels) {
rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap, │ rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
channel->irq); │ channel->irq);
if (rc) │ if (rc)
goto fail2; │ goto fail2;
} │ }
#endif │ #endif
} │ }
│
│ efx->irqs_hooked = true;
return 0; │ return 0;
│
fail2: │ fail2:
#ifdef CONFIG_RFS_ACCEL │ #ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); │ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL; │ efx->net_dev->rx_cpu_rmap = NULL;
#endif │ #endif
ef4_for_each_channel(channel, efx) { │ efx_for_each_channel(channel, efx) {
if (n_irqs-- == 0) │ if (n_irqs-- == 0)
break; │ break;
free_irq(channel->irq, &efx->msi_context[channel->channel]); │ free_irq(channel->irq, &efx->msi_context[channel->channel]);
} │ }
fail1: │ fail1:
return rc; │ return rc;
} │
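
The MSI path on both sides uses the same unwind idiom: count how many IRQs were successfully hooked, and on failure walk the channel list again, freeing exactly that many. A generic sketch of the pattern; the names are illustrative stand-ins for request_irq()/free_irq(), not the sfc API:

#include <stdio.h>

#define N_CHANNELS 4

/* stand-ins for request_irq()/free_irq(); channel 2 fails on purpose */
static int  fake_request_irq(int i) { return (i == 2) ? -1 : 0; }
static void fake_free_irq(int i)    { printf("freed irq %d\n", i); }

static int hook_all_irqs(void)
{
        unsigned int n_irqs = 0;
        int i, rc = 0;

        for (i = 0; i < N_CHANNELS; i++) {
                rc = fake_request_irq(i);
                if (rc)
                        goto fail;
                ++n_irqs;
        }
        return 0;

fail:
        /* free only the IRQs that were successfully hooked */
        for (i = 0; i < N_CHANNELS; i++) {
                if (n_irqs-- == 0)
                        break;
                fake_free_irq(i);
        }
        return rc;
}

int main(void) { return hook_all_irqs() ? 1 : 0; }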
linux/drivers/net/ethernet/ibm/emac/core.c:873 │ linux/drivers/net/ethernet/ibm/emac/core.c:801
│
struct emac_regs __iomem *p = dev->emacp; │ struct emac_regs __iomem *p = dev->emacp;
u32 r = 0; │ u32 r = 0;
int n; │ int n, err = -ETIMEDOUT;
│
mutex_lock(&dev->mdio_lock); │ mutex_lock(&dev->mdio_lock);
│
DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val); │ DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
│
/* Enable proper MDIO port */ │ /* Enable proper MDIO port */
if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) │ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
zmii_get_mdio(dev->zmii_dev, dev->zmii_port); │ zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) │ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port); │ rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
│
/* Wait for management interface to be idle */ │ /* Wait for management interface to become idle */
n = 20; │ n = 20;
while (!emac_phy_done(dev, in_be32(&p->stacr))) { │ while (!emac_phy_done(dev, in_be32(&p->stacr))) {
udelay(1); │ udelay(1);
if (!--n) { │ if (!--n) {
DBG2(dev, " -> timeout wait idle\n"); │ DBG2(dev, " -> timeout wait idle\n");
goto bail; │ goto bail;
} │ }
} │ }
│
/* Issue write command */ │ /* Issue read command */
if (emac_has_feature(dev, EMAC_FTR_EMAC4)) │ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
r = EMAC4_STACR_BASE(dev->opb_bus_freq); │ r = EMAC4_STACR_BASE(dev->opb_bus_freq);
else │ else
r = EMAC_STACR_BASE(dev->opb_bus_freq); │ r = EMAC_STACR_BASE(dev->opb_bus_freq);
if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT)) │ if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
r |= EMAC_STACR_OC; │ r |= EMAC_STACR_OC;
if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR)) │ if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
r |= EMACX_STACR_STAC_WRITE; │ r |= EMACX_STACR_STAC_READ;
else │ else
r |= EMAC_STACR_STAC_WRITE; │ r |= EMAC_STACR_STAC_READ;
r |= (reg & EMAC_STACR_PRA_MASK) | │ r |= (reg & EMAC_STACR_PRA_MASK)
((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) | │ | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
(val << EMAC_STACR_PHYD_SHIFT); │
out_be32(&p->stacr, r); │ out_be32(&p->stacr, r);
│
/* Wait for write to complete */ │ /* Wait for read to complete */
n = 200; │ n = 200;
while (!emac_phy_done(dev, in_be32(&p->stacr))) { │ while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
udelay(1); │ udelay(1);
if (!--n) { │ if (!--n) {
DBG2(dev, " -> timeout wait complete\n"); │ DBG2(dev, " -> timeout wait complete\n");
goto bail; │ goto bail;
} │ }
} │ }
│
│ if (unlikely(r & EMAC_STACR_PHYE)) {
│ DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
│ err = -EREMOTEIO;
│ goto bail;
│ }
│
│ r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
│
│ DBG2(dev, "mdio_read -> %04x" NL, r);
│ err = 0;
bail: │ bail:
if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) │ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port); │ rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) │ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
zmii_put_mdio(dev->zmii_dev, dev->zmii_port); │ zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
mutex_unlock(&dev->mdio_lock); │ mutex_unlock(&dev->mdio_lock);
│
│ return err == 0 ? r : err;
} │
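
Both the write and the read variant bracket the STACR access with the same bounded busy-wait: poll a done bit with a microsecond delay and give up after a fixed number of iterations. A self-contained sketch of that helper; the predicate and the elided delay stand in for emac_phy_done()/udelay():

#include <stdbool.h>
#include <stdio.h>

/* stand-in for reading the status register; flips to done on the 5th poll */
static bool phy_done(void)
{
        static int polls;
        return ++polls >= 5;
}

/* returns 0 when the interface went idle, -1 on timeout */
static int wait_phy_idle(int max_polls)
{
        while (!phy_done()) {
                /* udelay(1) in the driver; elided here */
                if (!--max_polls)
                        return -1;
        }
        return 0;
}

int main(void)
{
        printf("%s\n", wait_phy_idle(20) ? "timeout" : "idle");
        return 0;
}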
linux/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c:2818 │ linux/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c:2881
│
unsigned int dma_ch_ier; │ unsigned int dma_ch_ier;
│
dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER)); │ dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
│
switch (int_id) { │ switch (int_id) {
case XLGMAC_INT_DMA_CH_SR_TI: │ case XLGMAC_INT_DMA_CH_SR_TI:
dma_ch_ier = XLGMAC_SET_REG_BITS( │ dma_ch_ier = XLGMAC_SET_REG_BITS(
dma_ch_ier, DMA_CH_IER_TIE_POS, │ dma_ch_ier, DMA_CH_IER_TIE_POS,
DMA_CH_IER_TIE_LEN, 1); │ DMA_CH_IER_TIE_LEN, 0);
break; │ break;
case XLGMAC_INT_DMA_CH_SR_TPS: │ case XLGMAC_INT_DMA_CH_SR_TPS:
dma_ch_ier = XLGMAC_SET_REG_BITS( │ dma_ch_ier = XLGMAC_SET_REG_BITS(
dma_ch_ier, DMA_CH_IER_TXSE_POS, │ dma_ch_ier, DMA_CH_IER_TXSE_POS,
DMA_CH_IER_TXSE_LEN, 1); │ DMA_CH_IER_TXSE_LEN, 0);
break; │ break;
case XLGMAC_INT_DMA_CH_SR_TBU: │ case XLGMAC_INT_DMA_CH_SR_TBU:
dma_ch_ier = XLGMAC_SET_REG_BITS( │ dma_ch_ier = XLGMAC_SET_REG_BITS(
dma_ch_ier, DMA_CH_IER_TBUE_POS, │ dma_ch_ier, DMA_CH_IER_TBUE_POS,
DMA_CH_IER_TBUE_LEN, 1); │ DMA_CH_IER_TBUE_LEN, 0);
break; │ break;
case XLGMAC_INT_DMA_CH_SR_RI: │ case XLGMAC_INT_DMA_CH_SR_RI:
dma_ch_ier = XLGMAC_SET_REG_BITS( │ dma_ch_ier = XLGMAC_SET_REG_BITS(
dma_ch_ier, DMA_CH_IER_RIE_POS, │ dma_ch_ier, DMA_CH_IER_RIE_POS,
DMA_CH_IER_RIE_LEN, 1); │ DMA_CH_IER_RIE_LEN, 0);
break; │ break;
case XLGMAC_INT_DMA_CH_SR_RBU: │ case XLGMAC_INT_DMA_CH_SR_RBU:
dma_ch_ier = XLGMAC_SET_REG_BITS( │ dma_ch_ier = XLGMAC_SET_REG_BITS(
dma_ch_ier, DMA_CH_IER_RBUE_POS, │ dma_ch_ier, DMA_CH_IER_RBUE_POS,
DMA_CH_IER_RBUE_LEN, 1); │ DMA_CH_IER_RBUE_LEN, 0);
break; │ break;
case XLGMAC_INT_DMA_CH_SR_RPS: │ case XLGMAC_INT_DMA_CH_SR_RPS:
dma_ch_ier = XLGMAC_SET_REG_BITS( │ dma_ch_ier = XLGMAC_SET_REG_BITS(
dma_ch_ier, DMA_CH_IER_RSE_POS, │ dma_ch_ier, DMA_CH_IER_RSE_POS,
DMA_CH_IER_RSE_LEN, 1); │ DMA_CH_IER_RSE_LEN, 0);
break; │ break;
case XLGMAC_INT_DMA_CH_SR_TI_RI: │ case XLGMAC_INT_DMA_CH_SR_TI_RI:
dma_ch_ier = XLGMAC_SET_REG_BITS( │ dma_ch_ier = XLGMAC_SET_REG_BITS(
dma_ch_ier, DMA_CH_IER_TIE_POS, │ dma_ch_ier, DMA_CH_IER_TIE_POS,
DMA_CH_IER_TIE_LEN, 1); │ DMA_CH_IER_TIE_LEN, 0);
dma_ch_ier = XLGMAC_SET_REG_BITS( │ dma_ch_ier = XLGMAC_SET_REG_BITS(
dma_ch_ier, DMA_CH_IER_RIE_POS, │ dma_ch_ier, DMA_CH_IER_RIE_POS,
DMA_CH_IER_RIE_LEN, 1); │ DMA_CH_IER_RIE_LEN, 0);
break; │ break;
case XLGMAC_INT_DMA_CH_SR_FBE: │ case XLGMAC_INT_DMA_CH_SR_FBE:
dma_ch_ier = XLGMAC_SET_REG_BITS( │ dma_ch_ier = XLGMAC_SET_REG_BITS(
dma_ch_ier, DMA_CH_IER_FBEE_POS, │ dma_ch_ier, DMA_CH_IER_FBEE_POS,
DMA_CH_IER_FBEE_LEN, 1); │ DMA_CH_IER_FBEE_LEN, 0);
break; │ break;
case XLGMAC_INT_DMA_ALL: │ case XLGMAC_INT_DMA_ALL:
dma_ch_ier |= channel->saved_ier; │ channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK;
│ dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK;
break; │ break;
default: │ default:
return -1; │ return -1;
} │ }
│
writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER)); │ writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
│
return 0; │ return 0;
} │
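
The only difference between the two columns is the value written into each field: 1 to enable an interrupt source, 0 to disable it, through the same read-modify-write helper; the disable side additionally saves the live mask for XLGMAC_INT_DMA_ALL so it can be restored later. A sketch of what an XLGMAC_SET_REG_BITS-style field update amounts to; the bit positions in main() are made up for the demo:

#include <stdint.h>
#include <stdio.h>

/* insert 'val' into 'var' at bit position 'pos', field width 'len' */
static uint32_t set_reg_bits(uint32_t var, unsigned pos, unsigned len,
                             uint32_t val)
{
        uint32_t mask = (((uint32_t)1 << len) - 1) << pos;

        return (var & ~mask) | ((val << pos) & mask);
}

int main(void)
{
        uint32_t ier = 0;

        ier = set_reg_bits(ier, 0, 1, 1);   /* enable TIE (hypothetical pos) */
        ier = set_reg_bits(ier, 6, 1, 1);   /* enable RIE (hypothetical pos) */
        printf("enabled:  %#x\n", ier);
        ier = set_reg_bits(ier, 0, 1, 0);   /* disable TIE again */
        printf("disabled: %#x\n", ier);
        return 0;
}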
linux/drivers/net/ethernet/amd/sunlance.c:504 │ linux/drivers/net/ethernet/amd/sunlance.c:673
│
struct lance_private *lp = netdev_priv(dev); │ struct lance_private *lp = netdev_priv(dev);
struct lance_init_block *ib = lp->init_block_mem; │ struct lance_init_block __iomem *ib = lp->init_block_iomem;
struct lance_rx_desc *rd; │ struct lance_rx_desc __iomem *rd;
u8 bits; │ unsigned char bits;
int len, entry = lp->rx_new; │ int len, entry;
struct sk_buff *skb; │ struct sk_buff *skb;
│
│ entry = lp->rx_new;
for (rd = &ib->brx_ring [entry]; │ for (rd = &ib->brx_ring [entry];
!((bits = rd->rmd1_bits) & LE_R1_OWN); │ !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN);
rd = &ib->brx_ring [entry]) { │ rd = &ib->brx_ring [entry]) {
│
/* We got an incomplete frame? */ │ /* We got an incomplete frame? */
if ((bits & LE_R1_POK) != LE_R1_POK) { │ if ((bits & LE_R1_POK) != LE_R1_POK) {
dev->stats.rx_over_errors++; │ dev->stats.rx_over_errors++;
dev->stats.rx_errors++; │ dev->stats.rx_errors++;
} else if (bits & LE_R1_ERR) { │ } else if (bits & LE_R1_ERR) {
/* Count only the end frame as a rx error, │ /* Count only the end frame as a rx error,
* not the beginning │ * not the beginning
*/ │ */
if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; │ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; │ if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; │ if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; │ if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
if (bits & LE_R1_EOP) dev->stats.rx_errors++; │ if (bits & LE_R1_EOP) dev->stats.rx_errors++;
} else { │ } else {
len = (rd->mblength & 0xfff) - 4; │ len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2); │ skb = netdev_alloc_skb(dev, len + 2);
│
if (skb == NULL) { │ if (skb == NULL) {
dev->stats.rx_dropped++; │ dev->stats.rx_dropped++;
rd->mblength = 0; │ sbus_writew(0, &rd->mblength);
rd->rmd1_bits = LE_R1_OWN; │ sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
lp->rx_new = RX_NEXT(entry); │ lp->rx_new = RX_NEXT(entry);
return; │ return;
} │ }
│
dev->stats.rx_bytes += len; │ dev->stats.rx_bytes += len;
│
skb_reserve(skb, 2); /* 16 byte align */ │ skb_reserve (skb, 2); /* 16 byte align */
skb_put(skb, len); /* make room */ │ skb_put(skb, len); /* make room */
skb_copy_to_linear_data(skb, │ lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len);
(unsigned char *)&(ib->rx_buf [entry][0]), │
len); │
skb->protocol = eth_type_trans(skb, dev); │ skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb); │ netif_rx(skb);
dev->stats.rx_packets++; │ dev->stats.rx_packets++;
} │ }
│
/* Return the packet to the pool */ │ /* Return the packet to the pool */
rd->mblength = 0; │ sbus_writew(0, &rd->mblength);
rd->rmd1_bits = LE_R1_OWN; │ sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
entry = RX_NEXT(entry); │ entry = RX_NEXT(entry);
} │ }
│
lp->rx_new = entry; │ lp->rx_new = entry;
} │
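
Both RX loops walk the descriptor ring until they reach an entry still owned by the device (LE_R1_OWN set), then hand each processed buffer back by clearing its length and restoring the OWN bit; the PIO variant on the right merely wraps every descriptor access in sbus_read/write accessors. A minimal ring-walk sketch over a hypothetical 4-entry ring:

#include <stdint.h>
#include <stdio.h>

#define OWN     0x80          /* descriptor belongs to the device */
#define RING_SZ 4
#define RX_NEXT(i) (((i) + 1) & (RING_SZ - 1))

struct rx_desc { uint8_t bits; uint16_t len; };

static void rx_poll(struct rx_desc *ring, int *rx_new)
{
        int entry = *rx_new;
        struct rx_desc *rd;

        for (rd = &ring[entry]; !(rd->bits & OWN); rd = &ring[entry]) {
                printf("consumed entry %d, len %u\n", entry, rd->len);

                /* return the descriptor to the device */
                rd->len  = 0;
                rd->bits = OWN;
                entry = RX_NEXT(entry);
        }
        *rx_new = entry;
}

int main(void)
{
        struct rx_desc ring[RING_SZ] = {
                { 0, 64 }, { 0, 1500 }, { OWN, 0 }, { OWN, 0 },
        };
        int rx_new = 0;

        rx_poll(ring, &rx_new);          /* stops at entry 2 */
        printf("rx_new = %d\n", rx_new);
        return 0;
}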
linux/drivers/net/ethernet/ti/cpsw_new.c:916 │ linux/drivers/net/ethernet/ti/cpsw.c:900
│
struct cpsw_priv *priv = netdev_priv(ndev); │ struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw; │ struct cpsw_common *cpsw = priv->cpsw;
struct cpts *cpts = cpsw->cpts; │ struct cpts *cpts = cpsw->cpts;
struct netdev_queue *txq; │ struct netdev_queue *txq;
struct cpdma_chan *txch; │ struct cpdma_chan *txch;
int ret, q_idx; │ int ret, q_idx;
│
if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) { │ if (skb_put_padto(skb, CPSW_MIN_PACKET_SIZE)) {
cpsw_err(priv, tx_err, "packet pad failed\n"); │ cpsw_err(priv, tx_err, "packet pad failed\n");
ndev->stats.tx_dropped++; │ ndev->stats.tx_dropped++;
return NET_XMIT_DROP; │ return NET_XMIT_DROP;
} │ }
│
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && │ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb)) │ priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; │ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
│
q_idx = skb_get_queue_mapping(skb); │ q_idx = skb_get_queue_mapping(skb);
if (q_idx >= cpsw->tx_ch_num) │ if (q_idx >= cpsw->tx_ch_num)
q_idx = q_idx % cpsw->tx_ch_num; │ q_idx = q_idx % cpsw->tx_ch_num;
│
txch = cpsw->txv[q_idx].ch; │ txch = cpsw->txv[q_idx].ch;
txq = netdev_get_tx_queue(ndev, q_idx); │ txq = netdev_get_tx_queue(ndev, q_idx);
skb_tx_timestamp(skb); │ skb_tx_timestamp(skb);
ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, │ ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
priv->emac_port); │ priv->emac_port + cpsw->data.dual_emac);
if (unlikely(ret != 0)) { │ if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n"); │ cpsw_err(priv, tx_err, "desc submit failed\n");
goto fail; │ goto fail;
} │ }
│
/* If there is no more tx desc left free then we need to │ /* If there is no more tx desc left free then we need to
* tell the kernel to stop sending us tx frames. │ * tell the kernel to stop sending us tx frames.
*/ │ */
if (unlikely(!cpdma_check_free_tx_desc(txch))) { │ if (unlikely(!cpdma_check_free_tx_desc(txch))) {
netif_tx_stop_queue(txq); │ netif_tx_stop_queue(txq);
│
/* Barrier, so that stop_queue visible to other cpus */ │ /* Barrier, so that stop_queue visible to other cpus */
smp_mb__after_atomic(); │ smp_mb__after_atomic();
│
if (cpdma_check_free_tx_desc(txch)) │ if (cpdma_check_free_tx_desc(txch))
netif_tx_wake_queue(txq); │ netif_tx_wake_queue(txq);
} │ }
│
return NETDEV_TX_OK; │ return NETDEV_TX_OK;
fail: │ fail:
ndev->stats.tx_dropped++; │ ndev->stats.tx_dropped++;
netif_tx_stop_queue(txq); │ netif_tx_stop_queue(txq);
│
/* Barrier, so that stop_queue visible to other cpus */ │ /* Barrier, so that stop_queue visible to other cpus */
smp_mb__after_atomic(); │ smp_mb__after_atomic();
│
if (cpdma_check_free_tx_desc(txch)) │ if (cpdma_check_free_tx_desc(txch))
netif_tx_wake_queue(txq); │ netif_tx_wake_queue(txq);
│
return NETDEV_TX_BUSY; │ return NETDEV_TX_BUSY;
} │
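
The stop-then-recheck sequence at the bottom of both transmit paths closes a race with the completion path: stop the queue, issue a full barrier so the stop is visible to other CPUs, then re-check for free descriptors and wake the queue if completions landed in the window. A loose sketch of the ordering using C11 atomics in place of smp_mb__after_atomic(); this is an analogy, not the kernel's memory model:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool queue_stopped;
static atomic_int  free_descs;

/* transmit side: ran out of descriptors */
static void tx_no_desc(void)
{
        atomic_store(&queue_stopped, true);

        /* full barrier: make the stop visible before re-checking,
         * mirroring smp_mb__after_atomic() in the drivers */
        atomic_thread_fence(memory_order_seq_cst);

        if (atomic_load(&free_descs) > 0)
                atomic_store(&queue_stopped, false); /* completion slipped in */
}

/* completion side: frees a descriptor, wakes the queue if stopped */
static void tx_complete(void)
{
        atomic_fetch_add(&free_descs, 1);
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load(&queue_stopped))
                atomic_store(&queue_stopped, false);
}

int main(void)
{
        tx_complete();
        tx_no_desc();
        printf("stopped=%d free=%d\n",
               (int)atomic_load(&queue_stopped), (int)atomic_load(&free_descs));
        return 0;
}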
linux/drivers/net/ethernet/intel/e1000e/phy.c:1881 │ linux/drivers/net/ethernet/intel/igb/e1000_phy.c:1911
│
struct e1000_phy_info *phy = &hw->phy; │ struct e1000_phy_info *phy = &hw->phy;
s32 ret_val; │ s32 ret_val;
u16 phy_data; │ u16 phy_data;
bool link; │ bool link;
│
if (phy->media_type != e1000_media_type_copper) { │ if (phy->media_type != e1000_media_type_copper) {
e_dbg("Phy info is only valid for copper media\n"); │ hw_dbg("Phy info is only valid for copper media\n");
return -E1000_ERR_CONFIG; │ ret_val = -E1000_ERR_CONFIG;
│ goto out;
} │ }
│
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); │ ret_val = igb_phy_has_link(hw, 1, 0, &link);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
if (!link) { │ if (!link) {
e_dbg("Phy info is only valid if link is up\n"); │ hw_dbg("Phy info is only valid if link is up\n");
return -E1000_ERR_CONFIG; │ ret_val = -E1000_ERR_CONFIG;
│ goto out;
} │ }
│
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); │ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
phy->polarity_correction = !!(phy_data & │ phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
M88E1000_PSCR_POLARITY_REVERSAL); │ ? true : false;
│
ret_val = e1000_check_polarity_m88(hw); │ ret_val = igb_check_polarity_m88(hw);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); │ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); │ phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
│
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { │ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
ret_val = hw->phy.ops.get_cable_length(hw); │ ret_val = phy->ops.get_cable_length(hw);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
ret_val = e1e_rphy(hw, MII_STAT1000, &phy_data); │ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
phy->local_rx = (phy_data & LPA_1000LOCALRXOK) │ phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; │ ? e1000_1000t_rx_status_ok
│ : e1000_1000t_rx_status_not_ok;
phy->remote_rx = (phy_data & LPA_1000REMRXOK) │
? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; │ phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
│ ? e1000_1000t_rx_status_ok
│ : e1000_1000t_rx_status_not_ok;
} else { │ } else {
/* Set values to "undefined" */ │ /* Set values to "undefined" */
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; │ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
phy->local_rx = e1000_1000t_rx_status_undefined; │ phy->local_rx = e1000_1000t_rx_status_undefined;
phy->remote_rx = e1000_1000t_rx_status_undefined; │ phy->remote_rx = e1000_1000t_rx_status_undefined;
} │ }
│
│ out:
return ret_val; │ return ret_val;
} │
linux/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c:223 │ linux/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c:291
│
struct net_device *ndev = bus->priv; │ struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev); │ struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr; │ unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data; │ unsigned int mii_data = priv->hw->mii.data;
│ int ret, data = phydata;
u32 value = MII_BUSY; │ u32 value = MII_BUSY;
int data = 0; │
u32 v; │ u32 v;
│
data = pm_runtime_get_sync(priv->device); │ ret = pm_runtime_get_sync(priv->device);
if (data < 0) { │ if (ret < 0) {
pm_runtime_put_noidle(priv->device); │ pm_runtime_put_noidle(priv->device);
return data; │ return ret;
} │ }
│
value |= (phyaddr << priv->hw->mii.addr_shift) │ value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask; │ & priv->hw->mii.addr_mask;
value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; │ value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
│
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) │ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask; │ & priv->hw->mii.clk_csr_mask;
if (priv->plat->has_gmac4) { │ if (priv->plat->has_gmac4) {
value |= MII_GMAC4_READ; │ value |= MII_GMAC4_WRITE;
if (phyreg & MII_ADDR_C45) { │ if (phyreg & MII_ADDR_C45) {
value |= MII_GMAC4_C45E; │ value |= MII_GMAC4_C45E;
value &= ~priv->hw->mii.reg_mask; │ value &= ~priv->hw->mii.reg_mask;
value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) << │ value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) <<
priv->hw->mii.reg_shift) & │ priv->hw->mii.reg_shift) &
priv->hw->mii.reg_mask; │ priv->hw->mii.reg_mask;
│
data |= (phyreg & MII_REGADDR_C45_MASK) << │ data |= (phyreg & MII_REGADDR_C45_MASK) <<
MII_GMAC4_REG_ADDR_SHIFT; │ MII_GMAC4_REG_ADDR_SHIFT;
} │ }
│ } else {
│ value |= MII_WRITE;
} │ }
│
│ /* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), │ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
100, 10000)) { │ 100, 10000)) {
data = -EBUSY; │ ret = -EBUSY;
goto err_disable_clks; │ goto err_disable_clks;
} │ }
│
│ /* Set the MII address register to write */
writel(data, priv->ioaddr + mii_data); │ writel(data, priv->ioaddr + mii_data);
writel(value, priv->ioaddr + mii_address); │ writel(value, priv->ioaddr + mii_address);
│
if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), │ /* Wait until any existing MII operation is complete */
100, 10000)) { │ ret = readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
data = -EBUSY; │ 100, 10000);
goto err_disable_clks; │
} │
│
/* Read the data from the MII data register */ │
data = (int)readl(priv->ioaddr + mii_data) & MII_DATA_MASK; │
│
err_disable_clks: │ err_disable_clks:
pm_runtime_put(priv->device); │ pm_runtime_put(priv->device);
│
return data; │ return ret;
} │
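
For Clause 45 accesses both columns split the combined phyreg value the same way: the device (MMD) address replaces the register field of the address word, and the 16-bit register address rides in the upper half of the data register. A sketch of that encoding; the shift and mask values here are assumptions loosely modelled on the GMAC4 layout, where the driver actually reads them from priv->hw->mii:

#include <stdint.h>
#include <stdio.h>

/* assumed field layout for the demo */
#define MII_DEVADDR_C45_SHIFT    16
#define MII_REGADDR_C45_MASK     0xffff
#define MII_GMAC4_REG_ADDR_SHIFT 16
#define REG_SHIFT                6
#define REG_MASK                 (0x1f << REG_SHIFT)

static void encode_c45(uint32_t phyreg, uint32_t *addr_word,
                       uint32_t *data_word)
{
        /* device address (MMD) replaces the register field ... */
        *addr_word &= ~REG_MASK;
        *addr_word |= ((phyreg >> MII_DEVADDR_C45_SHIFT) << REG_SHIFT)
                      & REG_MASK;

        /* ... and the 16-bit register address goes in the data register */
        *data_word |= (phyreg & MII_REGADDR_C45_MASK)
                      << MII_GMAC4_REG_ADDR_SHIFT;
}

int main(void)
{
        uint32_t addr = 0, data = 0x1234;  /* 0x1234: payload to write */
        uint32_t phyreg = (7u << MII_DEVADDR_C45_SHIFT) | 0x0005;

        encode_c45(phyreg, &addr, &data);  /* MMD 7, register 5 */
        printf("addr=%#x data=%#x\n", addr, data);
        return 0;
}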
linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3603 │ linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3639
│
.kind = mlxsw_sp1_driver_name, │ .kind = mlxsw_sp2_driver_name,
.priv_size = sizeof(struct mlxsw_sp), │ .priv_size = sizeof(struct mlxsw_sp),
.fw_req_rev = &mlxsw_sp1_fw_rev, │ .fw_req_rev = &mlxsw_sp2_fw_rev,
.fw_filename = MLXSW_SP1_FW_FILENAME, │ .fw_filename = MLXSW_SP2_FW_FILENAME,
.init = mlxsw_sp1_init, │ .init = mlxsw_sp2_init,
.fini = mlxsw_sp_fini, │ .fini = mlxsw_sp_fini,
.port_split = mlxsw_sp_port_split, │ .port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit, │ .port_unsplit = mlxsw_sp_port_unsplit,
.sb_pool_get = mlxsw_sp_sb_pool_get, │ .sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set, │ .sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get, │ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
.sb_port_pool_set = mlxsw_sp_sb_port_pool_set, │ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, │ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, │ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, │ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, │ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, │ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, │ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.trap_init = mlxsw_sp_trap_init, │ .trap_init = mlxsw_sp_trap_init,
.trap_fini = mlxsw_sp_trap_fini, │ .trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set, │ .trap_action_set = mlxsw_sp_trap_action_set,
.trap_group_init = mlxsw_sp_trap_group_init, │ .trap_group_init = mlxsw_sp_trap_group_init,
.trap_group_set = mlxsw_sp_trap_group_set, │ .trap_group_set = mlxsw_sp_trap_group_set,
.trap_policer_init = mlxsw_sp_trap_policer_init, │ .trap_policer_init = mlxsw_sp_trap_policer_init,
.trap_policer_fini = mlxsw_sp_trap_policer_fini, │ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set, │ .trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, │ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct, │ .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register, │ .resources_register = mlxsw_sp2_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get, │ .params_register = mlxsw_sp2_params_register,
│ .params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted, │ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN, │ .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile, │ .profile = &mlxsw_sp2_config_profile,
} │
linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3603 │ linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3676
│
.kind = mlxsw_sp1_driver_name, │ .kind = mlxsw_sp3_driver_name,
.priv_size = sizeof(struct mlxsw_sp), │ .priv_size = sizeof(struct mlxsw_sp),
.fw_req_rev = &mlxsw_sp1_fw_rev, │ .fw_req_rev = &mlxsw_sp3_fw_rev,
.fw_filename = MLXSW_SP1_FW_FILENAME, │ .fw_filename = MLXSW_SP3_FW_FILENAME,
.init = mlxsw_sp1_init, │ .init = mlxsw_sp3_init,
.fini = mlxsw_sp_fini, │ .fini = mlxsw_sp_fini,
.port_split = mlxsw_sp_port_split, │ .port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit, │ .port_unsplit = mlxsw_sp_port_unsplit,
.sb_pool_get = mlxsw_sp_sb_pool_get, │ .sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set, │ .sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get, │ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
.sb_port_pool_set = mlxsw_sp_sb_port_pool_set, │ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, │ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, │ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, │ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, │ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, │ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, │ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.trap_init = mlxsw_sp_trap_init, │ .trap_init = mlxsw_sp_trap_init,
.trap_fini = mlxsw_sp_trap_fini, │ .trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set, │ .trap_action_set = mlxsw_sp_trap_action_set,
.trap_group_init = mlxsw_sp_trap_group_init, │ .trap_group_init = mlxsw_sp_trap_group_init,
.trap_group_set = mlxsw_sp_trap_group_set, │ .trap_group_set = mlxsw_sp_trap_group_set,
.trap_policer_init = mlxsw_sp_trap_policer_init, │ .trap_policer_init = mlxsw_sp_trap_policer_init,
.trap_policer_fini = mlxsw_sp_trap_policer_fini, │ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set, │ .trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, │ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct, │ .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register, │ .resources_register = mlxsw_sp2_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get, │ .params_register = mlxsw_sp2_params_register,
│ .params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted, │ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN, │ .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile, │ .profile = &mlxsw_sp2_config_profile,
} │
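
The Spectrum generations share one driver core and diverge only through these mlxsw_driver ops tables: generation-specific init/resources callbacks, shared helpers everywhere behaviour matches, with the bus layer calling through the table without knowing which ASIC it has. A tiny sketch of that dispatch shape, with illustrative names rather than the mlxsw API:

#include <stdio.h>

struct drv_ops {
        const char *kind;
        int  (*init)(void);
        void (*fini)(void);
};

static int  sp1_init(void) { printf("sp1 init\n"); return 0; }
static int  sp2_init(void) { printf("sp2 init\n"); return 0; }
static void sp_fini(void)  { printf("common fini\n"); }

/* one table per ASIC generation, shared helpers where behaviour matches */
static const struct drv_ops sp1_ops = { "spectrum",  sp1_init, sp_fini };
static const struct drv_ops sp2_ops = { "spectrum2", sp2_init, sp_fini };

static void probe(const struct drv_ops *ops)
{
        printf("probing %s\n", ops->kind);
        ops->init();
        ops->fini();
}

int main(void)
{
        probe(&sp1_ops);
        probe(&sp2_ops);
        return 0;
}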
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8594 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8682
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
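
These firmware context structs differ only in the prefix baked into their MASK/SHIFT macro names; code reads and writes the packed flags bytes through generic field macros built from those pairs via token pasting. A sketch of the idiom, with macro names that are illustrative rather than the qed API:

#include <stdint.h>
#include <stdio.h>

#define CF0_MASK  0x3
#define CF0_SHIFT 2

/* build accessors from the NAME_MASK / NAME_SHIFT pair by token pasting */
#define GET_FIELD(value, name) \
        (((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, v) \
        ((value) = ((value) & ~(name##_MASK << name##_SHIFT)) | \
                   (((v) & name##_MASK) << name##_SHIFT))

int main(void)
{
        uint8_t flags0 = 0;

        SET_FIELD(flags0, CF0, 0x2);        /* write the 2-bit CF0 field */
        printf("flags0=%#x cf0=%u\n",
               flags0, (unsigned)GET_FIELD(flags0, CF0));
        return 0;
}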
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8594 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8638
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/intel/i40e/i40e_common.c:4901 │ linux/drivers/net/ethernet/intel/i40e/i40e_common.c:4827
│
i40e_status status = I40E_ERR_TIMEOUT; │ i40e_status status = I40E_ERR_TIMEOUT;
u32 command = 0; │ u32 command = 0;
u16 retry = 1000; │ u16 retry = 1000;
u8 port_num = hw->func_caps.mdio_port_num; │ u8 port_num = hw->func_caps.mdio_port_num;
│
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | │ command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | │ (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | │ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
(I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | │ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
(I40E_MDIO_CLAUSE45_STCODE_MASK) | │ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) | │ (I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK); │ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command); │ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
do { │ do {
command = rd32(hw, I40E_GLGEN_MSCA(port_num)); │ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { │ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
status = 0; │ status = 0;
break; │ break;
} │ }
usleep_range(10, 20); │ usleep_range(10, 20);
retry--; │ retry--;
} while (retry); │ } while (retry);
│
if (status) { │ if (status) {
i40e_debug(hw, I40E_DEBUG_PHY, │ i40e_debug(hw, I40E_DEBUG_PHY,
"PHY: Can't write command to external PHY.\n"); │ "PHY: Can't write command to external PHY.\n");
goto phy_write_end; │ goto phy_read_end;
} │ }
│
command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; │
wr32(hw, I40E_GLGEN_MSRWD(port_num), command); │
│
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | │ command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | │ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
(I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | │ (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
(I40E_MDIO_CLAUSE45_STCODE_MASK) | │ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) | │ (I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK); │ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT; │ status = I40E_ERR_TIMEOUT;
retry = 1000; │ retry = 1000;
wr32(hw, I40E_GLGEN_MSCA(port_num), command); │ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
do { │ do {
command = rd32(hw, I40E_GLGEN_MSCA(port_num)); │ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { │ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
status = 0; │ status = 0;
break; │ break;
} │ }
usleep_range(10, 20); │ usleep_range(10, 20);
retry--; │ retry--;
} while (retry); │ } while (retry);
│
phy_write_end: │ if (!status) {
│ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
│ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
│ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
│ } else {
│ i40e_debug(hw, I40E_DEBUG_PHY,
│ "PHY: Can't read register value from external PHY.\n");
│ }
│
│ phy_read_end:
return status; │ return status;
} │
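
Left and right are the write and read halves of the same Clause 45 transaction: first an address cycle that latches the register number into the PHY, then a separate read or write cycle, each one polled to completion via the MDICMD bit. A compressed sketch of that two-phase sequence, with the register I/O and polling replaced by a stub:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OP_ADDRESS 0x0
#define OP_WRITE   0x1
#define OP_READ    0x2

/* stub for wr32()+poll: pretend MDICMD always clears before the timeout */
static bool mdio_cycle(unsigned op, uint16_t payload)
{
        printf("cycle op=%u payload=%#x\n", op, payload);
        return true;
}

static int c45_write(uint16_t reg, uint16_t value)
{
        if (!mdio_cycle(OP_ADDRESS, reg))   /* phase 1: latch register addr */
                return -1;
        if (!mdio_cycle(OP_WRITE, value))   /* phase 2: push the data */
                return -1;
        return 0;
}

int main(void)
{
        return c45_write(0x0007, 0xbeef) ? 1 : 0;
}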
linux/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c:1664 │ linux/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c:1597
│
int j, ret; │ int j, ret;
u32 temp, off8; │ u32 temp, off8;
u64 val; │
void __iomem *mem_crb; │ void __iomem *mem_crb;
│
/* Only 64-bit aligned access */ │ /* Only 64-bit aligned access */
if (off & 7) │ if (off & 7)
return -EIO; │ return -EIO;
│
/* P3 onward, test agent base for MIU and SIU is same */ │ /* P3 onward, test agent base for MIU and SIU is same */
if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, │ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
NETXEN_ADDR_QDR_NET_MAX_P3)) { │ NETXEN_ADDR_QDR_NET_MAX_P3)) {
mem_crb = netxen_get_ioaddr(adapter, │ mem_crb = netxen_get_ioaddr(adapter,
NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); │ NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
goto correct; │ goto correct;
} │ }
│
if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { │ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
mem_crb = netxen_get_ioaddr(adapter, │ mem_crb = netxen_get_ioaddr(adapter,
NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); │ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
goto correct; │ goto correct;
} │ }
│
if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { │ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX))
return netxen_nic_pci_mem_access_direct(adapter, │ return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1);
off, data, 0); │
} │
│
return -EIO; │ return -EIO;
│
correct: │ correct:
off8 = off & 0xfffffff8; │ off8 = off & 0xfffffff8;
│
spin_lock(&adapter->ahw.mem_lock); │ spin_lock(&adapter->ahw.mem_lock);
│
writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); │ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); │ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); │
writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); │ writel(data & 0xffffffff,
│ mem_crb + MIU_TEST_AGT_WRDATA_LO);
│ writel((data >> 32) & 0xffffffff,
│ mem_crb + MIU_TEST_AGT_WRDATA_HI);
│
│ writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
│ writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
│ (mem_crb + TEST_AGT_CTRL));
│
for (j = 0; j < MAX_CTL_CHECK; j++) { │ for (j = 0; j < MAX_CTL_CHECK; j++) {
temp = readl(mem_crb + TEST_AGT_CTRL); │ temp = readl(mem_crb + TEST_AGT_CTRL);
if ((temp & TA_CTL_BUSY) == 0) │ if ((temp & TA_CTL_BUSY) == 0)
break; │ break;
} │ }
│
if (j >= MAX_CTL_CHECK) { │ if (j >= MAX_CTL_CHECK) {
if (printk_ratelimit()) │ if (printk_ratelimit())
dev_err(&adapter->pdev->dev, │ dev_err(&adapter->pdev->dev,
"failed to read through agent\n"); │ "failed to write through agent\n");
ret = -EIO; │ ret = -EIO;
} else { │ } else
val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32; │
val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO); │
*data = val; │
ret = 0; │ ret = 0;
} │
│
spin_unlock(&adapter->ahw.mem_lock); │ spin_unlock(&adapter->ahw.mem_lock);
│
return ret; │ return ret;
} │
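
Both columns drive the same indirect "test agent" window: program the 64-bit-aligned address, for writes also the data words, arm the control register and then pulse START, and poll BUSY a bounded number of times. A sketch of the handshake over a fake register file; the write bit and the instantly-completing agent are assumptions for the demo:

#include <stdint.h>
#include <stdio.h>

#define TA_CTL_START  0x1
#define TA_CTL_ENABLE 0x2
#define TA_CTL_WRITE  0x4   /* assumed write-direction bit */
#define TA_CTL_BUSY   0x8
#define MAX_CTL_CHECK 1000

static uint32_t regs[8];                 /* fake MMIO window */
enum { ADDR_LO, ADDR_HI, WRDATA_LO, WRDATA_HI, CTRL };

static uint32_t rd(int r) { return regs[r]; }
static void     wr(int r, uint32_t v)
{
        regs[r] = v;
        if (r == CTRL && (v & TA_CTL_START))
                regs[CTRL] &= ~TA_CTL_BUSY;  /* fake agent: instantly done */
        else if (r == CTRL)
                regs[CTRL] |= TA_CTL_BUSY;   /* armed, not started yet */
}

static int agent_write64(uint64_t off, uint64_t data)
{
        int j;

        if (off & 7)
                return -1;                   /* 64-bit aligned access only */

        wr(ADDR_LO, (uint32_t)off);
        wr(ADDR_HI, 0);
        wr(WRDATA_LO, (uint32_t)data);
        wr(WRDATA_HI, (uint32_t)(data >> 32));
        wr(CTRL, TA_CTL_ENABLE | TA_CTL_WRITE);
        wr(CTRL, TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE);

        for (j = 0; j < MAX_CTL_CHECK; j++)
                if (!(rd(CTRL) & TA_CTL_BUSY))
                        return 0;
        return -1;                           /* agent stayed busy */
}

int main(void)
{
        printf("%d\n", agent_write64(0x100, 0x1122334455667788ULL));
        return 0;
}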
linux/drivers/net/ethernet/intel/e1000e/phy.c:182 │ linux/drivers/net/ethernet/intel/e1000e/phy.c:119
│
struct e1000_phy_info *phy = &hw->phy; │ struct e1000_phy_info *phy = &hw->phy;
u32 i, mdic = 0; │ u32 i, mdic = 0;
│
if (offset > MAX_PHY_REG_ADDRESS) { │ if (offset > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", offset); │ e_dbg("PHY Address %d is out of range\n", offset);
return -E1000_ERR_PARAM; │ return -E1000_ERR_PARAM;
} │ }
│
/* Set up Op-code, Phy Address, and register offset in the MDI │ /* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the │ * Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data. │ * PHY to retrieve the desired data.
*/ │ */
mdic = (((u32)data) | │ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
(offset << E1000_MDIC_REG_SHIFT) | │
(phy->addr << E1000_MDIC_PHY_SHIFT) | │ (phy->addr << E1000_MDIC_PHY_SHIFT) |
(E1000_MDIC_OP_WRITE)); │ (E1000_MDIC_OP_READ));
│
ew32(MDIC, mdic); │ ew32(MDIC, mdic);
│
/* Poll the ready bit to see if the MDI read completed │ /* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with │ * Increasing the time out as testing showed failures with
* the lower time out │ * the lower time out
*/ │ */
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { │ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
udelay(50); │ udelay(50);
mdic = er32(MDIC); │ mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY) │ if (mdic & E1000_MDIC_READY)
break; │ break;
} │ }
if (!(mdic & E1000_MDIC_READY)) { │ if (!(mdic & E1000_MDIC_READY)) {
e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset); │ e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset);
return -E1000_ERR_PHY; │ return -E1000_ERR_PHY;
} │ }
if (mdic & E1000_MDIC_ERROR) { │ if (mdic & E1000_MDIC_ERROR) {
e_dbg("MDI Write PHY Red Address %d Error\n", offset); │ e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
return -E1000_ERR_PHY; │ return -E1000_ERR_PHY;
} │ }
if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { │ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
e_dbg("MDI Write offset error - requested %d, returned %d\n", │ e_dbg("MDI Read offset error - requested %d, returned %d\n",
offset, │ offset,
(mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); │ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
return -E1000_ERR_PHY; │ return -E1000_ERR_PHY;
} │ }
│ *data = (u16)mdic;
│
/* Allow some time after each MDIC transaction to avoid │ /* Allow some time after each MDIC transaction to avoid
* reading duplicate data in the next MDIC transaction. │ * reading duplicate data in the next MDIC transaction.
*/ │ */
if (hw->mac.type == e1000_pch2lan) │ if (hw->mac.type == e1000_pch2lan)
udelay(100); │ udelay(100);
│
return 0; │ return 0;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7751 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8682
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
│ u8 byte2;
│ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
│ __le16 word1;
│ __le16 word2;
│ __le16 word3;
│ __le16 word4;
│ __le32 reg2;
│ __le32 reg3;
} │
linux/drivers/net/ethernet/microchip/lan743x_main.c:629 │ linux/drivers/net/ethernet/microchip/lan743x_main.c:671
│
int number_of_tx_vectors = intr->number_of_vectors - 1; │ int number_of_rx_vectors = intr->number_of_vectors -
│ used_tx_channels - 1;
│
│ if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
│ number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
│
if (number_of_tx_vectors > used_tx_channels) │
number_of_tx_vectors = used_tx_channels; │
flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | │ flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | │ LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | │ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR | │ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR | │ LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET; │ LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
│
if (adapter->csr.flags & │ if (adapter->csr.flags &
LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { │ LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET | │ flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
│ LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET | │ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR | │ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR; │ LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
} │ }
│ for (index = 0; index < number_of_rx_vectors; index++) {
│ int vector = index + 1 + used_tx_channels;
│ u32 int_bit = INT_BIT_DMA_RX_(index);
│
for (index = 0; index < number_of_tx_vectors; index++) { │ /* map RX interrupt to vector */
u32 int_bit = INT_BIT_DMA_TX_(index); │ int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
int vector = index + 1; │ lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
│ if (flags &
/* map TX interrupt to vector */ │ LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector); │ int_vec_en_auto_clr |= INT_VEC_EN_(vector);
lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1); │ lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
│ int_vec_en_auto_clr);
│ }
│
/* Remove TX interrupt from shared mask */ │ /* Remove RX interrupt from shared mask */
intr->vector_list[0].int_mask &= ~int_bit; │ intr->vector_list[0].int_mask &= ~int_bit;
ret = lan743x_intr_register_isr(adapter, vector, flags, │ ret = lan743x_intr_register_isr(adapter, vector, flags,
int_bit, lan743x_tx_isr, │ int_bit, lan743x_rx_isr,
&adapter->tx[index]); │ &adapter->rx[index]);
if (ret) │ if (ret)
goto clean_up; │ goto clean_up;
intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector); │ intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
if (!(flags & │
LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)) │ lan743x_csr_write(adapter, INT_VEC_EN_SET,
lan743x_csr_write(adapter, INT_VEC_EN_SET, │ INT_VEC_EN_(vector));
INT_VEC_EN_(vector)); │
} │ }
} │
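
Left and right are the TX and RX halves of the same vector-carving scheme: vector 0 stays shared, TX channels take the next block, RX channels the block after, and each channel's interrupt bit is removed from the shared mask once it owns a private vector. A small sketch of the partitioning arithmetic; the vector and channel counts below are assumptions:

#include <stdio.h>

#define USED_TX_CHANNELS 2
#define USED_RX_CHANNELS 4

int main(void)
{
        int n_vectors = 7;                  /* assumed MSI-X allocation */
        int n_tx = n_vectors - 1;           /* reserve vector 0: shared */
        int n_rx, index;

        if (n_tx > USED_TX_CHANNELS)
                n_tx = USED_TX_CHANNELS;
        n_rx = n_vectors - n_tx - 1;
        if (n_rx > USED_RX_CHANNELS)
                n_rx = USED_RX_CHANNELS;

        for (index = 0; index < n_tx; index++)
                printf("tx%d -> vector %d\n", index, index + 1);
        for (index = 0; index < n_rx; index++)
                printf("rx%d -> vector %d\n", index, index + 1 + n_tx);
        /* any remaining sources keep firing on shared vector 0 */
        return 0;
}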
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:9532 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8682
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10888 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8682
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/ti/cpsw_switchdev.c:377 │ linux/drivers/net/ethernet/ti/am65-cpsw-switchdev.c:367
│
struct cpsw_switchdev_event_work *switchdev_work = │ struct am65_cpsw_switchdev_event_work *switchdev_work =
container_of(work, struct cpsw_switchdev_event_work, work); │ container_of(work, struct am65_cpsw_switchdev_event_work, work);
struct cpsw_priv *priv = switchdev_work->priv; │ struct am65_cpsw_port *port = switchdev_work->port;
struct switchdev_notifier_fdb_info *fdb; │ struct switchdev_notifier_fdb_info *fdb;
struct cpsw_common *cpsw = priv->cpsw; │ struct am65_cpsw_common *cpsw = port->common;
int port = priv->emac_port; │ int port_id = port->port_id;
│
rtnl_lock(); │ rtnl_lock();
switch (switchdev_work->event) { │ switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE: │ case SWITCHDEV_FDB_ADD_TO_DEVICE:
fdb = &switchdev_work->fdb_info; │ fdb = &switchdev_work->fdb_info;
│
dev_dbg(cpsw->dev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- │ netdev_dbg(port->ndev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u
fdb->addr, fdb->vid, fdb->added_by_user, │ fdb->addr, fdb->vid, fdb->added_by_user,
fdb->offloaded, port); │ fdb->offloaded, port_id);
│
if (!fdb->added_by_user || fdb->is_local) │ if (!fdb->added_by_user || fdb->is_local)
break; │ break;
if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0) │ if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
port = HOST_PORT_NUM; │ port_id = HOST_PORT_NUM;
│
cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port, │ cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
fdb->vid ? ALE_VLAN : 0, fdb->vid); │ fdb->vid ? ALE_VLAN : 0, fdb->vid);
cpsw_fdb_offload_notify(priv->ndev, fdb); │ am65_cpsw_fdb_offload_notify(port->ndev, fdb);
break; │ break;
case SWITCHDEV_FDB_DEL_TO_DEVICE: │ case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb = &switchdev_work->fdb_info; │ fdb = &switchdev_work->fdb_info;
│
dev_dbg(cpsw->dev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- │ netdev_dbg(port->ndev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u
fdb->addr, fdb->vid, fdb->added_by_user, │ fdb->addr, fdb->vid, fdb->added_by_user,
fdb->offloaded, port); │ fdb->offloaded, port_id);
│
if (!fdb->added_by_user || fdb->is_local) │ if (!fdb->added_by_user || fdb->is_local)
break; │ break;
if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0) │ if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
port = HOST_PORT_NUM; │ port_id = HOST_PORT_NUM;
│
cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port, │ cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
fdb->vid ? ALE_VLAN : 0, fdb->vid); │ fdb->vid ? ALE_VLAN : 0, fdb->vid);
break; │ break;
default: │ default:
break; │ break;
} │ }
rtnl_unlock(); │ rtnl_unlock();
│
kfree(switchdev_work->fdb_info.addr); │ kfree(switchdev_work->fdb_info.addr);
kfree(switchdev_work); │ kfree(switchdev_work);
dev_put(priv->ndev); │ dev_put(port->ndev);
} │
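
Both workers follow the standard switchdev deferral contract: the notifier copies the FDB record and takes a netdev reference in atomic context, and this work function replays it under rtnl_lock, redirecting entries that match the port's own MAC to the host port, then frees the copy and drops the reference. A skeleton of that lifecycle with the kernel primitives stubbed out; the types and MAC are invented for the demo:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN      6
#define HOST_PORT_NUM 0

struct fdb_info { unsigned char addr[ETH_ALEN]; int vid; };
struct fdb_work { struct fdb_info fdb; int port_id; };

static const unsigned char port_mac[ETH_ALEN] = { 2, 0, 0, 0, 0, 1 };

static void fdb_event_work(struct fdb_work *w)
{
        /* rtnl_lock() in the driver: serialize FDB programming */
        int port = w->port_id;

        /* traffic to our own MAC goes to the host port, not out the wire */
        if (!memcmp(port_mac, w->fdb.addr, ETH_ALEN))
                port = HOST_PORT_NUM;

        printf("program %02x:..:%02x vid %d -> port %d\n",
               w->fdb.addr[0], w->fdb.addr[5], w->fdb.vid, port);

        /* rtnl_unlock(); then free the copied record, drop the netdev ref */
        free(w);
}

int main(void)
{
        struct fdb_work *w = calloc(1, sizeof(*w));

        if (!w)
                return 1;
        memcpy(w->fdb.addr, port_mac, ETH_ALEN);
        w->fdb.vid = 100;
        w->port_id = 1;
        fdb_event_work(w);
        return 0;
}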
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:9532 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8638
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10888 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8638
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:9532 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8594
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10888 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8594
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10888 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:9532
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/dec/tulip/media.c:110 │ linux/drivers/net/ethernet/dec/tulip/media.c:48
│
struct tulip_private *tp = netdev_priv(dev); │ struct tulip_private *tp = netdev_priv(dev);
int i; │ int i;
int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff); │ int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location;
│ int retval = 0;
void __iomem *ioaddr = tp->base_addr; │ void __iomem *ioaddr = tp->base_addr;
void __iomem *mdio_addr = ioaddr + CSR9; │ void __iomem *mdio_addr = ioaddr + CSR9;
unsigned long flags; │ unsigned long flags;
│
if (location & ~0x1f) │ if (location & ~0x1f)
return; │ return 0xffff;
│
if (tp->chip_id == COMET && phy_id == 30) { │ if (tp->chip_id == COMET && phy_id == 30) {
if (comet_miireg2offset[location]) │ if (comet_miireg2offset[location])
iowrite32(val, ioaddr + comet_miireg2offset[location]); │ return ioread32(ioaddr + comet_miireg2offset[location]);
return; │ return 0xffff;
} │ }
│
spin_lock_irqsave(&tp->mii_lock, flags); │ spin_lock_irqsave(&tp->mii_lock, flags);
if (tp->chip_id == LC82C168) { │ if (tp->chip_id == LC82C168) {
iowrite32(cmd, ioaddr + 0xA0); │ iowrite32(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0);
│ ioread32(ioaddr + 0xA0);
│ ioread32(ioaddr + 0xA0);
for (i = 1000; i >= 0; --i) { │ for (i = 1000; i >= 0; --i) {
barrier(); │ barrier();
if ( ! (ioread32(ioaddr + 0xA0) & 0x80000000)) │ if ( ! ((retval = ioread32(ioaddr + 0xA0)) & 0x80000000))
break; │ break;
} │ }
spin_unlock_irqrestore(&tp->mii_lock, flags); │ spin_unlock_irqrestore(&tp->mii_lock, flags);
return; │ return retval & 0xffff;
} │ }
│
/* Establish sync by sending 32 logic ones. */ │ /* Establish sync by sending at least 32 logic ones. */
for (i = 32; i >= 0; i--) { │ for (i = 32; i >= 0; i--) {
iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); │ iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
mdio_delay(); │ mdio_delay();
iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); │ iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
mdio_delay(); │ mdio_delay();
} │ }
/* Shift the command bits out. */ │ /* Shift the read command bits out. */
for (i = 31; i >= 0; i--) { │ for (i = 15; i >= 0; i--) {
int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; │ int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
│
iowrite32(MDIO_ENB | dataval, mdio_addr); │ iowrite32(MDIO_ENB | dataval, mdio_addr);
mdio_delay(); │ mdio_delay();
iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); │ iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
mdio_delay(); │ mdio_delay();
} │ }
/* Clear out extra bits. */ │ /* Read the two transition, 16 data, and wire-idle bits. */
for (i = 2; i > 0; i--) { │ for (i = 19; i > 0; i--) {
iowrite32(MDIO_ENB_IN, mdio_addr); │ iowrite32(MDIO_ENB_IN, mdio_addr);
mdio_delay(); │ mdio_delay();
                                                                                 │ retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); │ iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
mdio_delay(); │ mdio_delay();
} │ }
│
spin_unlock_irqrestore(&tp->mii_lock, flags); │ spin_unlock_irqrestore(&tp->mii_lock, flags);
│ return (retval>>1) & 0xffff;
} │
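The read path clocks out a preamble of ones, then the 16 command bits built from (0xf6 << 10) | (phy_id << 5) | location, then samples the turnaround and data bits back. A standalone sketch decomposing that command word into the MII management read frame (pure computation, no hardware access):

#include <stdio.h>

/* Build the 16 command bits the loop above shifts out MSB-first:
 * two leading idle ones, start (01), read opcode (10), 5-bit PHY
 * address, 5-bit register address.  (0xf6 << 10) truncated to 16
 * bits yields exactly that prefix. */
static unsigned mii_read_cmd(unsigned phy_id, unsigned location)
{
	return ((0xf6 << 10) | ((phy_id & 0x1f) << 5) | (location & 0x1f))
		& 0xffff;
}

int main(void)
{
	unsigned cmd = mii_read_cmd(1, 2);	/* PHY 1, register 2 */
	int i;

	printf("cmd = 0x%04x, bits out: ", cmd);
	for (i = 15; i >= 0; i--)
		putchar((cmd & (1u << i)) ? '1' : '0');
	putchar('\n');
	return 0;
}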
linux/drivers/net/ethernet/freescale/ucc_geth.c:2201 │ linux/drivers/net/ethernet/freescale/ucc_geth.c:2139
│
struct ucc_geth_info *ug_info; │ struct ucc_geth_info *ug_info;
struct ucc_fast_info *uf_info; │ struct ucc_fast_info *uf_info;
int length; │ int length;
u16 i, j; │ u16 i, j;
u8 __iomem *bd; │ u8 __iomem *bd;
│
ug_info = ugeth->ug_info; │ ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info; │ uf_info = &ug_info->uf_info;
│
/* Allocate Rx bds */ │ /* Allocate Tx bds */
for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) { │ for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
u32 align = UCC_GETH_RX_BD_RING_ALIGNMENT; │ u32 align = max(UCC_GETH_TX_BD_RING_ALIGNMENT,
│ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT);
u32 alloc; │ u32 alloc;
│
length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); │ length = ug_info->bdRingLenTx[j] * sizeof(struct qe_bd);
alloc = round_up(length, align); │ alloc = round_up(length, align);
alloc = roundup_pow_of_two(alloc); │ alloc = roundup_pow_of_two(alloc);
│
ugeth->p_rx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL); │ ugeth->p_tx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
if (!ugeth->p_rx_bd_ring[j]) { │
│ if (!ugeth->p_tx_bd_ring[j]) {
if (netif_msg_ifup(ugeth)) │ if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for Rx bd rings\n"); │ pr_err("Can not allocate memory for Tx bd rings\n");
return -ENOMEM; │ return -ENOMEM;
} │ }
│ /* Zero unused end of bd ring, according to spec */
│ memset(ugeth->p_tx_bd_ring[j] + length, 0, alloc - length);
} │ }
│
/* Init Rx bds */ │ /* Init Tx bds */
for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) { │ for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
/* Setup the skbuff rings */ │ /* Setup the skbuff rings */
ugeth->rx_skbuff[j] = │ ugeth->tx_skbuff[j] =
kcalloc(ugeth->ug_info->bdRingLenRx[j], │ kcalloc(ugeth->ug_info->bdRingLenTx[j],
sizeof(struct sk_buff *), GFP_KERNEL); │ sizeof(struct sk_buff *), GFP_KERNEL);
│
if (ugeth->rx_skbuff[j] == NULL) { │ if (ugeth->tx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth)) │ if (netif_msg_ifup(ugeth))
pr_err("Could not allocate rx_skbuff\n"); │ pr_err("Could not allocate tx_skbuff\n");
return -ENOMEM; │ return -ENOMEM;
} │ }
│
ugeth->skb_currx[j] = 0; │ ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; │ bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { │ for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
/* set bd status and length */ │
out_be32((u32 __iomem *)bd, R_I); │
/* clear bd buffer */ │ /* clear bd buffer */
out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); │ out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
│ /* set bd status and length */
│ out_be32((u32 __iomem *)bd, 0);
bd += sizeof(struct qe_bd); │ bd += sizeof(struct qe_bd);
} │ }
bd -= sizeof(struct qe_bd); │ bd -= sizeof(struct qe_bd);
/* set bd status and length */ │ /* set bd status and length */
out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ │ out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
} │ }
│
return 0; │ return 0;
} │
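The Tx path sizes each ring by rounding the descriptor-array length up to the required alignment and then up to a power of two, zeroing the slack at the tail as the spec requires. A small sketch of that sizing arithmetic with local stand-ins for the kernel's round_up() and roundup_pow_of_two(); the descriptor count, size, and alignment values are illustrative:

#include <stdio.h>

/* Local stand-ins for the kernel helpers used above. */
static unsigned long round_up_to(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);	/* align must be a power of two */
}

static unsigned long roundup_pow_of_two(unsigned long x)
{
	unsigned long p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	/* Illustrative values: 100 Tx descriptors of 8 bytes, 32-byte alignment. */
	unsigned long len   = 100 * 8;
	unsigned long alloc = roundup_pow_of_two(round_up_to(len, 32));

	printf("length=%lu alloc=%lu (tail %lu bytes zeroed per spec)\n",
	       len, alloc, alloc - len);
	return 0;
}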
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7714 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8638
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
│ u8 byte2;
│ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
│ __le16 word1;
│ __le16 word2;
│ __le16 word3;
│ __le16 word4;
│ __le32 reg2;
│ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7714 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7751
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1639 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8682
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10324 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8682
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1639 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8638
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10324 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8638
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1639 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8594
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10324 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8594
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1639 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:9532
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10324 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:9532
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1639 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10888
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10324 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10888
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10324 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1639
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2; │ u8 byte2;
u8 byte3; │ u8 byte3;
__le16 word0; │ __le16 word0;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
__le16 word1; │ __le16 word1;
__le16 word2; │ __le16 word2;
__le16 word3; │ __le16 word3;
__le16 word4; │ __le16 word4;
__le32 reg2; │ __le32 reg2;
__le32 reg3; │ __le32 reg3;
} │
linux/drivers/net/ethernet/intel/igb/igb_main.c:8952 │ linux/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c:652
│
union e1000_adv_rx_desc *rx_desc; │ union ixgbe_adv_rx_desc *rx_desc;
struct igb_rx_buffer *bi; │ struct ixgbevf_rx_buffer *bi;
u16 i = rx_ring->next_to_use; │ unsigned int i = rx_ring->next_to_use;
u16 bufsz; │
│
/* nothing to do */ │ /* nothing to do or no valid netdev defined */
if (!cleaned_count) │ if (!cleaned_count || !rx_ring->netdev)
return; │ return;
│
rx_desc = IGB_RX_DESC(rx_ring, i); │ rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
bi = &rx_ring->rx_buffer_info[i]; │ bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count; │ i -= rx_ring->count;
│
bufsz = igb_rx_bufsz(rx_ring); │
│
do { │ do {
if (!igb_alloc_mapped_page(rx_ring, bi)) │ if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
break; │ break;
│
/* sync the buffer for use by the device */ │ /* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma, │ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset, bufsz, │ bi->page_offset,
│ ixgbevf_rx_bufsz(rx_ring),
DMA_FROM_DEVICE); │ DMA_FROM_DEVICE);
│
/* Refresh the desc even if buffer_addrs didn't change │ /* Refresh the desc even if pkt_addr didn't change
* because each write-back erases this info. │ * because each write-back erases this info.
*/ │ */
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); │ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
│
rx_desc++; │ rx_desc++;
bi++; │ bi++;
i++; │ i++;
if (unlikely(!i)) { │ if (unlikely(!i)) {
rx_desc = IGB_RX_DESC(rx_ring, 0); │ rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
bi = rx_ring->rx_buffer_info; │ bi = rx_ring->rx_buffer_info;
i -= rx_ring->count; │ i -= rx_ring->count;
} │ }
│
/* clear the length for the next_to_use descriptor */ │ /* clear the length for the next_to_use descriptor */
rx_desc->wb.upper.length = 0; │ rx_desc->wb.upper.length = 0;
│
cleaned_count--; │ cleaned_count--;
} while (cleaned_count); │ } while (cleaned_count);
│
i += rx_ring->count; │ i += rx_ring->count;
│
if (rx_ring->next_to_use != i) { │ if (rx_ring->next_to_use != i) {
/* record the next descriptor to use */ │ /* record the next descriptor to use */
rx_ring->next_to_use = i; │ rx_ring->next_to_use = i;
│
/* update next to alloc since we have filled the ring */ │ /* update next to alloc since we have filled the ring */
rx_ring->next_to_alloc = i; │ rx_ring->next_to_alloc = i;
│
/* Force memory writes to complete before letting h/w │ /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only │ * know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs, │ * applicable for weak-ordered memory model archs,
* such as IA-64). │ * such as IA-64).
*/ │ */
dma_wmb(); │ wmb();
writel(i, rx_ring->tail); │ ixgbevf_write_tail(rx_ring, i);
} │ }
} │
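
Both refill loops above use the same indexing trick: i is biased by -count up front, so the wrap-around test in the hot path is the cheap zero check (!i) rather than a compare against the ring size, and the true index is recovered once at the end with i += count. A standalone sketch of that pattern, with illustrative names (this is not driver code):

#include <stdio.h>

#define RING_COUNT 8

int main(void)
{
	unsigned int next_to_use = 6;          /* assumed starting slot */
	unsigned int refill = 5;               /* descriptors to refill */
	int i = (int)next_to_use - RING_COUNT; /* biased index, always negative */

	do {
		printf("refill slot %d\n", i + RING_COUNT);
		i++;
		if (i == 0)             /* wrapped: the driver's (!i) test */
			i -= RING_COUNT;
		refill--;
	} while (refill);

	printf("next_to_use = %d\n", i + RING_COUNT);
	return 0;
}

The two columns then differ only in the barrier before the doorbell: igb uses dma_wmb(), which orders the descriptor stores against the tail write, while ixgbevf keeps the heavier wmb().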
linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c:115 │ linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c:125
│
/* CDR Settings */ │ /* CDR Settings */
{EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0, │ {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)}, │ UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
{EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)}, │ {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)},
{EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)}, │ {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
│
/* TX/RX Settings */ │ /* TX/RX Settings */
{EMAC_SGMII_LN_RX_EN_SIGNAL, SIGDET_LP_BYP_PS4 | SIGDET_EN_PS0_TO_PS2}, │ {EMAC_SGMII_LN_RX_EN_SIGNAL, SIGDET_LP_BYP_PS4 | SIGDET_EN_PS0_TO_PS2},
│
{EMAC_SGMII_LN_DRVR_CTRL0, TXVAL_VALID_INIT | KR_PCIGEN3_MODE}, │ {EMAC_SGMII_LN_DRVR_CTRL0, TXVAL_VALID_INIT | KR_PCIGEN3_MODE},
{EMAC_SGMII_LN_DRVR_TAP_EN, MAIN_EN}, │ {EMAC_SGMII_LN_DRVR_TAP_EN, MAIN_EN},
{EMAC_SGMII_LN_TX_MARGINING, TX_MARGINING_MUX | TX_MARGINING(25)}, │ {EMAC_SGMII_LN_TX_MARGINING, TX_MARGINING_MUX | TX_MARGINING(25)},
{EMAC_SGMII_LN_TX_PRE, TX_PRE_MUX}, │ {EMAC_SGMII_LN_TX_PRE, TX_PRE_MUX},
{EMAC_SGMII_LN_TX_POST, TX_POST_MUX}, │ {EMAC_SGMII_LN_TX_POST, TX_POST_MUX},
│
{EMAC_SGMII_LN_CML_CTRL_MODE0, │ {EMAC_SGMII_LN_CML_CTRL_MODE0,
CML_GEAR_MODE(1) | CML2CMOS_IBOOST_MODE(1)}, │ CML_GEAR_MODE(1) | CML2CMOS_IBOOST_MODE(1)},
{EMAC_SGMII_LN_MIXER_CTRL_MODE0, │ {EMAC_SGMII_LN_MIXER_CTRL_MODE0,
MIXER_LOADB_MODE(12) | MIXER_DATARATE_MODE(1)}, │ MIXER_LOADB_MODE(12) | MIXER_DATARATE_MODE(1)},
{EMAC_SGMII_LN_VGA_INITVAL, VGA_THRESH_DFE(31)}, │ {EMAC_SGMII_LN_VGA_INITVAL, VGA_THRESH_DFE(31)},
{EMAC_SGMII_LN_SIGDET_ENABLES, │ {EMAC_SGMII_LN_SIGDET_ENABLES,
SIGDET_LP_BYP_PS0_TO_PS2 | SIGDET_FLT_BYP}, │ SIGDET_LP_BYP_PS0_TO_PS2 | SIGDET_FLT_BYP},
{EMAC_SGMII_LN_SIGDET_CNTRL, SIGDET_LVL(8)}, │ {EMAC_SGMII_LN_SIGDET_CNTRL, SIGDET_LVL(8)},
│
{EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL, SIGDET_DEGLITCH_CTRL(4)}, │ {EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL, SIGDET_DEGLITCH_CTRL(4)},
{EMAC_SGMII_LN_RX_MISC_CNTRL0, 0}, │ {EMAC_SGMII_LN_RX_MISC_CNTRL0, INVERT_PCS_RX_CLK},
{EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV, │ {EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV,
DRVR_LOGIC_CLK_EN | DRVR_LOGIC_CLK_DIV(4)}, │ DRVR_LOGIC_CLK_EN | DRVR_LOGIC_CLK_DIV(4)},
│
{EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)}, │ {EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)},
{EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(2)}, │ {EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(1)},
{EMAC_SGMII_LN_RX_BAND, BAND_MODE0(3)}, │ {EMAC_SGMII_LN_RX_BAND, BAND_MODE0(2)},
│ {EMAC_SGMII_LN_DRVR_CTRL1, RESCODE_OFFSET(7)},
│ {EMAC_SGMII_LN_RX_RESECODE_OFFSET, RESCODE_OFFSET(9)},
{EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)}, │ {EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)},
{EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(3)}, │ {EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(2) |
│ EN_DLL_MODE0 | EN_IQ_DCC_MODE0 | EN_IQCAL_MODE0},
{EMAC_SGMII_LN_RSM_CONFIG, BYPASS_RSM_SAMP_CAL | BYPASS_RSM_DLL_CAL}, │ {EMAC_SGMII_LN_RSM_CONFIG, BYPASS_RSM_SAMP_CAL | BYPASS_RSM_DLL_CAL},
} │
linux/drivers/net/ethernet/intel/igb/igb_main.c:4815 │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c:5999
│
u16 i = tx_ring->next_to_clean; │ u16 i = tx_ring->next_to_clean;
struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; │ struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
│
│ if (tx_ring->xsk_pool) {
│ ixgbe_xsk_clean_tx_ring(tx_ring);
│ goto out;
│ }
│
while (i != tx_ring->next_to_use) { │ while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc; │ union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
│
/* Free all the Tx ring sk_buffs */ │ /* Free all the Tx ring sk_buffs */
dev_kfree_skb_any(tx_buffer->skb); │ if (ring_is_xdp(tx_ring))
│ xdp_return_frame(tx_buffer->xdpf);
│ else
│ dev_kfree_skb_any(tx_buffer->skb);
│
/* unmap skb header data */ │ /* unmap skb header data */
dma_unmap_single(tx_ring->dev, │ dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma), │ dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len), │ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
│
/* check for eop_desc to determine the end of the packet */ │ /* check for eop_desc to determine the end of the packet */
eop_desc = tx_buffer->next_to_watch; │ eop_desc = tx_buffer->next_to_watch;
tx_desc = IGB_TX_DESC(tx_ring, i); │ tx_desc = IXGBE_TX_DESC(tx_ring, i);
│
/* unmap remaining buffers */ │ /* unmap remaining buffers */
while (tx_desc != eop_desc) { │ while (tx_desc != eop_desc) {
tx_buffer++; │ tx_buffer++;
tx_desc++; │ tx_desc++;
i++; │ i++;
if (unlikely(i == tx_ring->count)) { │ if (unlikely(i == tx_ring->count)) {
i = 0; │ i = 0;
tx_buffer = tx_ring->tx_buffer_info; │ tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IGB_TX_DESC(tx_ring, 0); │ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
} │ }
│
/* unmap any remaining paged data */ │ /* unmap any remaining paged data */
if (dma_unmap_len(tx_buffer, len)) │ if (dma_unmap_len(tx_buffer, len))
dma_unmap_page(tx_ring->dev, │ dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma), │ dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len), │ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
} │ }
│
tx_buffer->next_to_watch = NULL; │
│
/* move us one more past the eop_desc for start of next pkt */ │ /* move us one more past the eop_desc for start of next pkt */
tx_buffer++; │ tx_buffer++;
i++; │ i++;
if (unlikely(i == tx_ring->count)) { │ if (unlikely(i == tx_ring->count)) {
i = 0; │ i = 0;
tx_buffer = tx_ring->tx_buffer_info; │ tx_buffer = tx_ring->tx_buffer_info;
} │ }
} │ }
│
/* reset BQL for queue */ │ /* reset BQL for queue */
netdev_tx_reset_queue(txring_txq(tx_ring)); │ if (!ring_is_xdp(tx_ring))
│ netdev_tx_reset_queue(txring_txq(tx_ring));
│
│ out:
/* reset next_to_use and next_to_clean */ │ /* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0; │ tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0; │ tx_ring->next_to_clean = 0;
} │
linux/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c:1615 │ linux/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c:8058
│
int offset, ret = 0; │ int offset, ret = 0;
struct fw_vi_mac_cmd cmd; │ struct fw_vi_mac_cmd c;
unsigned int nfilters = 0; │ unsigned int nfilters = 0;
unsigned int max_naddr = adapter->params.arch.mps_tcam_size; │ unsigned int max_naddr = is_t4(adap->params.chip) ?
│ NUM_MPS_CLS_SRAM_L_INSTANCES :
│ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
unsigned int rem = naddr; │ unsigned int rem = naddr;
│
if (naddr > max_naddr) │ if (naddr > max_naddr)
return -EINVAL; │ return -EINVAL;
│
for (offset = 0; offset < (int)naddr ; /**/) { │ for (offset = 0; offset < (int)naddr ; /**/) {
unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ? │ unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
rem : ARRAY_SIZE(cmd.u.exact)); │ ? rem
│ : ARRAY_SIZE(c.u.exact));
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, │ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[fw_naddr]), 16); │ u.exact[fw_naddr]), 16);
struct fw_vi_mac_exact *p; │ struct fw_vi_mac_exact *p;
int i; │ int i;
│
memset(&cmd, 0, sizeof(cmd)); │ memset(&c, 0, sizeof(c));
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | │ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
FW_CMD_REQUEST_F | │ FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | │ FW_CMD_WRITE_F |
FW_CMD_EXEC_V(0) | │ FW_CMD_EXEC_V(0) |
FW_VI_MAC_CMD_VIID_V(viid)); │ FW_VI_MAC_CMD_VIID_V(viid));
cmd.freemacs_to_len16 = │ c.freemacs_to_len16 =
cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | │ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
FW_CMD_LEN16_V(len16)); │ FW_CMD_LEN16_V(len16));
│
for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) { │ for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
p->valid_to_idx = cpu_to_be16( │ p->valid_to_idx = cpu_to_be16(
FW_VI_MAC_CMD_VALID_F | │ FW_VI_MAC_CMD_VALID_F |
FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE)); │ FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); │ memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
} │ }
│
ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd, │ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
sleep_ok); │
if (ret) │ if (ret)
break; │ break;
│
for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { │ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
u16 index = FW_VI_MAC_CMD_IDX_G( │ u16 index = FW_VI_MAC_CMD_IDX_G(
be16_to_cpu(p->valid_to_idx)); │ be16_to_cpu(p->valid_to_idx));
│
if (index < max_naddr) │ if (index < max_naddr)
nfilters++; │ nfilters++;
} │ }
│
offset += fw_naddr; │ offset += fw_naddr;
rem -= fw_naddr; │ rem -= fw_naddr;
} │ }
│
if (ret == 0) │ if (ret == 0)
ret = nfilters; │ ret = nfilters;
return ret; │ return ret;
} │
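
Both sides above free the exact-match entries in chunks no larger than the command's exact[] array, advancing offset and shrinking rem in lockstep and sizing each command over only the slots actually used. A minimal sketch of that chunking skeleton (the capacity constant is made up for illustration):

#include <stdio.h>

#define CMD_CAPACITY 7  /* stands in for ARRAY_SIZE(cmd.u.exact) */

static void send_chunk(int offset, unsigned int n)
{
	printf("command covers entries [%d, %d)\n", offset, offset + (int)n);
}

int main(void)
{
	unsigned int naddr = 17;   /* assumed total number of entries */
	unsigned int rem = naddr;
	int offset;

	for (offset = 0; offset < (int)naddr; /* advanced in body */) {
		unsigned int fw_naddr = rem < CMD_CAPACITY ? rem : CMD_CAPACITY;

		send_chunk(offset, fw_naddr);
		offset += fw_naddr;
		rem -= fw_naddr;
	}
	return 0;
}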
linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:864 │ linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:227
│
int blkaddr, base; │ int blkaddr, base;
bool rc; │ bool rc;
│
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); │ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
if (blkaddr < 0) │ if (blkaddr < 0)
return blkaddr; │ return blkaddr;
│
/* Get NPA AF MSIX vectors offset. */ │ /* Get NIX AF MSIX vectors offset. */
base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff; │ base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
if (!base) { │ if (!base) {
dev_warn(rvu->dev, │ dev_warn(rvu->dev,
"Failed to get NPA_AF_INT vector offsets\n"); │ "Failed to get NIX%d NIX_AF_INT vector offsets\n",
│ blkaddr - BLKADDR_NIX0);
return 0; │ return 0;
} │ }
│ /* Register and enable NIX_AF_RVU_INT interrupt */
/* Register and enable NPA_AF_RVU_INT interrupt */ │ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU, │ "NIX_AF_RVU_INT",
"NPA_AF_RVU_INT", │ rvu_nix_af_rvu_intr_handler);
rvu_npa_af_rvu_intr_handler); │
if (!rc) │ if (!rc)
goto err; │ goto err;
rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL); │ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
│
/* Register and enable NPA_AF_GEN_INT interrupt */ │ /* Register and enable NIX_AF_GEN_INT interrupt */
rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN, │ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
"NPA_AF_RVU_GEN", │ "NIX_AF_GEN_INT",
rvu_npa_af_gen_intr_handler); │ rvu_nix_af_rvu_gen_handler);
if (!rc) │ if (!rc)
goto err; │ goto err;
rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL); │ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
│
/* Register and enable NPA_AF_ERR_INT interrupt */ │ /* Register and enable NIX_AF_ERR_INT interrupt */
rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR, │ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
"NPA_AF_ERR_INT", │ "NIX_AF_ERR_INT",
rvu_npa_af_err_intr_handler); │ rvu_nix_af_rvu_err_handler);
if (!rc) │ if (!rc)
goto err; │ goto err;
rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL); │ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
│
/* Register and enable NPA_AF_RAS interrupt */ │ /* Register and enable NIX_AF_RAS interrupt */
rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON, │ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
"NPA_AF_RAS", │ "NIX_AF_RAS",
rvu_npa_af_ras_intr_handler); │ rvu_nix_af_rvu_ras_handler);
if (!rc) │ if (!rc)
goto err; │ goto err;
rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL); │ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
│
return 0; │ return 0;
err: │ err:
rvu_npa_unregister_interrupts(rvu); │ rvu_nix_unregister_interrupts(rvu);
return rc; │ return rc;
} │
linux/drivers/net/ethernet/intel/igc/igc_main.c:2011 │ linux/drivers/net/ethernet/intel/igb/igb_main.c:8952
│
union igc_adv_rx_desc *rx_desc; │ union e1000_adv_rx_desc *rx_desc;
│ struct igb_rx_buffer *bi;
u16 i = rx_ring->next_to_use; │ u16 i = rx_ring->next_to_use;
struct igc_rx_buffer *bi; │
u16 bufsz; │ u16 bufsz;
│
/* nothing to do */ │ /* nothing to do */
if (!cleaned_count) │ if (!cleaned_count)
return; │ return;
│
rx_desc = IGC_RX_DESC(rx_ring, i); │ rx_desc = IGB_RX_DESC(rx_ring, i);
bi = &rx_ring->rx_buffer_info[i]; │ bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count; │ i -= rx_ring->count;
│
bufsz = igc_rx_bufsz(rx_ring); │ bufsz = igb_rx_bufsz(rx_ring);
│
do { │ do {
if (!igc_alloc_mapped_page(rx_ring, bi)) │ if (!igb_alloc_mapped_page(rx_ring, bi))
break; │ break;
│
/* sync the buffer for use by the device */ │ /* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma, │ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset, bufsz, │ bi->page_offset, bufsz,
DMA_FROM_DEVICE); │ DMA_FROM_DEVICE);
│
/* Refresh the desc even if buffer_addrs didn't change │ /* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info. │ * because each write-back erases this info.
*/ │ */
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); │ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
│
rx_desc++; │ rx_desc++;
bi++; │ bi++;
i++; │ i++;
if (unlikely(!i)) { │ if (unlikely(!i)) {
rx_desc = IGC_RX_DESC(rx_ring, 0); │ rx_desc = IGB_RX_DESC(rx_ring, 0);
bi = rx_ring->rx_buffer_info; │ bi = rx_ring->rx_buffer_info;
i -= rx_ring->count; │ i -= rx_ring->count;
} │ }
│
/* clear the length for the next_to_use descriptor */ │ /* clear the length for the next_to_use descriptor */
rx_desc->wb.upper.length = 0; │ rx_desc->wb.upper.length = 0;
│
cleaned_count--; │ cleaned_count--;
} while (cleaned_count); │ } while (cleaned_count);
│
i += rx_ring->count; │ i += rx_ring->count;
│
if (rx_ring->next_to_use != i) { │ if (rx_ring->next_to_use != i) {
/* record the next descriptor to use */ │ /* record the next descriptor to use */
rx_ring->next_to_use = i; │ rx_ring->next_to_use = i;
│
/* update next to alloc since we have filled the ring */ │ /* update next to alloc since we have filled the ring */
rx_ring->next_to_alloc = i; │ rx_ring->next_to_alloc = i;
│
/* Force memory writes to complete before letting h/w │ /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only │ * know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs, │ * applicable for weak-ordered memory model archs,
* such as IA-64). │ * such as IA-64).
*/ │ */
wmb(); │ dma_wmb();
writel(i, rx_ring->tail); │ writel(i, rx_ring->tail);
} │ }
} │
linux/drivers/net/ethernet/3com/3c574_cs.c:985 │ linux/drivers/net/ethernet/3com/3c589_cs.c:818
│
unsigned int ioaddr = dev->base_addr; │ unsigned int ioaddr = dev->base_addr;
│ int worklimit = 32;
short rx_status; │ short rx_status;
│
pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", │ netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus)); │ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) && │ while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
worklimit > 0) { │ worklimit > 0) {
worklimit--; │ worklimit--;
if (rx_status & 0x4000) { /* Error, update stats. */ │ if (rx_status & 0x4000) { /* Error, update stats. */
short error = rx_status & 0x3800; │ short error = rx_status & 0x3800;
dev->stats.rx_errors++; │ dev->stats.rx_errors++;
switch (error) { │ switch (error) {
case 0x0000: dev->stats.rx_over_errors++; break; │ case 0x0000:
case 0x0800: dev->stats.rx_length_errors++; break; │ dev->stats.rx_over_errors++;
case 0x1000: dev->stats.rx_frame_errors++; break; │ break;
case 0x1800: dev->stats.rx_length_errors++; break; │ case 0x0800:
case 0x2000: dev->stats.rx_frame_errors++; break; │ dev->stats.rx_length_errors++;
case 0x2800: dev->stats.rx_crc_errors++; break; │ break;
│ case 0x1000:
│ dev->stats.rx_frame_errors++;
│ break;
│ case 0x1800:
│ dev->stats.rx_length_errors++;
│ break;
│ case 0x2000:
│ dev->stats.rx_frame_errors++;
│ break;
│ case 0x2800:
│ dev->stats.rx_crc_errors++;
│ break;
} │ }
} else { │ } else {
short pkt_len = rx_status & 0x7ff; │ short pkt_len = rx_status & 0x7ff;
struct sk_buff *skb; │ struct sk_buff *skb;
│
skb = netdev_alloc_skb(dev, pkt_len + 5); │ skb = netdev_alloc_skb(dev, pkt_len + 5);
│
pr_debug(" Receiving packet size %d status %4.4x.\n", │ netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status); │ pkt_len, rx_status);
if (skb != NULL) { │ if (skb != NULL) {
skb_reserve(skb, 2); │ skb_reserve(skb, 2);
insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), │ insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
((pkt_len+3)>>2)); │ (pkt_len+3)>>2);
skb->protocol = eth_type_trans(skb, dev); │ skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb); │ netif_rx(skb);
dev->stats.rx_packets++; │ dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len; │ dev->stats.rx_bytes += pkt_len;
} else { │ } else {
pr_debug("%s: couldn't allocate a sk_buff of" │ netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.
" size %d.\n", dev->name, pkt_len); │ pkt_len);
dev->stats.rx_dropped++; │ dev->stats.rx_dropped++;
} │ }
} │ }
tc574_wait_for_completion(dev, RxDiscard); │ /* Pop the top of the Rx FIFO */
│ tc589_wait_for_completion(dev, RxDiscard);
} │ }
│ if (worklimit == 0)
return worklimit; │ netdev_warn(dev, "too much work in el3_rx!\n");
│ return 0;
} │
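
Both el3_rx() variants above drain the RX FIFO under a fixed per-call budget so a busy link cannot monopolize the CPU from interrupt context; the 3c589 version additionally warns when the budget runs out. A tiny standalone sketch of that bounded-work pattern (rx_ready() is a made-up stand-in for the status-register poll):

#include <stdio.h>

static int rx_ready(void)
{
	static int pending = 40;   /* pretend 40 frames are queued */
	return pending-- > 0;
}

int main(void)
{
	int worklimit = 32;        /* per-call budget, as in el3_rx() */
	int handled = 0;

	while (worklimit > 0 && rx_ready()) {
		worklimit--;
		handled++;         /* receive and hand off one frame here */
	}
	if (worklimit == 0)
		puts("too much work; leaving the rest for the next pass");
	printf("handled %d frames\n", handled);
	return 0;
}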
linux/drivers/net/ethernet/i825xx/82596.c:1281 │ linux/drivers/net/ethernet/i825xx/lib82596.c:1186
│
case CmdTx: │ case CmdTx:
{ │ {
struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; │ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
struct sk_buff *skb = tx_cmd->skb; │ struct sk_buff *skb = tx_cmd->skb;
│
if ((ptr->status) & STAT_OK) { │ if (ptr->status & SWAP16(STAT_OK)) {
DEB(DEB_TXADDR,print_eth(skb->data, "tx-done")); │ DEB(DEB_TXADDR,
│ print_eth(skb->data, "tx-done"));
} else { │ } else {
dev->stats.tx_errors++; │ dev->stats.tx_errors++;
if ((ptr->status) & 0x0020) │ if (ptr->status & SWAP16(0x0020))
dev->stats.collisions++; │ dev->stats.collisions++;
if (!((ptr->status) & 0x0040)) │ if (!(ptr->status & SWAP16(0x0040)))
dev->stats.tx_heartbeat_errors++; │ dev->stats.tx_heartbeat_errors++;
if ((ptr->status) & 0x0400) │ if (ptr->status & SWAP16(0x0400))
dev->stats.tx_carrier_errors++; │ dev->stats.tx_carrier_errors++;
if ((ptr->status) & 0x0800) │ if (ptr->status & SWAP16(0x0800))
dev->stats.collisions++; │ dev->stats.collisions++;
if ((ptr->status) & 0x1000) │ if (ptr->status & SWAP16(0x1000))
dev->stats.tx_aborted_errors++; │ dev->stats.tx_aborted_errors++;
} │ }
│ dma_unmap_single(dev->dev.parent,
│ tx_cmd->dma_addr,
│ skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(skb); │ dev_consume_skb_irq(skb);
│
tx_cmd->cmd.command = 0; /* Mark free */ │ tx_cmd->cmd.command = 0; /* Mark free */
break; │ break;
} │ }
case CmdTDR: │ case CmdTDR:
{ │ {
unsigned short status = ((struct tdr_cmd *)ptr)->status; │ unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);
│
if (status & 0x8000) { │ if (status & 0x8000) {
DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name)); │ DEB(DEB_ANY,
│ printk(KERN_DEBUG "%s: link ok.\n",
│ dev->name));
} else { │ } else {
if (status & 0x4000) │ if (status & 0x4000)
printk(KERN_ERR "%s: Transceiver problem │ printk(KERN_ERR
│ "%s: Transceiver problem.\n",
│ dev->name);
if (status & 0x2000) │ if (status & 0x2000)
printk(KERN_ERR "%s: Termination problem │ printk(KERN_ERR
│ "%s: Termination problem.\n",
│ dev->name);
if (status & 0x1000) │ if (status & 0x1000)
printk(KERN_ERR "%s: Short circuit.\n", │ printk(KERN_ERR
│ "%s: Short circuit.\n",
│ dev->name);
│
DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff)); │ DEB(DEB_TDR,
│ printk(KERN_DEBUG "%s: Time %d.\n",
│ dev->name, status & 0x07ff));
} │ }
break; │ break;
} │ }
case CmdConfigure: │ case CmdConfigure:
case CmdMulticastList: │ /*
/* Zap command so set_multicast_list() knows it is free */ │ * Zap command so set_multicast_list() know
│ * it is free
│ */
ptr->command = 0; │ ptr->command = 0;
break; │ break;
} │
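
The systematic difference in the Tx and TDR handling above is that lib82596.c wraps each status test in SWAP16(): that driver also runs on big-endian hosts, while the i82596 writes its descriptor words little-endian, so swapping the constants keeps the comparison in device byte order without touching the descriptor itself. A self-contained sketch of the idea (this SWAP16 is a local stand-in that always swaps, unlike the kernel macro, which is a no-op on little-endian builds):

#include <stdint.h>
#include <stdio.h>

#define SWAP16(x) ((uint16_t)((((x) & 0xff) << 8) | (((x) >> 8) & 0xff)))
#define STAT_OK   0x2000

int main(void)
{
	/* Simulate a big-endian CPU reading back a status word the
	 * device stored in little-endian order. */
	uint16_t raw_status = SWAP16(STAT_OK);

	if (raw_status & SWAP16(STAT_OK))  /* compare in device order */
		puts("tx-done");
	return 0;
}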
linux/drivers/net/ethernet/broadcom/cnic_defs.h:4252 │ linux/drivers/net/ethernet/broadcom/cnic_defs.h:3387
│
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u8 remote_addr_4; │ u8 remote_addr_4;
u8 remote_addr_5; │ u8 remote_addr_5;
u8 local_addr_0; │ u8 local_addr_0;
u8 local_addr_1; │ u8 local_addr_1;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u8 local_addr_1; │ u8 local_addr_1;
u8 local_addr_0; │ u8 local_addr_0;
u8 remote_addr_5; │ u8 remote_addr_5;
u8 remote_addr_4; │ u8 remote_addr_4;
#endif │ #endif
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u8 remote_addr_0; │ u8 remote_addr_0;
u8 remote_addr_1; │ u8 remote_addr_1;
u8 remote_addr_2; │ u8 remote_addr_2;
u8 remote_addr_3; │ u8 remote_addr_3;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u8 remote_addr_3; │ u8 remote_addr_3;
u8 remote_addr_2; │ u8 remote_addr_2;
u8 remote_addr_1; │ u8 remote_addr_1;
u8 remote_addr_0; │ u8 remote_addr_0;
#endif │ #endif
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u16 reserved_vlan_type; │ u16 reserved_vlan_type;
u16 vlan_params; │ u16 params;
#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) │ #define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 │ #define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12) │ #define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12 │ #define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) │ #define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 │ #define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u16 vlan_params; │ u16 params;
#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) │ #define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 │ #define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12) │ #define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12 │ #define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) │ #define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 │ #define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
u16 reserved_vlan_type; │ u16 reserved_vlan_type;
#endif │ #endif
#if defined(__BIG_ENDIAN) │ #if defined(__BIG_ENDIAN)
u8 local_addr_2; │ u8 local_addr_2;
u8 local_addr_3; │ u8 local_addr_3;
u8 local_addr_4; │ u8 local_addr_4;
u8 local_addr_5; │ u8 local_addr_5;
#elif defined(__LITTLE_ENDIAN) │ #elif defined(__LITTLE_ENDIAN)
u8 local_addr_5; │ u8 local_addr_5;
u8 local_addr_4; │ u8 local_addr_4;
u8 local_addr_3; │ u8 local_addr_3;
u8 local_addr_2; │ u8 local_addr_2;
#endif │ #endif
} │
linux/drivers/net/ethernet/intel/e1000e/80003es2lan.c:342 │ linux/drivers/net/ethernet/intel/e1000e/80003es2lan.c:411
│
s32 ret_val; │ s32 ret_val;
u32 page_select; │ u32 page_select;
u16 temp; │ u16 temp;
│
ret_val = e1000_acquire_phy_80003es2lan(hw); │ ret_val = e1000_acquire_phy_80003es2lan(hw);
if (ret_val) │ if (ret_val)
return ret_val; │ return ret_val;
│
/* Select Configuration Page */ │ /* Select Configuration Page */
if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { │ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
page_select = GG82563_PHY_PAGE_SELECT; │ page_select = GG82563_PHY_PAGE_SELECT;
} else { │ } else {
/* Use Alternative Page Select register to access │ /* Use Alternative Page Select register to access
* registers 30 and 31 │ * registers 30 and 31
*/ │ */
page_select = GG82563_PHY_PAGE_SELECT_ALT; │ page_select = GG82563_PHY_PAGE_SELECT_ALT;
} │ }
│
temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); │ temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); │ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
if (ret_val) { │ if (ret_val) {
e1000_release_phy_80003es2lan(hw); │ e1000_release_phy_80003es2lan(hw);
return ret_val; │ return ret_val;
} │ }
│
if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { │ if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
/* The "ready" bit in the MDIC register may be incorrectly set │ /* The "ready" bit in the MDIC register may be incorrectly set
* before the device has completed the "Page Select" MDI │ * before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command... │ * transaction. So we wait 200us after each MDI command...
*/ │ */
usleep_range(200, 400); │ usleep_range(200, 400);
│
/* ...and verify the command was successful. */ │ /* ...and verify the command was successful. */
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); │ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
│
if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { │ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
e1000_release_phy_80003es2lan(hw); │ e1000_release_phy_80003es2lan(hw);
return -E1000_ERR_PHY; │ return -E1000_ERR_PHY;
} │ }
│
usleep_range(200, 400); │ usleep_range(200, 400);
│
ret_val = e1000e_read_phy_reg_mdic(hw, │ ret_val = e1000e_write_phy_reg_mdic(hw,
MAX_PHY_REG_ADDRESS & offset, │ MAX_PHY_REG_ADDRESS &
data); │ offset, data);
│
usleep_range(200, 400); │ usleep_range(200, 400);
} else { │ } else {
ret_val = e1000e_read_phy_reg_mdic(hw, │ ret_val = e1000e_write_phy_reg_mdic(hw,
MAX_PHY_REG_ADDRESS & offset, │ MAX_PHY_REG_ADDRESS &
data); │ offset, data);
} │ }
│
e1000_release_phy_80003es2lan(hw); │ e1000_release_phy_80003es2lan(hw);
│
return ret_val; │ return ret_val;
} │
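
Both columns above work around a flaky "ready" indication by pausing after the page-select MDI write and reading the page register back before touching the target register. A runnable sketch of that select-verify-access sequence against a simulated PHY (register numbers and the page shift are illustrative, not the GG82563 values):

#include <stdio.h>

static int page_reg;  /* simulated PAGE_SELECT register */

static int mdic_write(int reg, int val)
{
	if (reg == 31)
		page_reg = val;
	return 0;
}

static int mdic_read(int reg, int *val)
{
	*val = (reg == 31) ? page_reg : 0;
	return 0;
}

/* Select the page for 'offset', verify the selection latched, then
 * perform the real write. */
static int phy_write_paged(int offset, int data)
{
	int page = offset >> 5;  /* illustrative page shift */
	int readback;

	if (mdic_write(31, page))
		return -1;
	/* the driver sleeps 200-400us here before trusting the PHY */
	if (mdic_read(31, &readback) || readback != page)
		return -1;       /* page select did not take effect */
	return mdic_write(offset & 0x1f, data);
}

int main(void)
{
	printf("write %s\n", phy_write_paged(0x45, 0x1234) ? "failed" : "ok");
	return 0;
}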
linux/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c:411 │ linux/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c:368
│
struct xgbe_phy_data *phy_data = pdata->phy_data; │ struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int reg; │ unsigned int reg;
│
/* Set PCS to KX/1G speed */ │ /* Set PCS to KR/10G speed */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); │ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= ~MDIO_PCS_CTRL2_TYPE; │ reg &= ~MDIO_PCS_CTRL2_TYPE;
reg |= MDIO_PCS_CTRL2_10GBX; │ reg |= MDIO_PCS_CTRL2_10GBR;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg); │ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
│
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); │ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg &= ~MDIO_CTRL1_SPEEDSEL; │ reg &= ~MDIO_CTRL1_SPEEDSEL;
reg |= MDIO_CTRL1_SPEED1G; │ reg |= MDIO_CTRL1_SPEED10G;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); │ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
│
xgbe_phy_pcs_power_cycle(pdata); │ xgbe_phy_pcs_power_cycle(pdata);
│
/* Set SerDes to 2.5G speed */ │ /* Set SerDes to 10G speed */
xgbe_phy_start_ratechange(pdata); │ xgbe_phy_start_ratechange(pdata);
│
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE); │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD); │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL); │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
│
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE, │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
phy_data->cdr_rate[XGBE_SPEED_2500]); │ phy_data->cdr_rate[XGBE_SPEED_10000]);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP, │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
phy_data->tx_amp[XGBE_SPEED_2500]); │ phy_data->tx_amp[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA, │ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
phy_data->blwc[XGBE_SPEED_2500]); │ phy_data->blwc[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG, │ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
phy_data->pq_skew[XGBE_SPEED_2500]); │ phy_data->pq_skew[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG, │ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
phy_data->dfe_tap_cfg[XGBE_SPEED_2500]); │ phy_data->dfe_tap_cfg[XGBE_SPEED_10000]);
XRXTX_IOWRITE(pdata, RXTX_REG22, │ XRXTX_IOWRITE(pdata, RXTX_REG22,
phy_data->dfe_tap_ena[XGBE_SPEED_2500]); │ phy_data->dfe_tap_ena[XGBE_SPEED_10000]);
│
xgbe_phy_complete_ratechange(pdata); │ xgbe_phy_complete_ratechange(pdata);
│
netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n"); │ netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
} │
linux/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c:2400 │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c:5999
│
u16 i = tx_ring->next_to_clean; │ u16 i = tx_ring->next_to_clean;
struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; │ struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
│
│ if (tx_ring->xsk_pool) {
│ ixgbe_xsk_clean_tx_ring(tx_ring);
│ goto out;
│ }
│
while (i != tx_ring->next_to_use) { │ while (i != tx_ring->next_to_use) {
union ixgbe_adv_tx_desc *eop_desc, *tx_desc; │ union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
│
/* Free all the Tx ring sk_buffs */ │ /* Free all the Tx ring sk_buffs */
if (ring_is_xdp(tx_ring)) │ if (ring_is_xdp(tx_ring))
page_frag_free(tx_buffer->data); │ xdp_return_frame(tx_buffer->xdpf);
else │ else
dev_kfree_skb_any(tx_buffer->skb); │ dev_kfree_skb_any(tx_buffer->skb);
│
/* unmap skb header data */ │ /* unmap skb header data */
dma_unmap_single(tx_ring->dev, │ dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma), │ dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len), │ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
│
/* check for eop_desc to determine the end of the packet */ │ /* check for eop_desc to determine the end of the packet */
eop_desc = tx_buffer->next_to_watch; │ eop_desc = tx_buffer->next_to_watch;
tx_desc = IXGBEVF_TX_DESC(tx_ring, i); │ tx_desc = IXGBE_TX_DESC(tx_ring, i);
│
/* unmap remaining buffers */ │ /* unmap remaining buffers */
while (tx_desc != eop_desc) { │ while (tx_desc != eop_desc) {
tx_buffer++; │ tx_buffer++;
tx_desc++; │ tx_desc++;
i++; │ i++;
if (unlikely(i == tx_ring->count)) { │ if (unlikely(i == tx_ring->count)) {
i = 0; │ i = 0;
tx_buffer = tx_ring->tx_buffer_info; │ tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); │ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
} │ }
│
/* unmap any remaining paged data */ │ /* unmap any remaining paged data */
if (dma_unmap_len(tx_buffer, len)) │ if (dma_unmap_len(tx_buffer, len))
dma_unmap_page(tx_ring->dev, │ dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma), │ dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len), │ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
} │ }
│
/* move us one more past the eop_desc for start of next pkt */ │ /* move us one more past the eop_desc for start of next pkt */
tx_buffer++; │ tx_buffer++;
i++; │ i++;
if (unlikely(i == tx_ring->count)) { │ if (unlikely(i == tx_ring->count)) {
i = 0; │ i = 0;
tx_buffer = tx_ring->tx_buffer_info; │ tx_buffer = tx_ring->tx_buffer_info;
} │ }
} │ }
│
│ /* reset BQL for queue */
│ if (!ring_is_xdp(tx_ring))
│ netdev_tx_reset_queue(txring_txq(tx_ring));
│
│ out:
/* reset next_to_use and next_to_clean */ │ /* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0; │ tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0; │ tx_ring->next_to_clean = 0;
│
} │
linux/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c:454 │ linux/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c:368
│
struct xgbe_phy_data *phy_data = pdata->phy_data; │ struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int reg; │ unsigned int reg;
│
/* Set PCS to KX/1G speed */ │ /* Set PCS to KR/10G speed */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); │ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= ~MDIO_PCS_CTRL2_TYPE; │ reg &= ~MDIO_PCS_CTRL2_TYPE;
reg |= MDIO_PCS_CTRL2_10GBX; │ reg |= MDIO_PCS_CTRL2_10GBR;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg); │ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
│
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); │ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg &= ~MDIO_CTRL1_SPEEDSEL; │ reg &= ~MDIO_CTRL1_SPEEDSEL;
reg |= MDIO_CTRL1_SPEED1G; │ reg |= MDIO_CTRL1_SPEED10G;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); │ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
│
xgbe_phy_pcs_power_cycle(pdata); │ xgbe_phy_pcs_power_cycle(pdata);
│
/* Set SerDes to 1G speed */ │ /* Set SerDes to 10G speed */
xgbe_phy_start_ratechange(pdata); │ xgbe_phy_start_ratechange(pdata);
│
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE); │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD); │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL); │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
│
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE, │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
phy_data->cdr_rate[XGBE_SPEED_1000]); │ phy_data->cdr_rate[XGBE_SPEED_10000]);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP, │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
phy_data->tx_amp[XGBE_SPEED_1000]); │ phy_data->tx_amp[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA, │ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
phy_data->blwc[XGBE_SPEED_1000]); │ phy_data->blwc[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG, │ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
phy_data->pq_skew[XGBE_SPEED_1000]); │ phy_data->pq_skew[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG, │ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
phy_data->dfe_tap_cfg[XGBE_SPEED_1000]); │ phy_data->dfe_tap_cfg[XGBE_SPEED_10000]);
XRXTX_IOWRITE(pdata, RXTX_REG22, │ XRXTX_IOWRITE(pdata, RXTX_REG22,
phy_data->dfe_tap_ena[XGBE_SPEED_1000]); │ phy_data->dfe_tap_ena[XGBE_SPEED_10000]);
│
xgbe_phy_complete_ratechange(pdata); │ xgbe_phy_complete_ratechange(pdata);
│
netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n"); │ netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
} │
linux/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c:454 │ linux/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c:411
│
struct xgbe_phy_data *phy_data = pdata->phy_data; │ struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int reg; │ unsigned int reg;
│
/* Set PCS to KX/1G speed */ │ /* Set PCS to KX/1G speed */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); │ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= ~MDIO_PCS_CTRL2_TYPE; │ reg &= ~MDIO_PCS_CTRL2_TYPE;
reg |= MDIO_PCS_CTRL2_10GBX; │ reg |= MDIO_PCS_CTRL2_10GBX;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg); │ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
│
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); │ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg &= ~MDIO_CTRL1_SPEEDSEL; │ reg &= ~MDIO_CTRL1_SPEEDSEL;
reg |= MDIO_CTRL1_SPEED1G; │ reg |= MDIO_CTRL1_SPEED1G;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); │ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
│
xgbe_phy_pcs_power_cycle(pdata); │ xgbe_phy_pcs_power_cycle(pdata);
│
/* Set SerDes to 1G speed */ │ /* Set SerDes to 2.5G speed */
xgbe_phy_start_ratechange(pdata); │ xgbe_phy_start_ratechange(pdata);
│
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE); │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD); │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL); │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
│
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE, │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
phy_data->cdr_rate[XGBE_SPEED_1000]); │ phy_data->cdr_rate[XGBE_SPEED_2500]);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP, │ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
phy_data->tx_amp[XGBE_SPEED_1000]); │ phy_data->tx_amp[XGBE_SPEED_2500]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA, │ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
phy_data->blwc[XGBE_SPEED_1000]); │ phy_data->blwc[XGBE_SPEED_2500]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG, │ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
phy_data->pq_skew[XGBE_SPEED_1000]); │ phy_data->pq_skew[XGBE_SPEED_2500]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG, │ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
phy_data->dfe_tap_cfg[XGBE_SPEED_1000]); │ phy_data->dfe_tap_cfg[XGBE_SPEED_2500]);
XRXTX_IOWRITE(pdata, RXTX_REG22, │ XRXTX_IOWRITE(pdata, RXTX_REG22,
phy_data->dfe_tap_ena[XGBE_SPEED_1000]); │ phy_data->dfe_tap_ena[XGBE_SPEED_2500]);
│
xgbe_phy_complete_ratechange(pdata); │ xgbe_phy_complete_ratechange(pdata);
│
netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n"); │ netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
} │
linux/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c:1581 │ linux/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c:652
│
union ixgbe_adv_rx_desc *rx_desc; │ union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi; │ struct ixgbevf_rx_buffer *bi;
u16 i = rx_ring->next_to_use; │ unsigned int i = rx_ring->next_to_use;
u16 bufsz; │
│
/* nothing to do */ │ /* nothing to do or no valid netdev defined */
if (!cleaned_count) │ if (!cleaned_count || !rx_ring->netdev)
return; │ return;
│
rx_desc = IXGBE_RX_DESC(rx_ring, i); │ rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
bi = &rx_ring->rx_buffer_info[i]; │ bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count; │ i -= rx_ring->count;
│
bufsz = ixgbe_rx_bufsz(rx_ring); │
│
do { │ do {
if (!ixgbe_alloc_mapped_page(rx_ring, bi)) │ if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
break; │ break;
│
/* sync the buffer for use by the device */ │ /* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma, │ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset, bufsz, │ bi->page_offset,
│ ixgbevf_rx_bufsz(rx_ring),
DMA_FROM_DEVICE); │ DMA_FROM_DEVICE);
│
/* │ /* Refresh the desc even if pkt_addr didn't change
* Refresh the desc even if buffer_addrs didn't change │
* because each write-back erases this info. │ * because each write-back erases this info.
*/ │ */
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); │ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
│
rx_desc++; │ rx_desc++;
bi++; │ bi++;
i++; │ i++;
if (unlikely(!i)) { │ if (unlikely(!i)) {
rx_desc = IXGBE_RX_DESC(rx_ring, 0); │ rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
bi = rx_ring->rx_buffer_info; │ bi = rx_ring->rx_buffer_info;
i -= rx_ring->count; │ i -= rx_ring->count;
} │ }
│
/* clear the length for the next_to_use descriptor */ │ /* clear the length for the next_to_use descriptor */
rx_desc->wb.upper.length = 0; │ rx_desc->wb.upper.length = 0;
│
cleaned_count--; │ cleaned_count--;
} while (cleaned_count); │ } while (cleaned_count);
│
i += rx_ring->count; │ i += rx_ring->count;
│
if (rx_ring->next_to_use != i) { │ if (rx_ring->next_to_use != i) {
│ /* record the next descriptor to use */
rx_ring->next_to_use = i; │ rx_ring->next_to_use = i;
│
/* update next to alloc since we have filled the ring */ │ /* update next to alloc since we have filled the ring */
rx_ring->next_to_alloc = i; │ rx_ring->next_to_alloc = i;
│
/* Force memory writes to complete before letting h/w │ /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only │ * know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs, │ * applicable for weak-ordered memory model archs,
* such as IA-64). │ * such as IA-64).
*/ │ */
wmb(); │ wmb();
writel(i, rx_ring->tail); │ ixgbevf_write_tail(rx_ring, i);
} │ }
} │
linux/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c:1581 │ linux/drivers/net/ethernet/intel/igb/igb_main.c:8952
│
union ixgbe_adv_rx_desc *rx_desc; │ union e1000_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi; │ struct igb_rx_buffer *bi;
u16 i = rx_ring->next_to_use; │ u16 i = rx_ring->next_to_use;
u16 bufsz; │ u16 bufsz;
│
/* nothing to do */ │ /* nothing to do */
if (!cleaned_count) │ if (!cleaned_count)
return; │ return;
│
rx_desc = IXGBE_RX_DESC(rx_ring, i); │ rx_desc = IGB_RX_DESC(rx_ring, i);
bi = &rx_ring->rx_buffer_info[i]; │ bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count; │ i -= rx_ring->count;
│
bufsz = ixgbe_rx_bufsz(rx_ring); │ bufsz = igb_rx_bufsz(rx_ring);
│
do { │ do {
if (!ixgbe_alloc_mapped_page(rx_ring, bi)) │ if (!igb_alloc_mapped_page(rx_ring, bi))
break; │ break;
│
/* sync the buffer for use by the device */ │ /* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma, │ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset, bufsz, │ bi->page_offset, bufsz,
DMA_FROM_DEVICE); │ DMA_FROM_DEVICE);
│
/* │ /* Refresh the desc even if buffer_addrs didn't change
* Refresh the desc even if buffer_addrs didn't change │
* because each write-back erases this info. │ * because each write-back erases this info.
*/ │ */
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); │ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
│
rx_desc++; │ rx_desc++;
bi++; │ bi++;
i++; │ i++;
if (unlikely(!i)) { │ if (unlikely(!i)) {
rx_desc = IXGBE_RX_DESC(rx_ring, 0); │ rx_desc = IGB_RX_DESC(rx_ring, 0);
bi = rx_ring->rx_buffer_info; │ bi = rx_ring->rx_buffer_info;
i -= rx_ring->count; │ i -= rx_ring->count;
} │ }
│
/* clear the length for the next_to_use descriptor */ │ /* clear the length for the next_to_use descriptor */
rx_desc->wb.upper.length = 0; │ rx_desc->wb.upper.length = 0;
│
cleaned_count--; │ cleaned_count--;
} while (cleaned_count); │ } while (cleaned_count);
│
i += rx_ring->count; │ i += rx_ring->count;
│
if (rx_ring->next_to_use != i) { │ if (rx_ring->next_to_use != i) {
│ /* record the next descriptor to use */
rx_ring->next_to_use = i; │ rx_ring->next_to_use = i;
│
/* update next to alloc since we have filled the ring */ │ /* update next to alloc since we have filled the ring */
rx_ring->next_to_alloc = i; │ rx_ring->next_to_alloc = i;
│
/* Force memory writes to complete before letting h/w │ /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only │ * know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs, │ * applicable for weak-ordered memory model archs,
* such as IA-64). │ * such as IA-64).
*/ │ */
wmb(); │ dma_wmb();
writel(i, rx_ring->tail); │ writel(i, rx_ring->tail);
} │ }
} │
linux/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c:1581 │ linux/drivers/net/ethernet/intel/igc/igc_main.c:2011
│
union ixgbe_adv_rx_desc *rx_desc; │ union igc_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi; │
u16 i = rx_ring->next_to_use; │ u16 i = rx_ring->next_to_use;
│ struct igc_rx_buffer *bi;
u16 bufsz; │ u16 bufsz;
│
/* nothing to do */ │ /* nothing to do */
if (!cleaned_count) │ if (!cleaned_count)
return; │ return;
│
rx_desc = IXGBE_RX_DESC(rx_ring, i); │ rx_desc = IGC_RX_DESC(rx_ring, i);
bi = &rx_ring->rx_buffer_info[i]; │ bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count; │ i -= rx_ring->count;
│
bufsz = ixgbe_rx_bufsz(rx_ring); │ bufsz = igc_rx_bufsz(rx_ring);
│
do { │ do {
if (!ixgbe_alloc_mapped_page(rx_ring, bi)) │ if (!igc_alloc_mapped_page(rx_ring, bi))
break; │ break;
│
/* sync the buffer for use by the device */ │ /* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma, │ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset, bufsz, │ bi->page_offset, bufsz,
DMA_FROM_DEVICE); │ DMA_FROM_DEVICE);
│
/* │ /* Refresh the desc even if buffer_addrs didn't change
* Refresh the desc even if buffer_addrs didn't change │
* because each write-back erases this info. │ * because each write-back erases this info.
*/ │ */
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); │ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
│
rx_desc++; │ rx_desc++;
bi++; │ bi++;
i++; │ i++;
if (unlikely(!i)) { │ if (unlikely(!i)) {
rx_desc = IXGBE_RX_DESC(rx_ring, 0); │ rx_desc = IGC_RX_DESC(rx_ring, 0);
bi = rx_ring->rx_buffer_info; │ bi = rx_ring->rx_buffer_info;
i -= rx_ring->count; │ i -= rx_ring->count;
} │ }
│
/* clear the length for the next_to_use descriptor */ │ /* clear the length for the next_to_use descriptor */
rx_desc->wb.upper.length = 0; │ rx_desc->wb.upper.length = 0;
│
cleaned_count--; │ cleaned_count--;
} while (cleaned_count); │ } while (cleaned_count);
│
i += rx_ring->count; │ i += rx_ring->count;
│
if (rx_ring->next_to_use != i) { │ if (rx_ring->next_to_use != i) {
│ /* record the next descriptor to use */
rx_ring->next_to_use = i; │ rx_ring->next_to_use = i;
│
/* update next to alloc since we have filled the ring */ │ /* update next to alloc since we have filled the ring */
rx_ring->next_to_alloc = i; │ rx_ring->next_to_alloc = i;
│
/* Force memory writes to complete before letting h/w │ /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only │ * know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs, │ * applicable for weak-ordered memory model archs,
* such as IA-64). │ * such as IA-64).
*/ │ */
wmb(); │ wmb();
writel(i, rx_ring->tail); │ writel(i, rx_ring->tail);
} │ }
} │
linux/drivers/net/ethernet/intel/ice/ice_dcb.c:514 │ linux/drivers/net/ethernet/intel/i40e/i40e_dcb.c:413
│
struct ice_cee_feat_tlv *sub_tlv; │ u16 len, tlvlen, sublen, typelength;
│ struct i40e_cee_feat_tlv *sub_tlv;
u8 subtype, feat_tlv_count = 0; │ u8 subtype, feat_tlv_count = 0;
u16 len, tlvlen, typelen; │
u32 ouisubtype; │ u32 ouisubtype;
│
ouisubtype = ntohl(tlv->ouisubtype); │ ouisubtype = ntohl(tlv->ouisubtype);
subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >> │ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
ICE_LLDP_TLV_SUBTYPE_S); │ I40E_LLDP_TLV_SUBTYPE_SHIFT);
/* Return if not CEE DCBX */ │ /* Return if not CEE DCBX */
if (subtype != ICE_CEE_DCBX_TYPE) │ if (subtype != I40E_CEE_DCBX_TYPE)
return; │ return;
│
typelen = ntohs(tlv->typelen); │ typelength = ntohs(tlv->typelength);
tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); │ tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
len = sizeof(tlv->typelen) + sizeof(ouisubtype) + │ I40E_LLDP_TLV_LEN_SHIFT);
sizeof(struct ice_cee_ctrl_tlv); │ len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
│ sizeof(struct i40e_cee_ctrl_tlv);
/* Return if no CEE DCBX Feature TLVs */ │ /* Return if no CEE DCBX Feature TLVs */
if (tlvlen <= len) │ if (tlvlen <= len)
return; │ return;
│
sub_tlv = (struct ice_cee_feat_tlv *)((char *)tlv + len); │ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
while (feat_tlv_count < ICE_CEE_MAX_FEAT_TYPE) { │ while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
u16 sublen; │ typelength = ntohs(sub_tlv->hdr.typelen);
│ sublen = (u16)((typelength &
typelen = ntohs(sub_tlv->hdr.typelen); │ I40E_LLDP_TLV_LEN_MASK) >>
sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); │ I40E_LLDP_TLV_LEN_SHIFT);
subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >> │ subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
ICE_LLDP_TLV_TYPE_S); │ I40E_LLDP_TLV_TYPE_SHIFT);
switch (subtype) { │ switch (subtype) {
case ICE_CEE_SUBTYPE_PG_CFG: │ case I40E_CEE_SUBTYPE_PG_CFG:
ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg); │ i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
break; │ break;
case ICE_CEE_SUBTYPE_PFC_CFG: │ case I40E_CEE_SUBTYPE_PFC_CFG:
ice_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg); │ i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
break; │ break;
case ICE_CEE_SUBTYPE_APP_PRI: │ case I40E_CEE_SUBTYPE_APP_PRI:
ice_parse_cee_app_tlv(sub_tlv, dcbcfg); │ i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
break; │ break;
default: │ default:
return; /* Invalid Sub-type return */ │ return; /* Invalid Sub-type return */
} │ }
feat_tlv_count++; │ feat_tlv_count++;
/* Move to next sub TLV */ │ /* Move to next sub TLV */
sub_tlv = (struct ice_cee_feat_tlv *) │ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
((char *)sub_tlv + sizeof(sub_tlv->hdr.typelen) + │ sizeof(sub_tlv->hdr.typelen) +
sublen); │ sublen);
} │ }
} │
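
Both parsers above walk the CEE feature sub-TLVs the same way: pull a 16-bit type/length word off the header, dispatch on the subtype, and advance by the header size plus the payload length, with a counter guarding against runaway lists. A standalone sketch of that walk over a fake buffer (LLDP packs a 7-bit type above a 9-bit length, which is what the drivers' masks and shifts encode):

#include <stdint.h>
#include <stdio.h>

#define TLV_LEN_MASK   0x01ff
#define TLV_TYPE_SHIFT 9

int main(void)
{
	/* two fake sub-TLVs: type 2 with 1 payload byte, then type 3
	 * with 2 payload bytes (headers big-endian, as on the wire) */
	uint8_t buf[] = { 0x04, 0x01, 0xaa, 0x06, 0x02, 0xbb, 0xcc };
	size_t off = 0;

	while (off + 2 <= sizeof(buf)) {
		uint16_t typelen = (uint16_t)((buf[off] << 8) | buf[off + 1]);
		uint16_t len  = typelen & TLV_LEN_MASK;
		uint16_t type = typelen >> TLV_TYPE_SHIFT;

		printf("sub-TLV type %u, %u payload byte(s)\n", type, len);
		off += 2 + len;  /* header plus payload, as in the drivers */
	}
	return 0;
}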
linux/drivers/net/ethernet/intel/e1000e/mac.c:942 │ linux/drivers/net/ethernet/intel/igb/e1000_mac.c:838
│
u32 ctrl; │ u32 ctrl;
│ s32 ret_val = 0;
│
ctrl = er32(CTRL); │ ctrl = rd32(E1000_CTRL);
│
/* Because we didn't get link via the internal auto-negotiation │ /* Because we didn't get link via the internal auto-negotiation
* mechanism (we either forced link or we got link via PHY │ * mechanism (we either forced link or we got link via PHY
* auto-neg), we have to manually enable/disable transmit and │ * auto-neg), we have to manually enable/disable transmit and
* receive flow control. │ * receive flow control.
* │ *
* The "Case" statement below enables/disable flow control │ * The "Case" statement below enables/disable flow control
* according to the "hw->fc.current_mode" parameter. │ * according to the "hw->fc.current_mode" parameter.
* │ *
* The possible values of the "fc" parameter are: │ * The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled │ * 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause │ * 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames). │ * frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames │ * 2: Tx flow control is enabled (we can send pause frames
* but we do not receive pause frames). │ * but we do not receive pause frames).
* 3: Both Rx and Tx flow control (symmetric) is enabled. │ * 3: Both Rx and Tx flow control (symmetric) is enabled.
* other: No other values should be possible at this point. │ * other: No other values should be possible at this point.
*/ │ */
e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); │ hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
│
switch (hw->fc.current_mode) { │ switch (hw->fc.current_mode) {
case e1000_fc_none: │ case e1000_fc_none:
ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); │ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
break; │ break;
case e1000_fc_rx_pause: │ case e1000_fc_rx_pause:
ctrl &= (~E1000_CTRL_TFCE); │ ctrl &= (~E1000_CTRL_TFCE);
ctrl |= E1000_CTRL_RFCE; │ ctrl |= E1000_CTRL_RFCE;
break; │ break;
case e1000_fc_tx_pause: │ case e1000_fc_tx_pause:
ctrl &= (~E1000_CTRL_RFCE); │ ctrl &= (~E1000_CTRL_RFCE);
ctrl |= E1000_CTRL_TFCE; │ ctrl |= E1000_CTRL_TFCE;
break; │ break;
case e1000_fc_full: │ case e1000_fc_full:
ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); │ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
break; │ break;
default: │ default:
e_dbg("Flow control param set incorrectly\n"); │ hw_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG; │ ret_val = -E1000_ERR_CONFIG;
│ goto out;
} │ }
│
ew32(CTRL, ctrl); │ wr32(E1000_CTRL, ctrl);
│
return 0; │ out:
│ return ret_val;
} │
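The switch above reduces to a two-bit truth table: RFCE is set for the rx_pause and full modes, TFCE for tx_pause and full. A sketch of that mapping as a pure function; the bit values mirror E1000_CTRL_RFCE/TFCE as I understand them, but treat them as illustrative rather than authoritative.

#include <stdint.h>

#define CTRL_RFCE 0x08000000u   /* Rx flow-control enable (illustrative value) */
#define CTRL_TFCE 0x10000000u   /* Tx flow-control enable (illustrative value) */

enum fc_mode { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

static uint32_t apply_fc_mode(uint32_t ctrl, enum fc_mode mode)
{
        ctrl &= ~(CTRL_RFCE | CTRL_TFCE);       /* start from "no pause" */
        if (mode == fc_rx_pause || mode == fc_full)
                ctrl |= CTRL_RFCE;              /* honour received pause frames */
        if (mode == fc_tx_pause || mode == fc_full)
                ctrl |= CTRL_TFCE;              /* send pause frames */
        return ctrl;
}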
linux/drivers/net/ethernet/agere/et131x.c:1171 │ linux/drivers/net/ethernet/agere/et131x.c:1240
│
struct mac_regs __iomem *mac = &adapter->regs->mac; │ struct mac_regs __iomem *mac = &adapter->regs->mac;
int status = 0; │ int status = 0;
u32 delay = 0; │ u32 delay = 0;
u32 mii_addr; │ u32 mii_addr;
u32 mii_cmd; │ u32 mii_cmd;
u32 mii_indicator; │ u32 mii_indicator;
│
/* Save a local copy of the registers we are dealing with so we can │ /* Save a local copy of the registers we are dealing with so we can
* set them back │ * set them back
*/ │ */
mii_addr = readl(&mac->mii_mgmt_addr); │ mii_addr = readl(&mac->mii_mgmt_addr);
mii_cmd = readl(&mac->mii_mgmt_cmd); │ mii_cmd = readl(&mac->mii_mgmt_cmd);
│
/* Stop the current operation */ │ /* Stop the current operation */
writel(0, &mac->mii_mgmt_cmd); │ writel(0, &mac->mii_mgmt_cmd);
│
/* Set up the register we need to read from on the correct PHY */ │ /* Set up the register we need to write to on the correct PHY */
writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr); │ writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
│
writel(0x1, &mac->mii_mgmt_cmd); │ /* Add the value to write to the registers to the mac */
│ writel(value, &mac->mii_mgmt_ctrl);
│
do { │ do {
udelay(50); │ udelay(50);
delay++; │ delay++;
mii_indicator = readl(&mac->mii_mgmt_indicator); │ mii_indicator = readl(&mac->mii_mgmt_indicator);
} while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50); │ } while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);
│
│ /* If we hit the max delay, we could not write the register */
│ if (delay == 100) {
│ u16 tmp;
│
/* If we hit the max delay, we could not read the register */ │
if (delay == 50) { │
dev_warn(&adapter->pdev->dev, │ dev_warn(&adapter->pdev->dev,
"reg 0x%08x could not be read\n", reg); │ "reg 0x%08x could not be written", reg);
dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", │ dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
mii_indicator); │ mii_indicator);
│ dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
│ readl(&mac->mii_mgmt_cmd));
│
│ et131x_mii_read(adapter, reg, &tmp);
│
status = -EIO; │ status = -EIO;
goto out; │
} │ }
│ /* Stop the write operation */
/* If we hit here we were able to read the register and we need to │
* return the value to the caller │
*/ │
*value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK; │
│
out: │
/* Stop the read operation */ │
writel(0, &mac->mii_mgmt_cmd); │ writel(0, &mac->mii_mgmt_cmd);
│
/* set the registers we touched back to the state at which we entered │ /* set the registers we touched back to the state at which we entered
* this function │ * this function
*/ │ */
writel(mii_addr, &mac->mii_mgmt_addr); │ writel(mii_addr, &mac->mii_mgmt_addr);
writel(mii_cmd, &mac->mii_mgmt_cmd); │ writel(mii_cmd, &mac->mii_mgmt_cmd);
│
return status; │ return status;
} │
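Both the read and the write path end in the same bounded busy-wait: poll the mgmt-indicator register until the busy flag clears or a retry budget runs out, and report -EIO on timeout. A generic sketch of that pattern; read_status and delay_us are caller-supplied stand-ins for readl() on mii_mgmt_indicator and udelay().

#include <stdint.h>
#include <stdbool.h>

static bool wait_not_busy(uint32_t (*read_status)(void *), void *ctx,
                          void (*delay_us)(unsigned int),
                          uint32_t busy_bit, unsigned int max_polls)
{
        unsigned int n;

        for (n = 0; n < max_polls; n++) {
                if (!(read_status(ctx) & busy_bit))
                        return true;    /* operation completed */
                delay_us(50);           /* same 50us spacing as the driver */
        }
        return false;                   /* timed out: caller reports -EIO */
}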
linux/drivers/net/ethernet/intel/e1000e/mac.c:1467 │ linux/drivers/net/ethernet/intel/igb/e1000_mac.c:1396
│
struct e1000_mac_info *mac = &hw->mac; │ struct e1000_mac_info *mac = &hw->mac;
s32 ret_val; │ s32 ret_val;
const u32 ledctl_mask = 0x000000FF; │ const u32 ledctl_mask = 0x000000FF;
const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; │ const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; │ const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
u16 data, i, temp; │ u16 data, i, temp;
const u16 led_mask = 0x0F; │ const u16 led_mask = 0x0F;
│
ret_val = hw->nvm.ops.valid_led_default(hw, &data); │ /* i210 and i211 devices have different LED mechanism */
│ if ((hw->mac.type == e1000_i210) ||
│ (hw->mac.type == e1000_i211))
│ ret_val = igb_valid_led_default_i210(hw, &data);
│ else
│ ret_val = igb_valid_led_default(hw, &data);
│
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
mac->ledctl_default = er32(LEDCTL); │ mac->ledctl_default = rd32(E1000_LEDCTL);
mac->ledctl_mode1 = mac->ledctl_default; │ mac->ledctl_mode1 = mac->ledctl_default;
mac->ledctl_mode2 = mac->ledctl_default; │ mac->ledctl_mode2 = mac->ledctl_default;
│
for (i = 0; i < 4; i++) { │ for (i = 0; i < 4; i++) {
temp = (data >> (i << 2)) & led_mask; │ temp = (data >> (i << 2)) & led_mask;
switch (temp) { │ switch (temp) {
case ID_LED_ON1_DEF2: │ case ID_LED_ON1_DEF2:
case ID_LED_ON1_ON2: │ case ID_LED_ON1_ON2:
case ID_LED_ON1_OFF2: │ case ID_LED_ON1_OFF2:
mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); │ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
mac->ledctl_mode1 |= ledctl_on << (i << 3); │ mac->ledctl_mode1 |= ledctl_on << (i << 3);
break; │ break;
case ID_LED_OFF1_DEF2: │ case ID_LED_OFF1_DEF2:
case ID_LED_OFF1_ON2: │ case ID_LED_OFF1_ON2:
case ID_LED_OFF1_OFF2: │ case ID_LED_OFF1_OFF2:
mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); │ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
mac->ledctl_mode1 |= ledctl_off << (i << 3); │ mac->ledctl_mode1 |= ledctl_off << (i << 3);
break; │ break;
default: │ default:
/* Do nothing */ │ /* Do nothing */
break; │ break;
} │ }
switch (temp) { │ switch (temp) {
case ID_LED_DEF1_ON2: │ case ID_LED_DEF1_ON2:
case ID_LED_ON1_ON2: │ case ID_LED_ON1_ON2:
case ID_LED_OFF1_ON2: │ case ID_LED_OFF1_ON2:
mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); │ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
mac->ledctl_mode2 |= ledctl_on << (i << 3); │ mac->ledctl_mode2 |= ledctl_on << (i << 3);
break; │ break;
case ID_LED_DEF1_OFF2: │ case ID_LED_DEF1_OFF2:
case ID_LED_ON1_OFF2: │ case ID_LED_ON1_OFF2:
case ID_LED_OFF1_OFF2: │ case ID_LED_OFF1_OFF2:
mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); │ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
mac->ledctl_mode2 |= ledctl_off << (i << 3); │ mac->ledctl_mode2 |= ledctl_off << (i << 3);
break; │ break;
default: │ default:
/* Do nothing */ │ /* Do nothing */
break; │ break;
} │ }
} │ }
│
return 0; │ out:
│ return ret_val;
} │
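The loop pulls four 4-bit LED codes out of one NVM word (data >> (i << 2)) and expands each into the corresponding 8-bit byte of the 32-bit LEDCTL value (i << 3). The same arithmetic, isolated:

#include <stdint.h>

static uint8_t led_code(uint16_t nvm_word, unsigned int led)    /* led = 0..3 */
{
        return (nvm_word >> (led << 2)) & 0x0f; /* 4 bits per LED */
}

static uint32_t set_ledctl_byte(uint32_t ledctl, unsigned int led, uint8_t mode)
{
        ledctl &= ~(0xffu << (led << 3));       /* clear this LED's byte */
        return ledctl | ((uint32_t)mode << (led << 3));
}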
linux/drivers/net/ethernet/sfc/falcon/selftest.c:527 │ linux/drivers/net/ethernet/sfc/selftest.c:525
│
struct ef4_nic *efx = tx_queue->efx; │ struct efx_nic *efx = tx_queue->efx;
struct ef4_loopback_state *state = efx->loopback_selftest; │ struct efx_loopback_state *state = efx->loopback_selftest;
int i, begin_rc, end_rc; │ int i, begin_rc, end_rc;
│
for (i = 0; i < 3; i++) { │ for (i = 0; i < 3; i++) {
/* Determine how many packets to send */ │ /* Determine how many packets to send */
state->packet_count = efx->txq_entries / 3; │ state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count); │ state->packet_count = min(1 << (i << 2), state->packet_count);
state->skbs = kcalloc(state->packet_count, │ state->skbs = kcalloc(state->packet_count,
sizeof(state->skbs[0]), GFP_KERNEL); │ sizeof(state->skbs[0]), GFP_KERNEL);
if (!state->skbs) │ if (!state->skbs)
return -ENOMEM; │ return -ENOMEM;
state->flush = false; │ state->flush = false;
│
netif_dbg(efx, drv, efx->net_dev, │ netif_dbg(efx, drv, efx->net_dev,
"TX queue %d testing %s loopback with %d packets\n", │ "TX queue %d (hw %d) testing %s loopback with %d packets\n",
tx_queue->queue, LOOPBACK_MODE(efx), │ tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),
state->packet_count); │ state->packet_count);
│
ef4_iterate_state(efx); │ efx_iterate_state(efx);
begin_rc = ef4_begin_loopback(tx_queue); │ begin_rc = efx_begin_loopback(tx_queue);
│
/* This will normally complete very quickly, but be │ /* This will normally complete very quickly, but be
* prepared to wait much longer. */ │ * prepared to wait much longer. */
msleep(1); │ msleep(1);
if (!ef4_poll_loopback(efx)) { │ if (!efx_poll_loopback(efx)) {
msleep(LOOPBACK_TIMEOUT_MS); │ msleep(LOOPBACK_TIMEOUT_MS);
ef4_poll_loopback(efx); │ efx_poll_loopback(efx);
} │ }
│
end_rc = ef4_end_loopback(tx_queue, lb_tests); │ end_rc = efx_end_loopback(tx_queue, lb_tests);
kfree(state->skbs); │ kfree(state->skbs);
│
if (begin_rc || end_rc) { │ if (begin_rc || end_rc) {
/* Wait a while to ensure there are no packets │ /* Wait a while to ensure there are no packets
* floating around after a failure. */ │ * floating around after a failure. */
schedule_timeout_uninterruptible(HZ / 10); │ schedule_timeout_uninterruptible(HZ / 10);
return begin_rc ? begin_rc : end_rc; │ return begin_rc ? begin_rc : end_rc;
} │ }
} │ }
│
netif_dbg(efx, drv, efx->net_dev, │ netif_dbg(efx, drv, efx->net_dev,
"TX queue %d passed %s loopback test with a burst length " │ "TX queue %d passed %s loopback test with a burst length "
"of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), │ "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx),
state->packet_count); │ state->packet_count);
│
return 0; │ return 0;
} │
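The burst sizing in the loop works out to 1, 16, then 256 packets (1 << (i << 2) for i = 0..2), each capped at a third of the TX ring so test packets can never fill more than a third of the queue. The arithmetic, isolated:

static int burst_size(int i, int txq_entries)
{
        int cap = txq_entries / 3;      /* at most a third of the ring */
        int want = 1 << (i << 2);       /* 1, 16, 256 for i = 0, 1, 2 */

        return want < cap ? want : cap;
}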
linux/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c:1637 │ linux/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c:1759
│
if (!(oct_dev->io_qmask.iq & BIT_ULL(j))) │ j = lio->linfo.txpciq[vj].s.q_no;
continue; │
/*packets to network port*/ │
/*# of packets tx to network */ │
data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); │
/*# of bytes tx to network */ │
data[i++] = │
CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes); │
/*# of packets dropped */ │
data[i++] = │
CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped); │
/*# of tx fails due to queue full */ │
data[i++] = │
CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy); │
/*XXX gather entries sent */ │
data[i++] = │
CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent); │
│
/*instruction to firmware: data and control */ │ /* packets to network port */
/*# of instructions to the queue */ │ /* # of packets tx to network */
data[i++] = │ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted); │ /* # of bytes tx to network */
/*# of instructions processed */ │
data[i++] = CVM_CAST64( │ data[i++] = CVM_CAST64(
oct_dev->instr_queue[j]->stats.instr_processed); │ oct_dev->instr_queue[j]->stats.tx_tot_bytes);
/*# of instructions could not be processed */ │ /* # of packets dropped */
data[i++] = CVM_CAST64( │ data[i++] = CVM_CAST64(
oct_dev->instr_queue[j]->stats.instr_dropped); │ oct_dev->instr_queue[j]->stats.tx_dropped);
/*bytes sent through the queue */ │ /* # of tx fails due to queue full */
data[i++] = │ data[i++] = CVM_CAST64(
CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent); │ oct_dev->instr_queue[j]->stats.tx_iq_busy);
│ /* XXX gather entries sent */
│ data[i++] = CVM_CAST64(
│ oct_dev->instr_queue[j]->stats.sgentry_sent);
│
/*tso request*/ │ /* instruction to firmware: data and control */
│ /* # of instructions to the queue */
│ data[i++] = CVM_CAST64(
│ oct_dev->instr_queue[j]->stats.instr_posted);
│ /* # of instructions processed */
│ data[i++] =
│ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
│ /* # of instructions could not be processed */
│ data[i++] =
│ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
│ /* bytes sent through the queue */
│ data[i++] = CVM_CAST64(
│ oct_dev->instr_queue[j]->stats.bytes_sent);
│ /* tso request */
data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); │ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
/*vxlan request*/ │ /* vxlan request */
data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); │ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
/*txq restart*/ │ /* txq restart */
data[i++] = │ data[i++] = CVM_CAST64(
CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart); │ oct_dev->instr_queue[j]->stats.tx_restart);
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7677 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:8594
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
│ u8 byte2;
│ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
│ __le16 word1;
│ __le16 word2;
│ __le16 word3;
│ __le16 word4;
│ __le32 reg2;
│ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7677 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7751
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7677 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7714
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/intel/i40e/i40e_nvm.c:223 │ linux/drivers/net/ethernet/intel/i40e/i40e_nvm.c:550
│
i40e_status ret_code = I40E_ERR_NVM; │ i40e_status ret_code = I40E_ERR_NVM;
struct i40e_asq_cmd_details cmd_details; │ struct i40e_asq_cmd_details cmd_details;
│
memset(&cmd_details, 0, sizeof(cmd_details)); │ memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc; │ cmd_details.wb_desc = &hw->nvm_wb_desc;
│
/* Here we are checking the SR limit only for the flat memory model. │ /* Here we are checking the SR limit only for the flat memory model.
* We cannot do it for the module-based model, as we did not acquire │ * We cannot do it for the module-based model, as we did not acquire
* the NVM resource yet (we cannot get the module pointer value). │ * the NVM resource yet (we cannot get the module pointer value).
* Firmware will check the module-based model. │ * Firmware will check the module-based model.
*/ │ */
if ((offset + words) > hw->nvm.sr_size) │ if ((offset + words) > hw->nvm.sr_size)
i40e_debug(hw, I40E_DEBUG_NVM, │ i40e_debug(hw, I40E_DEBUG_NVM,
"NVM write error: offset %d beyond Shadow RAM limit %d\n", │ "NVM write error: offset %d beyond Shadow RAM limit %d\n",
(offset + words), hw->nvm.sr_size); │ (offset + words), hw->nvm.sr_size);
else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) │ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
/* We can write only up to 4KB (one sector), in one AQ write */ │ /* We can write only up to 4KB (one sector), in one AQ write */
i40e_debug(hw, I40E_DEBUG_NVM, │ i40e_debug(hw, I40E_DEBUG_NVM,
"NVM write fail error: tried to write %d words, limit is %d.\ │ "NVM write fail error: tried to write %d words, limit is %d.\
words, I40E_SR_SECTOR_SIZE_IN_WORDS); │ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) │ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
!= (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) │ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
/* A single write cannot spread over two sectors */ │ /* A single write cannot spread over two sectors */
i40e_debug(hw, I40E_DEBUG_NVM, │ i40e_debug(hw, I40E_DEBUG_NVM,
"NVM write error: cannot spread over two sectors in a single │ "NVM write error: cannot spread over two sectors in a single
offset, words); │ offset, words);
else │ else
ret_code = i40e_aq_read_nvm(hw, module_pointer, │ ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/ │ 2 * offset, /*bytes*/
2 * words, /*bytes*/ │ 2 * words, /*bytes*/
data, last_command, &cmd_details); │ data, last_command, 0,
│ &cmd_details);
│
return ret_code; │ return ret_code;
} │
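The three pre-checks amount to: stay inside the Shadow RAM, write at most one 4KB sector per AQ command, and never straddle a sector boundary (first and last word must land in the same sector). A sketch of the same arithmetic; SECTOR_WORDS stands in for I40E_SR_SECTOR_SIZE_IN_WORDS, assumed here to be 2048 16-bit words (4KB).

#include <stdbool.h>
#include <stdint.h>

#define SECTOR_WORDS 2048u  /* assumed value of I40E_SR_SECTOR_SIZE_IN_WORDS */

static bool nvm_write_ok(uint32_t offset, uint32_t words, uint32_t sr_size)
{
        if (!words)
                return false;           /* nothing to write */
        if (offset + words > sr_size)
                return false;           /* beyond the Shadow RAM limit */
        if (words > SECTOR_WORDS)
                return false;           /* more than one sector per AQ write */
        /* first and last word must fall in the same sector */
        return (offset / SECTOR_WORDS) == ((offset + words - 1) / SECTOR_WORDS);
}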
linux/drivers/net/ethernet/atheros/atl1e/atl1e_main.c:1153 │ linux/drivers/net/ethernet/atheros/atl1c/atl1c_main.c:1557
│
struct atl1e_adapter *adapter = netdev_priv(netdev); │ struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1e_hw_stats *hw_stats = &adapter->hw_stats; │ struct atl1c_hw_stats *hw_stats = &adapter->hw_stats;
struct net_device_stats *net_stats = &netdev->stats; │ struct net_device_stats *net_stats = &netdev->stats;
│
│ atl1c_update_hw_stats(adapter);
net_stats->rx_bytes = hw_stats->rx_byte_cnt; │ net_stats->rx_bytes = hw_stats->rx_byte_cnt;
net_stats->tx_bytes = hw_stats->tx_byte_cnt; │ net_stats->tx_bytes = hw_stats->tx_byte_cnt;
net_stats->multicast = hw_stats->rx_mcast; │ net_stats->multicast = hw_stats->rx_mcast;
net_stats->collisions = hw_stats->tx_1_col + │ net_stats->collisions = hw_stats->tx_1_col +
hw_stats->tx_2_col + │ hw_stats->tx_2_col +
hw_stats->tx_late_col + │ hw_stats->tx_late_col +
hw_stats->tx_abort_col; │ hw_stats->tx_abort_col;
│
net_stats->rx_errors = hw_stats->rx_frag + │ net_stats->rx_errors = hw_stats->rx_frag +
hw_stats->rx_fcs_err + │ hw_stats->rx_fcs_err +
hw_stats->rx_len_err + │ hw_stats->rx_len_err +
hw_stats->rx_sz_ov + │ hw_stats->rx_sz_ov +
hw_stats->rx_rrd_ov + │ hw_stats->rx_rrd_ov +
hw_stats->rx_align_err + │ hw_stats->rx_align_err +
hw_stats->rx_rxf_ov; │ hw_stats->rx_rxf_ov;
│
net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; │ net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov;
net_stats->rx_length_errors = hw_stats->rx_len_err; │ net_stats->rx_length_errors = hw_stats->rx_len_err;
net_stats->rx_crc_errors = hw_stats->rx_fcs_err; │ net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
net_stats->rx_frame_errors = hw_stats->rx_align_err; │ net_stats->rx_frame_errors = hw_stats->rx_align_err;
net_stats->rx_dropped = hw_stats->rx_rrd_ov; │ net_stats->rx_dropped = hw_stats->rx_rrd_ov;
│
net_stats->tx_errors = hw_stats->tx_late_col + │ net_stats->tx_errors = hw_stats->tx_late_col +
hw_stats->tx_abort_col + │ hw_stats->tx_abort_col +
hw_stats->tx_underrun + │ hw_stats->tx_underrun +
hw_stats->tx_trunc; │ hw_stats->tx_trunc;
│
net_stats->tx_fifo_errors = hw_stats->tx_underrun; │ net_stats->tx_fifo_errors = hw_stats->tx_underrun;
net_stats->tx_aborted_errors = hw_stats->tx_abort_col; │ net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
net_stats->tx_window_errors = hw_stats->tx_late_col; │ net_stats->tx_window_errors = hw_stats->tx_late_col;
│
net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; │ net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; │ net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
│
return net_stats; │ return net_stats;
} │
linux/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h:5429 │ linux/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h:5402
│
│ __le16 error_code;
__le16 req_type; │ __le16 req_type;
__le16 cmpl_ring; │
__le16 seq_id; │ __le16 seq_id;
__le16 target_id; │ __le16 resp_len;
__le64 resp_addr; │
__le32 flags; │ __le32 flags;
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL │ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
__le16 port_id; │ u8 unused_0[3];
u8 unused_0[2]; │ u8 valid;
} │
linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:1883 │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c:2066
│
struct igb_ring *tx_ring = &adapter->test_tx_ring; │ struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
struct igb_ring *rx_ring = &adapter->test_rx_ring; │ struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
u16 i, j, lc, good_cnt; │ int i, j, lc, good_cnt, ret_val = 0;
int ret_val = 0; │ unsigned int size = 1024;
unsigned int size = IGB_RX_HDR_LEN; │
netdev_tx_t tx_ret_val; │ netdev_tx_t tx_ret_val;
struct sk_buff *skb; │ struct sk_buff *skb;
│ u32 flags_orig = adapter->flags;
│
│ /* DCB can modify the frames on Tx */
│ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
│
/* allocate test skb */ │ /* allocate test skb */
skb = alloc_skb(size, GFP_KERNEL); │ skb = alloc_skb(size, GFP_KERNEL);
if (!skb) │ if (!skb)
return 11; │ return 11;
│
/* place data into test skb */ │ /* place data into test skb */
igb_create_lbtest_frame(skb, size); │ ixgbe_create_lbtest_frame(skb, size);
skb_put(skb, size); │ skb_put(skb, size);
│
/* Calculate the loop count based on the largest descriptor ring │ /*
│ * Calculate the loop count based on the largest descriptor ring
* The idea is to wrap the largest ring a number of times using 64 │ * The idea is to wrap the largest ring a number of times using 64
* send/receive pairs during each loop │ * send/receive pairs during each loop
*/ │ */
│
if (rx_ring->count <= tx_ring->count) │ if (rx_ring->count <= tx_ring->count)
lc = ((tx_ring->count / 64) * 2) + 1; │ lc = ((tx_ring->count / 64) * 2) + 1;
else │ else
lc = ((rx_ring->count / 64) * 2) + 1; │ lc = ((rx_ring->count / 64) * 2) + 1;
│
for (j = 0; j <= lc; j++) { /* loop count loop */ │ for (j = 0; j <= lc; j++) {
/* reset count of good packets */ │ /* reset count of good packets */
good_cnt = 0; │ good_cnt = 0;
│
/* place 64 packets on the transmit queue */ │ /* place 64 packets on the transmit queue */
for (i = 0; i < 64; i++) { │ for (i = 0; i < 64; i++) {
skb_get(skb); │ skb_get(skb);
tx_ret_val = igb_xmit_frame_ring(skb, tx_ring); │ tx_ret_val = ixgbe_xmit_frame_ring(skb,
│ adapter,
│ tx_ring);
if (tx_ret_val == NETDEV_TX_OK) │ if (tx_ret_val == NETDEV_TX_OK)
good_cnt++; │ good_cnt++;
} │ }
│
if (good_cnt != 64) { │ if (good_cnt != 64) {
ret_val = 12; │ ret_val = 12;
break; │ break;
} │ }
│
/* allow 200 milliseconds for packets to go from Tx to Rx */ │ /* allow 200 milliseconds for packets to go from Tx to Rx */
msleep(200); │ msleep(200);
│
good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); │ good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
if (good_cnt != 64) { │ if (good_cnt != 64) {
ret_val = 13; │ ret_val = 13;
break; │ break;
} │ }
} /* end loop count loop */ │ }
│
/* free the original skb */ │ /* free the original skb */
kfree_skb(skb); │ kfree_skb(skb);
│ adapter->flags = flags_orig;
│
return ret_val; │ return ret_val;
} │
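The loop count comes from the larger of the two rings: lc = ((count / 64) * 2) + 1, so sending bursts of 64 packets wraps the largest ring roughly twice. Worked example: a 512-descriptor ring gives ((512 / 64) * 2) + 1 = 17 bursts.

static int loop_count(int tx_count, int rx_count)
{
        int count = (rx_count <= tx_count) ? tx_count : rx_count;

        return ((count / 64) * 2) + 1;  /* ~2 wraps of the largest ring */
}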
linux/drivers/net/ethernet/nvidia/forcedeth.c:2283 │ linux/drivers/net/ethernet/nvidia/forcedeth.c:2461
│
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; │ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 frag_size = skb_frag_size(frag); │ u32 frag_size = skb_frag_size(frag);
offset = 0; │ offset = 0;
│
do { │ do {
│ bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
if (!start_tx_ctx) │ if (!start_tx_ctx)
start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; │ start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
│
bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size; │
np->put_tx_ctx->dma = skb_frag_dma_map( │ np->put_tx_ctx->dma = skb_frag_dma_map(
&np->pci_dev->dev, │ &np->pci_dev->dev,
frag, offset, │ frag, offset,
bcnt, │ bcnt,
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
│
if (unlikely(dma_mapping_error(&np->pci_dev->dev, │ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
np->put_tx_ctx->dma))) { │ np->put_tx_ctx->dma))) {
│
/* Unwind the mapped fragments */ │ /* Unwind the mapped fragments */
do { │ do {
nv_unmap_txskb(np, start_tx_ctx); │ nv_unmap_txskb(np, start_tx_ctx);
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) │ if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
tmp_tx_ctx = np->tx_skb; │ tmp_tx_ctx = np->tx_skb;
} while (tmp_tx_ctx != np->put_tx_ctx); │ } while (tmp_tx_ctx != np->put_tx_ctx);
dev_kfree_skb_any(skb); │ dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx; │ np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp); │ u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped); │ nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp); │ u64_stats_update_end(&np->swstats_tx_syncp);
│
ret = NETDEV_TX_OK; │ ret = NETDEV_TX_OK;
│
goto dma_error; │ goto dma_error;
} │ }
│
np->put_tx_ctx->dma_len = bcnt; │ np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 0; │ np->put_tx_ctx->dma_single = 0;
put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); │ put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
│ put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); │ put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
│
offset += bcnt; │ offset += bcnt;
frag_size -= bcnt; │ frag_size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.orig)) │ if (unlikely(put_tx++ == np->last_tx.ex))
put_tx = np->tx_ring.orig; │ put_tx = np->tx_ring.ex;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) │ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
np->put_tx_ctx = np->tx_skb; │ np->put_tx_ctx = np->tx_skb;
} while (frag_size); │ } while (frag_size);
} │
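On a DMA-mapping failure mid-skb, the driver walks from the first context it mapped for this skb up to (but not including) the current put pointer, unmapping each one and wrapping at the ring end, then restores put_tx_ctx to the start. A sketch of that unwind with illustrative types; unmap_one stands in for nv_unmap_txskb().

struct ctx { int dummy; };      /* per-descriptor bookkeeping, illustrative */

static void unwind_mappings(struct ctx *ring, unsigned int ring_size,
                            unsigned int start, unsigned int put,
                            void (*unmap_one)(struct ctx *))
{
        unsigned int i = start;

        while (i != put) {
                unmap_one(&ring[i]);            /* undo one DMA mapping */
                i = (i + 1) % ring_size;        /* wrap as last_tx_ctx does */
        }
}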
linux/drivers/net/ethernet/ni/nixge.c:699 │ linux/drivers/net/ethernet/ni/nixge.c:743
│
struct nixge_priv *priv = netdev_priv(_ndev); │ struct nixge_priv *priv = netdev_priv(_ndev);
struct net_device *ndev = _ndev; │ struct net_device *ndev = _ndev;
unsigned int status; │ unsigned int status;
dma_addr_t phys; │ dma_addr_t phys;
u32 cr; │ u32 cr;
│
status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET); │ status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { │ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status); │ /* Turn off IRQs; NAPI will poll instead */
nixge_start_xmit_done(priv->ndev); │ nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
│ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
│ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
│ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
│
│ if (napi_schedule_prep(&priv->napi))
│ __napi_schedule(&priv->napi);
goto out; │ goto out;
} │ }
if (!(status & XAXIDMA_IRQ_ALL_MASK)) { │ if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
netdev_err(ndev, "No interrupts asserted in Tx path\n"); │ netdev_err(ndev, "No interrupts asserted in Rx path\n");
return IRQ_NONE; │ return IRQ_NONE;
} │ }
if (status & XAXIDMA_IRQ_ERROR_MASK) { │ if (status & XAXIDMA_IRQ_ERROR_MASK) {
phys = nixge_hw_dma_bd_get_addr(&priv->tx_bd_v[priv->tx_bd_ci], │ phys = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[priv->rx_bd_ci],
phys); │ phys);
│ netdev_err(ndev, "DMA Rx error 0x%x\n", status);
netdev_err(ndev, "DMA Tx error 0x%x\n", status); │
netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys); │ netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys);
│
cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); │ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
/* Disable coalesce, delay timer and error interrupts */ │ /* Disable coalesce, delay timer and error interrupts */
cr &= (~XAXIDMA_IRQ_ALL_MASK); │ cr &= (~XAXIDMA_IRQ_ALL_MASK);
/* Write to the Tx channel control register */ │ /* Finally write to the Tx channel control register */
nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr); │ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
│
cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); │ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
/* Disable coalesce, delay timer and error interrupts */ │ /* Disable coalesce, delay timer and error interrupts */
cr &= (~XAXIDMA_IRQ_ALL_MASK); │ cr &= (~XAXIDMA_IRQ_ALL_MASK);
/* Write to the Rx channel control register */ │ /* write to the Rx channel control register */
nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); │ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
│
tasklet_schedule(&priv->dma_err_tasklet); │ tasklet_schedule(&priv->dma_err_tasklet);
nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status); │ nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
} │ }
out: │ out:
return IRQ_HANDLED; │ return IRQ_HANDLED;
} │
linux/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c:640 │ linux/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c:743
│
char data_str[ARRAY_SIZE(rx_queue_info_items)][HNS3_DBG_DATA_STR_LEN]; │ char data_str[ARRAY_SIZE(tx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); │ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
char *result[ARRAY_SIZE(rx_queue_info_items)]; │ char *result[ARRAY_SIZE(tx_queue_info_items)];
struct hns3_nic_priv *priv = h->priv; │ struct hns3_nic_priv *priv = h->priv;
char content[HNS3_DBG_INFO_LEN]; │ char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring; │ struct hns3_enet_ring *ring;
int pos = 0; │ int pos = 0;
u32 i; │ u32 i;
│
if (!priv->ring) { │ if (!priv->ring) {
dev_err(&h->pdev->dev, "priv->ring is NULL\n"); │ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
return -EFAULT; │ return -EFAULT;
} │ }
│
for (i = 0; i < ARRAY_SIZE(rx_queue_info_items); i++) │ for (i = 0; i < ARRAY_SIZE(tx_queue_info_items); i++)
result[i] = &data_str[i][0]; │ result[i] = &data_str[i][0];
│
hns3_dbg_fill_content(content, sizeof(content), rx_queue_info_items, │ hns3_dbg_fill_content(content, sizeof(content), tx_queue_info_items,
NULL, ARRAY_SIZE(rx_queue_info_items)); │ NULL, ARRAY_SIZE(tx_queue_info_items));
pos += scnprintf(buf + pos, len - pos, "%s", content); │ pos += scnprintf(buf + pos, len - pos, "%s", content);
│
for (i = 0; i < h->kinfo.num_tqps; i++) { │ for (i = 0; i < h->kinfo.num_tqps; i++) {
/* Each iteration must check whether the instance is being │ /* Each iteration must check whether the instance is being
* reset, to avoid referencing freed memory, and must ensure │ * reset, to avoid referencing freed memory, and must ensure
* that the code below completes within 100ms. │ * that the code below completes within 100ms.
*/ │ */
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || │ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) │ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return -EPERM; │ return -EPERM;
│
ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)]; │ ring = &priv->ring[i];
hns3_dump_rx_queue_info(ring, ae_dev, result, i); │ hns3_dump_tx_queue_info(ring, ae_dev, result, i);
hns3_dbg_fill_content(content, sizeof(content), │ hns3_dbg_fill_content(content, sizeof(content),
rx_queue_info_items, │ tx_queue_info_items,
(const char **)result, │ (const char **)result,
ARRAY_SIZE(rx_queue_info_items)); │ ARRAY_SIZE(tx_queue_info_items));
pos += scnprintf(buf + pos, len - pos, "%s", content); │ pos += scnprintf(buf + pos, len - pos, "%s", content);
} │ }
│
│ hns3_dbg_tx_spare_info(ring, buf, len, h->kinfo.num_tqps, &pos);
│
return 0; │ return 0;
} │
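Every row of the dump is appended with pos += scnprintf(buf + pos, len - pos, ...); the kernel's scnprintf() returns the number of bytes actually stored, so pos can never run past the buffer. A userspace analogue has to clamp, since snprintf() reports the would-be length instead:

#include <stdio.h>

static int append(char *buf, int len, int pos, const char *s)
{
        int n;

        if (pos >= len - 1)
                return pos;             /* buffer already full */
        n = snprintf(buf + pos, len - pos, "%s", s);
        if (n > len - pos - 1)
                n = len - pos - 1;      /* clamp to what was stored */
        return pos + n;
}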
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10813 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7751
│
u8 reserved; │ u8 byte0;
u8 state; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10813 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10888
│
u8 reserved; │ u8 byte0;
u8 state; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
│ u8 byte2;
│ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
│ __le16 word1;
│ __le16 word2;
│ __le16 word3;
│ __le16 word4;
│ __le32 reg2;
│ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10813 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7714
│
u8 reserved; │ u8 byte0;
u8 state; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10813 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7677
│
u8 reserved; │ u8 byte0;
u8 state; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c:1991 │ linux/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c:2307
│
struct mss_egress_ctl_register ctl_reg; │ struct mss_ingress_ctl_register ctl_reg;
int ret; │ int ret;
│
memset(&ctl_reg, 0, sizeof(ctl_reg)); │ memset(&ctl_reg, 0, sizeof(ctl_reg));
│
ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1, MSS_EGRESS_CTL_REGISTER_ADDR, │ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
&ctl_reg.word_0); │ MSS_INGRESS_CTL_REGISTER_ADDR, &ctl_reg.word_0);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1, │ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
MSS_EGRESS_CTL_REGISTER_ADDR + 4, │ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
&ctl_reg.word_1); │ &ctl_reg.word_1);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
│
/* Toggle the Egress MIB clear bit 0->1->0 */ │ /* Toggle the Ingress MIB clear bit 0->1->0 */
ctl_reg.bits_0.clear_counter = 0; │ ctl_reg.bits_0.clear_count = 0;
ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1, │ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0); │ MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1, │ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
MSS_EGRESS_CTL_REGISTER_ADDR + 4, │ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
ctl_reg.word_1); │ ctl_reg.word_1);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
│
ctl_reg.bits_0.clear_counter = 1; │ ctl_reg.bits_0.clear_count = 1;
ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1, │ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0); │ MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1, │ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
MSS_EGRESS_CTL_REGISTER_ADDR + 4, │ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
ctl_reg.word_1); │ ctl_reg.word_1);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
│
ctl_reg.bits_0.clear_counter = 0; │ ctl_reg.bits_0.clear_count = 0;
ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1, │ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0); │ MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1, │ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
MSS_EGRESS_CTL_REGISTER_ADDR + 4, │ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
ctl_reg.word_1); │ ctl_reg.word_1);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
│
return 0; │ return 0;
} │
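Clearing the MIB counters is edge-triggered: the clear bit is written 0, then 1, then 0, rewriting both 32-bit halves of the control register at each step. A condensed sketch of the toggle; write_reg stands in for the paired aq_mss_mdio_write() calls.

#include <stdint.h>

static int toggle_clear_bit(int (*write_reg)(void *hw, uint32_t word),
                            void *hw, uint32_t ctl, uint32_t clear_bit)
{
        static const int seq[] = { 0, 1, 0 };   /* 0 -> 1 -> 0 edge */
        int i, ret;

        for (i = 0; i < 3; i++) {
                uint32_t word = (ctl & ~clear_bit) | (seq[i] ? clear_bit : 0);

                ret = write_reg(hw, word);      /* hardware latches the edge */
                if (ret)
                        return ret;
        }
        return 0;
}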
linux/drivers/net/ethernet/mellanox/mlx4/port.c:843 │ linux/drivers/net/ethernet/mellanox/mlx4/port.c:950
│
struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table; │ struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table; │ struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
int ret = 0; │ int ret = 0;
int i; │ int i;
bool update1 = false; │ bool update1 = false;
bool update2 = false; │ bool update2 = false;
│
mutex_lock(&t1->mutex); │ mutex_lock(&t1->mutex);
mutex_lock(&t2->mutex); │ mutex_lock(&t2->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { │ for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
if ((t1->entries[i] != t2->entries[i]) && │ if ((t1->entries[i] != t2->entries[i]) &&
t1->entries[i] && t2->entries[i]) { │ t1->entries[i] && t2->entries[i]) {
mlx4_warn(dev, "can't duplicate entry %d in mac table\n", i); │ mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i);
ret = -EINVAL; │ ret = -EINVAL;
goto unlock; │ goto unlock;
} │ }
} │ }
│
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { │ for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
if (t1->entries[i] && !t2->entries[i]) { │ if (t1->entries[i] && !t2->entries[i]) {
t2->entries[i] = t1->entries[i]; │ t2->entries[i] = t1->entries[i];
t2->is_dup[i] = true; │ t2->is_dup[i] = true;
update2 = true; │ update2 = true;
} else if (!t1->entries[i] && t2->entries[i]) { │ } else if (!t1->entries[i] && t2->entries[i]) {
t1->entries[i] = t2->entries[i]; │ t1->entries[i] = t2->entries[i];
t1->is_dup[i] = true; │ t1->is_dup[i] = true;
update1 = true; │ update1 = true;
} else if (t1->entries[i] && t2->entries[i]) { │ } else if (t1->entries[i] && t2->entries[i]) {
t1->is_dup[i] = true; │ t1->is_dup[i] = true;
t2->is_dup[i] = true; │ t2->is_dup[i] = true;
} │ }
} │ }
│
if (update1) { │ if (update1) {
ret = mlx4_set_port_mac_table(dev, 1, t1->entries); │ ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
if (ret) │ if (ret)
mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret) │ mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret
} │ }
if (!ret && update2) { │ if (!ret && update2) {
ret = mlx4_set_port_mac_table(dev, 2, t2->entries); │ ret = mlx4_set_port_vlan_table(dev, 2, t2->entries);
if (ret) │ if (ret)
mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret) │ mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret
} │ }
│
if (ret) │ if (ret)
mlx4_warn(dev, "failed to create mirror MAC tables\n"); │ mlx4_warn(dev, "failed to create mirror VLAN tables\n");
unlock: │ unlock:
mutex_unlock(&t2->mutex); │ mutex_unlock(&t2->mutex);
mutex_unlock(&t1->mutex); │ mutex_unlock(&t1->mutex);
return ret; │ return ret;
} │
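
Both sides run the same table-mirroring algorithm, once for MAC tables and once for VLAN tables: first reject any slot where the two ports hold different values, then copy every one-sided entry to the other port and mark copied (and shared) slots with is_dup. A minimal sketch of that algorithm; TABLE_SIZE and struct port_table are assumed simplifications of the driver's mlx4_mac_table/mlx4_vlan_table, and the per-table locking and the final hardware writes are left out.

#include <stdbool.h>
#include <stdio.h>

#define TABLE_SIZE 8	/* assumed; the driver uses MLX4_MAX_MAC/VLAN_NUM */

struct port_table {
	unsigned long long entries[TABLE_SIZE];	/* 0 means "slot unused" */
	bool is_dup[TABLE_SIZE];
};

/* Mirror two per-port tables: reject conflicting slots, then copy each
 * one-sided entry across and mark both sides as duplicates, exactly as
 * the mlx4 code above does.
 */
static int mirror_tables(struct port_table *t1, struct port_table *t2)
{
	int i;

	for (i = 0; i < TABLE_SIZE; i++)
		if (t1->entries[i] && t2->entries[i] &&
		    t1->entries[i] != t2->entries[i])
			return -1;	/* same slot, different values: cannot mirror */

	for (i = 0; i < TABLE_SIZE; i++) {
		if (t1->entries[i] && !t2->entries[i]) {
			t2->entries[i] = t1->entries[i];
			t2->is_dup[i] = true;
		} else if (!t1->entries[i] && t2->entries[i]) {
			t1->entries[i] = t2->entries[i];
			t1->is_dup[i] = true;
		} else if (t1->entries[i]) {	/* both set and equal */
			t1->is_dup[i] = true;
			t2->is_dup[i] = true;
		}
	}
	return 0;
}

int main(void)
{
	struct port_table a = { .entries = { 1, 0, 3 } };
	struct port_table b = { .entries = { 1, 2, 0 } };

	printf("mirror rc=%d, b[2]=%llu, a[1]=%llu\n",
	       mirror_tables(&a, &b), b.entries[2], a.entries[1]);
	return 0;
}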
linux/drivers/net/ethernet/intel/ice/ice_type.h:979 │ linux/drivers/net/ethernet/intel/i40e/i40e_type.h:1186
│
/* eth stats collected by the port */ │ /* eth stats collected by the port */
struct ice_eth_stats eth; │ struct i40e_eth_stats eth;
│
/* additional port specific stats */ │ /* additional port specific stats */
u64 tx_dropped_link_down; /* tdold */ │ u64 tx_dropped_link_down; /* tdold */
u64 crc_errors; /* crcerrs */ │ u64 crc_errors; /* crcerrs */
u64 illegal_bytes; /* illerrc */ │ u64 illegal_bytes; /* illerrc */
u64 error_bytes; /* errbc */ │ u64 error_bytes; /* errbc */
u64 mac_local_faults; /* mlfc */ │ u64 mac_local_faults; /* mlfc */
u64 mac_remote_faults; /* mrfc */ │ u64 mac_remote_faults; /* mrfc */
u64 rx_len_errors; /* rlec */ │ u64 rx_length_errors; /* rlec */
u64 link_xon_rx; /* lxonrxc */ │ u64 link_xon_rx; /* lxonrxc */
u64 link_xoff_rx; /* lxoffrxc */ │ u64 link_xoff_rx; /* lxoffrxc */
u64 link_xon_tx; /* lxontxc */ │
u64 link_xoff_tx; /* lxofftxc */ │
u64 priority_xon_rx[8]; /* pxonrxc[8] */ │ u64 priority_xon_rx[8]; /* pxonrxc[8] */
u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ │ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
│ u64 link_xon_tx; /* lxontxc */
│ u64 link_xoff_tx; /* lxofftxc */
u64 priority_xon_tx[8]; /* pxontxc[8] */ │ u64 priority_xon_tx[8]; /* pxontxc[8] */
u64 priority_xoff_tx[8]; /* pxofftxc[8] */ │ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ │ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
u64 rx_size_64; /* prc64 */ │ u64 rx_size_64; /* prc64 */
u64 rx_size_127; /* prc127 */ │ u64 rx_size_127; /* prc127 */
u64 rx_size_255; /* prc255 */ │ u64 rx_size_255; /* prc255 */
u64 rx_size_511; /* prc511 */ │ u64 rx_size_511; /* prc511 */
u64 rx_size_1023; /* prc1023 */ │ u64 rx_size_1023; /* prc1023 */
u64 rx_size_1522; /* prc1522 */ │ u64 rx_size_1522; /* prc1522 */
u64 rx_size_big; /* prc9522 */ │ u64 rx_size_big; /* prc9522 */
u64 rx_undersize; /* ruc */ │ u64 rx_undersize; /* ruc */
u64 rx_fragments; /* rfc */ │ u64 rx_fragments; /* rfc */
u64 rx_oversize; /* roc */ │ u64 rx_oversize; /* roc */
u64 rx_jabber; /* rjc */ │ u64 rx_jabber; /* rjc */
u64 tx_size_64; /* ptc64 */ │ u64 tx_size_64; /* ptc64 */
u64 tx_size_127; /* ptc127 */ │ u64 tx_size_127; /* ptc127 */
u64 tx_size_255; /* ptc255 */ │ u64 tx_size_255; /* ptc255 */
u64 tx_size_511; /* ptc511 */ │ u64 tx_size_511; /* ptc511 */
u64 tx_size_1023; /* ptc1023 */ │ u64 tx_size_1023; /* ptc1023 */
u64 tx_size_1522; /* ptc1522 */ │ u64 tx_size_1522; /* ptc1522 */
u64 tx_size_big; /* ptc9522 */ │ u64 tx_size_big; /* ptc9522 */
│ u64 mac_short_packet_dropped; /* mspdc */
│ u64 checksum_error; /* xec */
/* flow director stats */ │ /* flow director stats */
u32 fd_sb_status; │ u64 fd_atr_match;
u64 fd_sb_match; │ u64 fd_sb_match;
│ u64 fd_atr_tunnel_match;
│ u32 fd_atr_status;
│ u32 fd_sb_status;
│ /* EEE LPI */
│ u32 tx_lpi_status;
│ u32 rx_lpi_status;
│ u64 tx_lpi_count; /* etlpic */
│ u64 rx_lpi_count; /* erlpic */
} │
linux/drivers/net/ethernet/intel/ice/ice_lib.c:3833 │ linux/drivers/net/ethernet/intel/ice/ice_lib.c:3894
│
struct ice_pf *pf = vsi->back; │ struct ice_pf *pf = vsi->back;
struct device *dev; │ struct device *dev;
int status; │ int status;
int speed; │ int speed;
│
dev = ice_pf_to_dev(pf); │ dev = ice_pf_to_dev(pf);
if (!vsi->port_info) { │ if (!vsi->port_info) {
dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n", │ dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
vsi->idx, vsi->type); │ vsi->idx, vsi->type);
return -EINVAL; │ return -EINVAL;
} │ }
│
speed = ice_get_link_speed_kbps(vsi); │ speed = ice_get_link_speed_kbps(vsi);
if (min_tx_rate > (u64)speed) { │ if (max_tx_rate > (u64)speed) {
dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is great │ dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is great
min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, │ max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
speed); │ speed);
return -EINVAL; │ return -EINVAL;
} │ }
│
/* Configure min BW for VSI limit */ │ /* Configure max BW for VSI limit */
if (min_tx_rate) { │ if (max_tx_rate) {
status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, │ status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
ICE_MIN_BW, min_tx_rate); │ ICE_MAX_BW, max_tx_rate);
if (status) { │ if (status) {
dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n", │ dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n"
min_tx_rate, ice_vsi_type_str(vsi->type), │ max_tx_rate, ice_vsi_type_str(vsi->type),
vsi->idx); │ vsi->idx);
return status; │ return status;
} │ }
│
dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n", │ dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
min_tx_rate, ice_vsi_type_str(vsi->type)); │ max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
} else { │ } else {
status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, │ status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
vsi->idx, 0, │ vsi->idx, 0,
ICE_MIN_BW); │ ICE_MAX_BW);
if (status) { │ if (status) {
dev_err(dev, "failed to clear min Tx rate configuration for %s % │ dev_err(dev, "failed clearing max Tx rate configuration for %s %
ice_vsi_type_str(vsi->type), vsi->idx); │ ice_vsi_type_str(vsi->type), vsi->idx);
return status; │ return status;
} │ }
│
dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n", │ dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
ice_vsi_type_str(vsi->type), vsi->idx); │ ice_vsi_type_str(vsi->type), vsi->idx);
} │ }
│
return 0; │ return 0;
} │
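
The two columns are the same function with ICE_MIN_BW swapped for ICE_MAX_BW: a nonzero rate is validated against the current link speed and installed as a per-TC limit, and a rate of zero restores the default. A sketch of that dispatch shape; set_limit()/clear_limit() are hypothetical stand-ins for ice_cfg_vsi_bw_lmt_per_tc() and ice_cfg_vsi_bw_dflt_lmt_per_tc().

#include <stdio.h>

enum bw_kind { BW_MIN, BW_MAX };	/* models ICE_MIN_BW / ICE_MAX_BW */

/* Hypothetical back ends standing in for the two scheduler calls. */
static int set_limit(int vsi, enum bw_kind kind, unsigned long long kbps)
{
	printf("vsi %d: set %s limit to %llu Kbps\n",
	       vsi, kind == BW_MIN ? "min" : "max", kbps);
	return 0;
}

static int clear_limit(int vsi, enum bw_kind kind)
{
	printf("vsi %d: restore default %s limit\n",
	       vsi, kind == BW_MIN ? "min" : "max");
	return 0;
}

/* A rate of 0 means "remove the limit"; any other value installs it
 * after a sanity check against link speed - the shape both columns share.
 */
static int cfg_tx_rate(int vsi, enum bw_kind kind,
		       unsigned long long rate_kbps,
		       unsigned long long link_kbps)
{
	if (rate_kbps > link_kbps)
		return -1;	/* requested rate exceeds what the link can carry */
	return rate_kbps ? set_limit(vsi, kind, rate_kbps)
			 : clear_limit(vsi, kind);
}

int main(void)
{
	cfg_tx_rate(3, BW_MAX, 500000, 10000000);
	cfg_tx_rate(3, BW_MAX, 0, 10000000);
	return 0;
}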
linux/drivers/net/ethernet/dec/tulip/de4x5.c:2802 │ linux/drivers/net/ethernet/dec/tulip/de4x5.c:2985
│
case 0: │ case 0:
if (lp->timeout < 0) { │ if (lp->timeout < 0) {
mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII); │ mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
} │ }
cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500); │ cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
if (cr < 0) { │ if (cr < 0) {
next_tick = cr & ~TIMER_CB; │ next_tick = cr & ~TIMER_CB;
} else { │ } else {
if (cr) { │ if (cr) {
lp->local_state = 0; │ lp->local_state = 0;
lp->media = SPD_DET; │ lp->media = SPD_DET;
} else { │ } else {
lp->local_state++; │ lp->local_state++;
} │ }
next_tick = dc21140m_autoconf(dev); │ next_tick = dc2114x_autoconf(dev);
} │ }
break; │ break;
│
case 1: │ case 1:
if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) { │ sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
│ if (sr < 0) {
next_tick = sr & ~TIMER_CB; │ next_tick = sr & ~TIMER_CB;
} else { │ } else {
lp->media = SPD_DET; │ lp->media = SPD_DET;
lp->local_state = 0; │ lp->local_state = 0;
if (sr) { /* Success! */ │ if (sr) { /* Success! */
lp->tmp = MII_SR_ASSC; │ lp->tmp = MII_SR_ASSC;
anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); │ anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); │ ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
if (!(anlpa & MII_ANLPA_RF) && │ if (!(anlpa & MII_ANLPA_RF) &&
(cap = anlpa & MII_ANLPA_TAF & ana)) { │ (cap = anlpa & MII_ANLPA_TAF & ana)) {
if (cap & MII_ANA_100M) { │ if (cap & MII_ANA_100M) {
lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0; │ lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
lp->media = _100Mb; │ lp->media = _100Mb;
} else if (cap & MII_ANA_10M) { │ } else if (cap & MII_ANA_10M) {
lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0; │ lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
│
lp->media = _10Mb; │ lp->media = _10Mb;
} │ }
} │ }
} /* Auto Negotiation failed to finish */ │ } /* Auto Negotiation failed to finish */
next_tick = dc21140m_autoconf(dev); │ next_tick = dc2114x_autoconf(dev);
} /* Auto Negotiation failed to start */ │ } /* Auto Negotiation failed to start */
break; │ break;
} │
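
Once autonegotiation completes, both de4x5 variants intersect the local (ANA) and partner (ANLPA) advertisement masks, bail out on a remote fault, and prefer 100 Mb/s over 10 Mb/s, deriving duplex from the full-duplex ability bits. A standalone sketch of that resolution step; the ANA_* constants are assumed values modelled on the driver's MII_ANA_* masks and standard 802.3 ANAR bit positions.

#include <stdbool.h>
#include <stdio.h>

#define ANA_10M   0x0060u	/* 10BASE-T half + full duplex bits */
#define ANA_100M  0x0180u	/* 100BASE-TX half + full duplex bits */
#define ANA_FDAM  0x0140u	/* the two full-duplex ability bits */
#define ANLPA_RF  0x2000u	/* link partner signalled remote fault */

struct link_mode {
	int speed_mbps;		/* 0 if no common mode was found */
	bool full_duplex;
};

/* Resolve the negotiated mode the way the state machine above does:
 * intersect the two ability masks and pick the best common mode.
 */
static struct link_mode resolve(unsigned int ana, unsigned int anlpa)
{
	struct link_mode m = { 0, false };
	unsigned int common;

	if (anlpa & ANLPA_RF)
		return m;	/* partner reports a fault: no usable mode */

	common = ana & anlpa;
	if (common & ANA_100M) {
		m.speed_mbps = 100;
		m.full_duplex = (common & ANA_FDAM & ANA_100M) != 0;
	} else if (common & ANA_10M) {
		m.speed_mbps = 10;
		m.full_duplex = (common & ANA_FDAM & ANA_10M) != 0;
	}
	return m;
}

int main(void)
{
	/* We advertise everything; the partner offers 100M half + full. */
	struct link_mode m = resolve(ANA_10M | ANA_100M, ANA_100M);

	printf("%d Mb/s, %s duplex\n", m.speed_mbps,
	       m.full_duplex ? "full" : "half");
	return 0;
}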
linux/drivers/net/ethernet/intel/ice/ice_dcb.c:1204 │ linux/drivers/net/ethernet/intel/i40e/i40e_dcb.c:1164
│
u16 typelen, len, offset = 0; │ u16 typelength, length, offset = 0;
u8 priority, selector, i = 0; │ u8 priority, selector, i = 0;
u8 *buf = tlv->tlvinfo; │ u8 *buf = tlv->tlvinfo;
u32 ouisubtype; │ u32 ouisubtype;
│
/* No APP TLVs then just return */ │ /* No APP TLVs then just return */
if (dcbcfg->numapps == 0) │ if (dcbcfg->numapps == 0)
return; │ return;
ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | │ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
ICE_IEEE_SUBTYPE_APP_PRI); │ I40E_IEEE_SUBTYPE_APP_PRI);
tlv->ouisubtype = htonl(ouisubtype); │ tlv->ouisubtype = htonl(ouisubtype);
│
/* Move offset to App Priority Table */ │ /* Move offset to App Priority Table */
offset++; │ offset++;
/* Application Priority Table (3 octets) │ /* Application Priority Table (3 octets)
* Octets:| 1 | 2 | 3 | │ * Octets:| 1 | 2 | 3 |
* ----------------------------------------- │ * -----------------------------------------
* |Priority|Rsrvd| Sel | Protocol ID | │ * |Priority|Rsrvd| Sel | Protocol ID |
* ----------------------------------------- │ * -----------------------------------------
* Bits:|23 21|20 19|18 16|15 0| │ * Bits:|23 21|20 19|18 16|15 0|
* ----------------------------------------- │ * -----------------------------------------
*/ │ */
while (i < dcbcfg->numapps) { │ while (i < dcbcfg->numapps) {
priority = dcbcfg->app[i].priority & 0x7; │ priority = dcbcfg->app[i].priority & 0x7;
selector = dcbcfg->app[i].selector & 0x7; │ selector = dcbcfg->app[i].selector & 0x7;
buf[offset] = (priority << ICE_IEEE_APP_PRIO_S) | selector; │ buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
buf[offset + 1] = (dcbcfg->app[i].prot_id >> 0x8) & 0xFF; │ buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
buf[offset + 2] = dcbcfg->app[i].prot_id & 0xFF; │ buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
/* Move to next app */ │ /* Move to next app */
offset += 3; │ offset += 3;
i++; │ i++;
if (i >= ICE_DCBX_MAX_APPS) │ if (i >= I40E_DCBX_MAX_APPS)
break; │ break;
} │ }
/* len includes size of ouisubtype + 1 reserved + 3*numapps */ │ /* length includes size of ouisubtype + 1 reserved + 3*numapps */
len = sizeof(tlv->ouisubtype) + 1 + (i * 3); │ length = sizeof(tlv->ouisubtype) + 1 + (i * 3);
typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | (len & 0x1FF)); │ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
tlv->typelen = htons(typelen); │ (length & 0x1FF));
│ tlv->typelength = htons(typelength);
} │
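
Both TLV builders pack each application entry into the 3-octet layout spelled out in the comment block above: priority in bits 23:21, selector in bits 18:16, protocol ID in bits 15:0, most significant octet first. A minimal sketch of that packing; APP_PRIO_SHIFT plays the role of ICE_IEEE_APP_PRIO_S / I40E_IEEE_APP_PRIO_SHIFT (both 5, since priority sits in bits 7:5 of the first octet).

#include <stdint.h>
#include <stdio.h>

#define APP_PRIO_SHIFT 5

/* Pack one IEEE 802.1Qaz APP priority entry:
 * |prio(3)|rsvd(2)|sel(3)| protocol id (16, big-endian) |
 */
static void pack_app_entry(uint8_t *buf, uint8_t prio, uint8_t sel,
			   uint16_t prot_id)
{
	buf[0] = (uint8_t)(((prio & 0x7) << APP_PRIO_SHIFT) | (sel & 0x7));
	buf[1] = (uint8_t)(prot_id >> 8);
	buf[2] = (uint8_t)(prot_id & 0xFF);
}

int main(void)
{
	uint8_t octets[3];

	/* e.g. FCoE: priority 3, selector 1 (ethertype), 0x8906 */
	pack_app_entry(octets, 3, 1, 0x8906);
	printf("%02x %02x %02x\n", octets[0], octets[1], octets[2]);
	return 0;
}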
linux/drivers/net/ethernet/sfc/efx.c:266 │ linux/drivers/net/ethernet/sfc/falcon/efx.c:1615
│
int rc; │ int rc;
│
netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); │ netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
│
/* Carry out hardware-type specific initialisation */ │ /* Carry out hardware-type specific initialisation */
rc = efx->type->probe(efx); │ rc = efx->type->probe(efx);
if (rc) │ if (rc)
return rc; │ return rc;
│
do { │ do {
if (!efx->max_channels || !efx->max_tx_channels) { │ if (!efx->max_channels || !efx->max_tx_channels) {
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"Insufficient resources to allocate" │ "Insufficient resources to allocate"
" any channels\n"); │ " any channels\n");
rc = -ENOSPC; │ rc = -ENOSPC;
goto fail1; │ goto fail1;
} │ }
│
/* Determine the number of channels and queues by trying │ /* Determine the number of channels and queues by trying
* to hook in MSI-X interrupts. │ * to hook in MSI-X interrupts.
*/ │ */
rc = efx_probe_interrupts(efx); │ rc = ef4_probe_interrupts(efx);
if (rc) │ if (rc)
goto fail1; │ goto fail1;
│
rc = efx_set_channels(efx); │ ef4_set_channels(efx);
if (rc) │
goto fail1; │
│
/* dimension_resources can fail with EAGAIN */ │ /* dimension_resources can fail with EAGAIN */
rc = efx->type->dimension_resources(efx); │ rc = efx->type->dimension_resources(efx);
if (rc != 0 && rc != -EAGAIN) │ if (rc != 0 && rc != -EAGAIN)
goto fail2; │ goto fail2;
│
if (rc == -EAGAIN) │ if (rc == -EAGAIN)
/* try again with new max_channels */ │ /* try again with new max_channels */
efx_remove_interrupts(efx); │ ef4_remove_interrupts(efx);
│
} while (rc == -EAGAIN); │ } while (rc == -EAGAIN);
│
if (efx->n_channels > 1) │ if (efx->n_channels > 1)
netdev_rss_key_fill(efx->rss_context.rx_hash_key, │ netdev_rss_key_fill(&efx->rx_hash_key,
sizeof(efx->rss_context.rx_hash_key)); │ sizeof(efx->rx_hash_key));
efx_set_default_rx_indir_table(efx, &efx->rss_context); │ ef4_set_default_rx_indir_table(efx);
│
│ netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
│ netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
│
/* Initialise the interrupt moderation settings */ │ /* Initialise the interrupt moderation settings */
efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); │ efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, │ ef4_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
true); │ true);
│
return 0; │ return 0;
│
fail2: │ fail2:
efx_remove_interrupts(efx); │ ef4_remove_interrupts(efx);
fail1: │ fail1:
efx->type->remove(efx); │ efx->type->remove(efx);
return rc; │ return rc;
} │
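
Both probe paths share the same retry loop: hook up interrupts, let the hardware-specific dimension_resources() size its resources, and when it answers -EAGAIN (after lowering max_channels) tear the interrupts down and go around again. A runnable sketch of that loop; probe_interrupts(), remove_interrupts(), and dimension_resources() are hypothetical stand-ins, and the halving of max_channels merely simulates the hardware asking for fewer channels.

#include <stdio.h>

#define EAGAIN_RC (-11)	/* models the kernel's -EAGAIN */

static int max_channels = 32;

static int probe_interrupts(void) { return 0; }
static void remove_interrupts(void) { }

/* Pretend resource sizing only fits once max_channels has shrunk to 8. */
static int dimension_resources(void)
{
	if (max_channels > 8) {
		max_channels /= 2;
		return EAGAIN_RC;
	}
	return 0;
}

/* The retry shape shared by both efx variants: tear down and re-probe
 * interrupts until resource dimensioning stops asking for another pass.
 */
static int probe_nic(void)
{
	int rc;

	do {
		if (!max_channels)
			return -1;	/* nothing left to allocate */
		rc = probe_interrupts();
		if (rc)
			return rc;
		rc = dimension_resources();
		if (rc && rc != EAGAIN_RC)
			return rc;
		if (rc == EAGAIN_RC)
			remove_interrupts();	/* retry with new max_channels */
	} while (rc == EAGAIN_RC);

	return 0;
}

int main(void)
{
	printf("probe rc=%d, max_channels=%d\n", probe_nic(), max_channels);
	return 0;
}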
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1602 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7751
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
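
The long *_MASK/*_SHIFT runs in these qed_hsi.h contexts all follow one convention: a field's value is (flags >> SHIFT) & MASK, and only the name prefix (CORE, FCOE, ROCE, ISCSI, ...) changes between the otherwise identical structs, which is why the clone pairs repeat below. The qed driver accesses these fields through its GET_FIELD()/SET_FIELD() macros; the sketch shows the same arithmetic explicitly with two representative pairs.

#include <stdint.h>
#include <stdio.h>

/* Two representative pairs in the convention above: a 1-bit flag and a
 * 2-bit "CF" field sharing one flags byte.
 */
#define CTX_BIT1_MASK  0x1
#define CTX_BIT1_SHIFT 1
#define CTX_CF0_MASK   0x3
#define CTX_CF0_SHIFT  2

static uint8_t get_field(uint8_t flags, uint8_t mask, uint8_t shift)
{
	return (uint8_t)((flags >> shift) & mask);
}

static uint8_t set_field(uint8_t flags, uint8_t mask, uint8_t shift,
			 uint8_t val)
{
	flags &= (uint8_t)~(mask << shift);	/* clear the old bits */
	return (uint8_t)(flags | ((val & mask) << shift));
}

int main(void)
{
	uint8_t flags0 = 0;

	flags0 = set_field(flags0, CTX_CF0_MASK, CTX_CF0_SHIFT, 2);
	flags0 = set_field(flags0, CTX_BIT1_MASK, CTX_BIT1_SHIFT, 1);
	printf("flags0=%#x cf0=%u bit1=%u\n", flags0,
	       get_field(flags0, CTX_CF0_MASK, CTX_CF0_SHIFT),
	       get_field(flags0, CTX_BIT1_MASK, CTX_BIT1_SHIFT));
	return 0;
}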
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10183 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7751
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1602 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7714
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10183 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7714
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1602 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1639
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
│ u8 byte2;
│ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
│ __le16 word1;
│ __le16 word2;
│ __le16 word3;
│ __le16 word4;
│ __le32 reg2;
│ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10183 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10324
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
│ u8 byte2;
│ u8 byte3;
__le16 word0; │ __le16 word0;
__le16 word1; │
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
│ __le16 word1;
│ __le16 word2;
│ __le16 word3;
│ __le16 word4;
│ __le32 reg2;
│ __le32 reg3;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1602 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7677
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10183 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7677
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1602 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10813
│
u8 byte0; │ u8 reserved;
u8 byte1; │ u8 state;
u8 flags0; │ u8 flags0;
#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10183 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10813
│
u8 byte0; │ u8 reserved;
u8 byte1; │ u8 state;
u8 flags0; │ u8 flags0;
#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10183 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1602
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/cavium/liquidio/lio_main.c:1156 │ linux/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:599
│
│ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
struct octeon_soft_command *sc; │ struct octeon_soft_command *sc;
union octnet_cmd *ncmd; │ union octnet_cmd *ncmd;
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; │
int retval; │ int retval;
│
if (oct->props[lio->ifidx].rx_on == start_stop) │ if (oct->props[lio->ifidx].rx_on == start_stop)
return 0; │ return 0;
│
sc = (struct octeon_soft_command *) │ sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, │ octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
16, 0); │ 16, 0);
if (!sc) { │ if (!sc) {
netif_info(lio, rx_err, lio->netdev, │ netif_info(lio, rx_err, lio->netdev,
"Failed to allocate octeon_soft_command struct\n"); │ "Failed to allocate octeon_soft_command struct\n");
return -ENOMEM; │ return -ENOMEM;
} │ }
│
ncmd = (union octnet_cmd *)sc->virtdptr; │ ncmd = (union octnet_cmd *)sc->virtdptr;
│
ncmd->u64 = 0; │ ncmd->u64 = 0;
ncmd->s.cmd = OCTNET_CMD_RX_CTL; │ ncmd->s.cmd = OCTNET_CMD_RX_CTL;
ncmd->s.param1 = start_stop; │ ncmd->s.param1 = start_stop;
│
octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); │ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
│
sc->iq_no = lio->linfo.txpciq[0].s.q_no; │ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
│
octeon_prepare_soft_command(oct, sc, OPCODE_NIC, │ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
OPCODE_NIC_CMD, 0, 0, 0); │ OPCODE_NIC_CMD, 0, 0, 0);
│
init_completion(&sc->complete); │ init_completion(&sc->complete);
sc->sc_status = OCTEON_REQUEST_PENDING; │ sc->sc_status = OCTEON_REQUEST_PENDING;
│
retval = octeon_send_soft_command(oct, sc); │ retval = octeon_send_soft_command(oct, sc);
if (retval == IQ_SEND_FAILED) { │ if (retval == IQ_SEND_FAILED) {
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); │ netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
octeon_free_soft_command(oct, sc); │ octeon_free_soft_command(oct, sc);
} else { │ } else {
/* Sleep on a wait queue till the cond flag indicates that the │ /* Sleep on a wait queue till the cond flag indicates that the
* response arrived or timed-out. │ * response arrived or timed-out.
*/ │ */
retval = wait_for_sc_completion_timeout(oct, sc, 0); │ retval = wait_for_sc_completion_timeout(oct, sc, 0);
if (retval) │ if (retval)
return retval; │ return retval;
│
oct->props[lio->ifidx].rx_on = start_stop; │ oct->props[lio->ifidx].rx_on = start_stop;
WRITE_ONCE(sc->caller_is_done, true); │ WRITE_ONCE(sc->caller_is_done, true);
} │ }
│
return retval; │ return retval;
} │
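
The PF and VF variants share one request shape: allocate a soft command, fill the byte-swapped payload, send it, and on anything but a send failure block until the response (or timeout) before committing the new rx_on state. A standalone sketch of that shape; send_cmd()/wait_done() are hypothetical stand-ins for octeon_send_soft_command() and wait_for_sc_completion_timeout(), the "firmware" completes inline so the sketch runs, and the driver's caller_is_done ownership handoff is reduced to a plain free().

#include <stdio.h>
#include <stdlib.h>

#define SEND_FAILED 1	/* models IQ_SEND_FAILED */

struct soft_cmd {
	unsigned long long payload;	/* models the octnet_cmd word */
	int done;			/* models the completion */
};

static int send_cmd(struct soft_cmd *sc) { sc->done = 1; return 0; }
static int wait_done(struct soft_cmd *sc) { return sc->done ? 0 : -1; }

/* Allocate, fill, send; on a send failure free immediately, otherwise
 * wait for the response and only then commit the new state.
 */
static int rx_ctl(int *rx_on, int start_stop)
{
	struct soft_cmd *sc;
	int ret;

	if (*rx_on == start_stop)
		return 0;	/* nothing to change */

	sc = calloc(1, sizeof(*sc));
	if (!sc)
		return -1;
	sc->payload = (unsigned long long)start_stop;

	ret = send_cmd(sc);
	if (ret == SEND_FAILED) {
		free(sc);
		return ret;
	}
	ret = wait_done(sc);
	if (!ret)
		*rx_on = start_stop;	/* commit only after the response */
	free(sc);
	return ret;
}

int main(void)
{
	int rx_on = 0;

	printf("rc=%d rx_on=%d\n", rx_ctl(&rx_on, 1), rx_on);
	return 0;
}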
linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:9609 │ linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:9564
│
struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; │ struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
struct hclge_vport_vtag_rx_cfg_cmd *req; │ struct hclge_vport_vtag_tx_cfg_cmd *req;
struct hclge_dev *hdev = vport->back; │ struct hclge_dev *hdev = vport->back;
struct hclge_desc desc; │ struct hclge_desc desc;
u16 bmap_index; │ u16 bmap_index;
int status; │ int status;
│
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); │ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
│
req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; │ req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, │ req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
vcfg->strip_tag1_en ? 1 : 0); │ req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, │ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
vcfg->strip_tag2_en ? 1 : 0); │ vcfg->accept_tag1 ? 1 : 0);
hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, │ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
vcfg->vlan1_vlan_prionly ? 1 : 0); │ vcfg->accept_untag1 ? 1 : 0);
hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, │ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
vcfg->vlan2_vlan_prionly ? 1 : 0); │ vcfg->accept_tag2 ? 1 : 0);
hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B, │ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
vcfg->strip_tag1_discard_en ? 1 : 0); │ vcfg->accept_untag2 ? 1 : 0);
hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B, │ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
vcfg->strip_tag2_discard_en ? 1 : 0); │ vcfg->insert_tag1_en ? 1 : 0);
│ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
│ vcfg->insert_tag2_en ? 1 : 0);
│ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
│ vcfg->tag_shift_mode_en ? 1 : 0);
│ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
│
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; │ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / │ bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
HCLGE_VF_NUM_PER_BYTE; │ HCLGE_VF_NUM_PER_BYTE;
req->vf_bitmap[bmap_index] = │ req->vf_bitmap[bmap_index] =
1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); │ 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
│
status = hclge_cmd_send(&hdev->hw, &desc, 1); │ status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status) │ if (status)
dev_err(&hdev->pdev->dev, │ dev_err(&hdev->pdev->dev,
"Send port rxvlan cfg command fail, ret =%d\n", │ "Send port txvlan cfg command fail, ret =%d\n",
status); │ status);
│
return status; │ return status;
} │
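
Apart from which feature bits they set in vport_vlan_cfg, the two functions end identically: the target VF is selected by splitting vport_id into a command offset and a one-hot byte inside a bitmap. That arithmetic is easy to get wrong, so here it is isolated in a runnable sketch; the constants are assumed values playing the role of HCLGE_VF_NUM_PER_CMD and HCLGE_VF_NUM_PER_BYTE.

#include <stdint.h>
#include <stdio.h>

#define VF_NUM_PER_CMD 64	/* assumed group size per command */
#define VF_NUM_PER_BYTE 8	/* assumed VFs per bitmap byte */

struct vf_select {
	uint8_t vf_offset;	/* which group of VFs the command targets */
	uint8_t bitmap_index;	/* which byte inside the bitmap */
	uint8_t bitmap_byte;	/* the byte with exactly one bit set */
};

/* Reproduce the vport -> (offset, bitmap) arithmetic both columns share. */
static struct vf_select select_vf(unsigned int vport_id)
{
	struct vf_select s;

	s.vf_offset = (uint8_t)(vport_id / VF_NUM_PER_CMD);
	s.bitmap_index = (uint8_t)(vport_id % VF_NUM_PER_CMD / VF_NUM_PER_BYTE);
	s.bitmap_byte = (uint8_t)(1u << (vport_id % VF_NUM_PER_BYTE));
	return s;
}

int main(void)
{
	struct vf_select s = select_vf(70);	/* -> group 1, byte 0, bit 6 */

	printf("offset=%u index=%u byte=%#x\n",
	       s.vf_offset, s.bitmap_index, s.bitmap_byte);
	return 0;
}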
linux/drivers/net/ethernet/intel/ice/ice_ethtool.c:2453 │ linux/drivers/net/ethernet/intel/iavf/iavf_ethtool.c:1541
│
u64 hfld = ICE_HASH_INVALID; │ u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
│
if (nfc->data & RXH_IP_SRC || nfc->data & RXH_IP_DST) { │ if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) {
switch (nfc->flow_type) { │ switch (cmd->flow_type) {
case TCP_V4_FLOW: │ case TCP_V4_FLOW:
case UDP_V4_FLOW: │ case UDP_V4_FLOW:
case SCTP_V4_FLOW: │ case SCTP_V4_FLOW:
if (nfc->data & RXH_IP_SRC) │ if (cmd->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV4_SA; │ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
if (nfc->data & RXH_IP_DST) │ if (cmd->data & RXH_IP_DST)
hfld |= ICE_FLOW_HASH_FLD_IPV4_DA; │ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
break; │ break;
case TCP_V6_FLOW: │ case TCP_V6_FLOW:
case UDP_V6_FLOW: │ case UDP_V6_FLOW:
case SCTP_V6_FLOW: │ case SCTP_V6_FLOW:
if (nfc->data & RXH_IP_SRC) │ if (cmd->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV6_SA; │ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
if (nfc->data & RXH_IP_DST) │ if (cmd->data & RXH_IP_DST)
hfld |= ICE_FLOW_HASH_FLD_IPV6_DA; │ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA;
break; │ break;
default: │ default:
break; │ break;
} │ }
} │ }
│
if (nfc->data & RXH_L4_B_0_1 || nfc->data & RXH_L4_B_2_3) { │ if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) {
switch (nfc->flow_type) { │ switch (cmd->flow_type) {
case TCP_V4_FLOW: │ case TCP_V4_FLOW:
case TCP_V6_FLOW: │ case TCP_V6_FLOW:
if (nfc->data & RXH_L4_B_0_1) │ if (cmd->data & RXH_L4_B_0_1)
hfld |= ICE_FLOW_HASH_FLD_TCP_SRC_PORT; │ hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
if (nfc->data & RXH_L4_B_2_3) │ if (cmd->data & RXH_L4_B_2_3)
hfld |= ICE_FLOW_HASH_FLD_TCP_DST_PORT; │ hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
break; │ break;
case UDP_V4_FLOW: │ case UDP_V4_FLOW:
case UDP_V6_FLOW: │ case UDP_V6_FLOW:
if (nfc->data & RXH_L4_B_0_1) │ if (cmd->data & RXH_L4_B_0_1)
hfld |= ICE_FLOW_HASH_FLD_UDP_SRC_PORT; │ hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
if (nfc->data & RXH_L4_B_2_3) │ if (cmd->data & RXH_L4_B_2_3)
hfld |= ICE_FLOW_HASH_FLD_UDP_DST_PORT; │ hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT;
break; │ break;
case SCTP_V4_FLOW: │ case SCTP_V4_FLOW:
case SCTP_V6_FLOW: │ case SCTP_V6_FLOW:
if (nfc->data & RXH_L4_B_0_1) │ if (cmd->data & RXH_L4_B_0_1)
hfld |= ICE_FLOW_HASH_FLD_SCTP_SRC_PORT; │ hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT;
if (nfc->data & RXH_L4_B_2_3) │ if (cmd->data & RXH_L4_B_2_3)
hfld |= ICE_FLOW_HASH_FLD_SCTP_DST_PORT; │ hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT;
break; │ break;
default: │ default:
break; │ break;
} │ }
} │ }
│
return hfld; │ return hfld;
} │
linux/drivers/net/ethernet/ti/cpsw_switchdev.c:431 │ linux/drivers/net/ethernet/ti/am65-cpsw-switchdev.c:421
│
struct net_device *ndev = switchdev_notifier_info_to_dev(ptr); │ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
│ struct am65_cpsw_switchdev_event_work *switchdev_work;
│ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct switchdev_notifier_fdb_info *fdb_info = ptr; │ struct switchdev_notifier_fdb_info *fdb_info = ptr;
struct cpsw_switchdev_event_work *switchdev_work; │
struct cpsw_priv *priv = netdev_priv(ndev); │
int err; │ int err;
│
if (event == SWITCHDEV_PORT_ATTR_SET) { │ if (event == SWITCHDEV_PORT_ATTR_SET) {
err = switchdev_handle_port_attr_set(ndev, ptr, │ err = switchdev_handle_port_attr_set(ndev, ptr,
cpsw_port_dev_check, │ am65_cpsw_port_dev_check,
cpsw_port_attr_set); │ am65_cpsw_port_attr_set);
return notifier_from_errno(err); │ return notifier_from_errno(err);
} │ }
│
if (!cpsw_port_dev_check(ndev)) │ if (!am65_cpsw_port_dev_check(ndev))
return NOTIFY_DONE; │ return NOTIFY_DONE;
│
switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); │ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
if (WARN_ON(!switchdev_work)) │ if (WARN_ON(!switchdev_work))
return NOTIFY_BAD; │ return NOTIFY_BAD;
│
INIT_WORK(&switchdev_work->work, cpsw_switchdev_event_work); │ INIT_WORK(&switchdev_work->work, am65_cpsw_switchdev_event_work);
switchdev_work->priv = priv; │ switchdev_work->port = port;
switchdev_work->event = event; │ switchdev_work->event = event;
│
switch (event) { │ switch (event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE: │ case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE: │ case SWITCHDEV_FDB_DEL_TO_DEVICE:
memcpy(&switchdev_work->fdb_info, ptr, │ memcpy(&switchdev_work->fdb_info, ptr,
sizeof(switchdev_work->fdb_info)); │ sizeof(switchdev_work->fdb_info));
switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); │ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
if (!switchdev_work->fdb_info.addr) │ if (!switchdev_work->fdb_info.addr)
goto err_addr_alloc; │ goto err_addr_alloc;
ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, │ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
fdb_info->addr); │ fdb_info->addr);
dev_hold(ndev); │ dev_hold(ndev);
break; │ break;
default: │ default:
kfree(switchdev_work); │ kfree(switchdev_work);
return NOTIFY_DONE; │ return NOTIFY_DONE;
} │ }
│
queue_work(system_long_wq, &switchdev_work->work); │ queue_work(system_long_wq, &switchdev_work->work);
│
return NOTIFY_DONE; │ return NOTIFY_DONE;
│
err_addr_alloc: │ err_addr_alloc:
kfree(switchdev_work); │ kfree(switchdev_work);
return NOTIFY_BAD; │ return NOTIFY_BAD;
} │
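
The notifier above runs in atomic context, so both drivers snapshot the event payload (a kzalloc'd copy of the MAC address plus dev_hold on the netdev) and defer the real work to a workqueue. A user-space sketch of that defer-and-copy pattern, with illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

struct fdb_work {
	unsigned char addr[ETH_ALEN];	/* private copy of the event data */
	void (*fn)(struct fdb_work *w);
};

static void fdb_worker(struct fdb_work *w)
{
	printf("deferred FDB update for %02x:..:%02x\n",
	       w->addr[0], w->addr[ETH_ALEN - 1]);
	free(w);			/* the worker owns the copy */
}

/* "Notifier": must not block, so it only copies and hands off. */
static struct fdb_work *fdb_event(const unsigned char *addr)
{
	struct fdb_work *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;
	memcpy(w->addr, addr, ETH_ALEN);
	w->fn = fdb_worker;
	return w;
}

int main(void)
{
	const unsigned char mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x2a };
	struct fdb_work *w = fdb_event(mac);

	if (w)
		w->fn(w);		/* stands in for queue_work() */
	return 0;
}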
linux/drivers/net/ethernet/cadence/macb_main.c:336 │ linux/drivers/net/ethernet/cadence/macb_main.c:390
│
struct macb *bp = bus->priv; │ struct macb *bp = bus->priv;
int status; │ int status;
│
status = pm_runtime_get_sync(&bp->pdev->dev); │ status = pm_runtime_get_sync(&bp->pdev->dev);
if (status < 0) { │ if (status < 0) {
pm_runtime_put_noidle(&bp->pdev->dev); │ pm_runtime_put_noidle(&bp->pdev->dev);
goto mdio_pm_exit; │ goto mdio_pm_exit;
} │ }
│
status = macb_mdio_wait_for_idle(bp); │ status = macb_mdio_wait_for_idle(bp);
if (status < 0) │ if (status < 0)
goto mdio_read_exit; │ goto mdio_write_exit;
│
if (regnum & MII_ADDR_C45) { │ if (regnum & MII_ADDR_C45) {
macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) │ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
| MACB_BF(RW, MACB_MAN_C45_ADDR) │ | MACB_BF(RW, MACB_MAN_C45_ADDR)
| MACB_BF(PHYA, mii_id) │ | MACB_BF(PHYA, mii_id)
| MACB_BF(REGA, (regnum >> 16) & 0x1F) │ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
| MACB_BF(DATA, regnum & 0xFFFF) │ | MACB_BF(DATA, regnum & 0xFFFF)
| MACB_BF(CODE, MACB_MAN_C45_CODE))); │ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
│
status = macb_mdio_wait_for_idle(bp); │ status = macb_mdio_wait_for_idle(bp);
if (status < 0) │ if (status < 0)
goto mdio_read_exit; │ goto mdio_write_exit;
│
macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) │ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
| MACB_BF(RW, MACB_MAN_C45_READ) │ | MACB_BF(RW, MACB_MAN_C45_WRITE)
| MACB_BF(PHYA, mii_id) │ | MACB_BF(PHYA, mii_id)
| MACB_BF(REGA, (regnum >> 16) & 0x1F) │ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
| MACB_BF(CODE, MACB_MAN_C45_CODE))); │ | MACB_BF(CODE, MACB_MAN_C45_CODE)
│ | MACB_BF(DATA, value)));
} else { │ } else {
macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) │ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
| MACB_BF(RW, MACB_MAN_C22_READ) │ | MACB_BF(RW, MACB_MAN_C22_WRITE)
| MACB_BF(PHYA, mii_id) │ | MACB_BF(PHYA, mii_id)
| MACB_BF(REGA, regnum) │ | MACB_BF(REGA, regnum)
| MACB_BF(CODE, MACB_MAN_C22_CODE))); │ | MACB_BF(CODE, MACB_MAN_C22_CODE)
│ | MACB_BF(DATA, value)));
} │ }
│
status = macb_mdio_wait_for_idle(bp); │ status = macb_mdio_wait_for_idle(bp);
if (status < 0) │ if (status < 0)
goto mdio_read_exit; │ goto mdio_write_exit;
│
status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); │
│
mdio_read_exit: │ mdio_write_exit:
pm_runtime_mark_last_busy(&bp->pdev->dev); │ pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev); │ pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit: │ mdio_pm_exit:
return status; │ return status;
} │
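
Left is the MDIO read path, right the write path; both share the Clause 45 two-phase sequence (an ADDR cycle carrying the register number, then a READ or WRITE cycle). The regnum encoding they decode packs the MMD device address above bit 16, flagged by MII_ADDR_C45 (bit 30 in kernels of this vintage). A standalone sketch of the decode:

#include <stdint.h>
#include <stdio.h>

#define MII_ADDR_C45 (1u << 30)

int main(void)
{
	uint32_t regnum = MII_ADDR_C45 | (1u << 16) | 0x8000; /* MMD 1, reg 0x8000 */

	if (regnum & MII_ADDR_C45)
		printf("C45: devad=%u reg=0x%04x\n",
		       (regnum >> 16) & 0x1F, regnum & 0xFFFF);
	else
		printf("C22: reg=%u\n", regnum & 0x1F);
	return 0;
}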
linux/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c:1540 │ linux/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c:7983
│
unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) │ unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
? rem │ rem : ARRAY_SIZE(c.u.exact));
: ARRAY_SIZE(cmd.u.exact)); │
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, │ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[fw_naddr]), 16); │ u.exact[fw_naddr]), 16);
struct fw_vi_mac_exact *p; │ struct fw_vi_mac_exact *p;
int i; │ int i;
│
memset(&cmd, 0, sizeof(cmd)); │ memset(&c, 0, sizeof(c));
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | │ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
FW_CMD_REQUEST_F | │ FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | │ FW_CMD_WRITE_F |
(free ? FW_CMD_EXEC_F : 0) | │ FW_CMD_EXEC_V(free) |
FW_VI_MAC_CMD_VIID_V(viid)); │ FW_VI_MAC_CMD_VIID_V(viid));
cmd.freemacs_to_len16 = │ c.freemacs_to_len16 =
cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) | │ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
FW_CMD_LEN16_V(len16)); │ FW_CMD_LEN16_V(len16));
│
for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { │ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
p->valid_to_idx = cpu_to_be16( │ p->valid_to_idx =
FW_VI_MAC_CMD_VALID_F | │ cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); │ FW_VI_MAC_CMD_IDX_V(
memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); │ FW_VI_MAC_ADD_MAC));
│ memcpy(p->macaddr, addr[offset + i],
│ sizeof(p->macaddr));
} │ }
│
│ /* It's okay if we run out of space in our MAC address arena.
ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, │ * Some of the addresses we submit may get stored so we need
sleep_ok); │ * to run through the reply to see what the results were ...
if (ret && ret != -ENOMEM) │ */
│ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
│ if (ret && ret != -FW_ENOMEM)
break; │ break;
│
for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) { │ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
u16 index = FW_VI_MAC_CMD_IDX_G( │ u16 index = FW_VI_MAC_CMD_IDX_G(
be16_to_cpu(p->valid_to_idx)); │ be16_to_cpu(p->valid_to_idx));
│
if (idx) │ if (idx)
idx[offset+i] = │ idx[offset + i] = (index >= max_naddr ?
(index >= max_naddr │ 0xffff : index);
? 0xffff │
: index); │
if (index < max_naddr) │ if (index < max_naddr)
nfilters++; │ nfilters++;
else if (hash) │ else if (hash)
*hash |= (1ULL << hash_mac_addr(addr[offset+i])); │ *hash |= (1ULL <<
│ hash_mac_addr(addr[offset + i]));
} │ }
│
free = false; │ free = false;
offset += fw_naddr; │ offset += fw_naddr;
rem -= fw_naddr; │ rem -= fw_naddr;
} │
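
The loop shape shared by both columns: a list of `rem` MAC addresses is pushed to firmware in chunks of at most ARRAY_SIZE(...u.exact) entries, with `offset` advancing and `free` cleared after the first command. The chunking skeleton, reduced to a runnable sketch:

#include <stdio.h>

#define CHUNK 7	/* stands in for ARRAY_SIZE(cmd.u.exact) */

int main(void)
{
	unsigned int rem = 23, offset = 0;

	while (rem) {
		unsigned int n = rem < CHUNK ? rem : CHUNK;

		printf("submit entries [%u, %u)\n", offset, offset + n);
		offset += n;
		rem -= n;
	}
	return 0;
}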
linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c:2613 │ linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c:2299
│
int err = -EINVAL; │ int err;
int alop = vhcr->op_modifier; │ int alop = vhcr->op_modifier;
│
switch (vhcr->in_modifier & 0xFF) { │ switch (vhcr->in_modifier & 0xFF) {
case RES_QP: │ case RES_QP:
err = qp_free_res(dev, slave, vhcr->op_modifier, alop, │ err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param); │ vhcr->in_param, &vhcr->out_param);
break; │ break;
│
case RES_MTT: │ case RES_MTT:
err = mtt_free_res(dev, slave, vhcr->op_modifier, alop, │ err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param); │ vhcr->in_param, &vhcr->out_param);
break; │ break;
│
case RES_MPT: │ case RES_MPT:
err = mpt_free_res(dev, slave, vhcr->op_modifier, alop, │ err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param); │ vhcr->in_param, &vhcr->out_param);
break; │ break;
│
case RES_CQ: │ case RES_CQ:
err = cq_free_res(dev, slave, vhcr->op_modifier, alop, │ err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param); │ vhcr->in_param, &vhcr->out_param);
break; │ break;
│
case RES_SRQ: │ case RES_SRQ:
err = srq_free_res(dev, slave, vhcr->op_modifier, alop, │ err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param); │ vhcr->in_param, &vhcr->out_param);
break; │ break;
│
case RES_MAC: │ case RES_MAC:
err = mac_free_res(dev, slave, vhcr->op_modifier, alop, │ err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param, │ vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF); │ (vhcr->in_modifier >> 8) & 0xFF);
break; │ break;
│
case RES_VLAN: │ case RES_VLAN:
err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, │ err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param, │ vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF); │ (vhcr->in_modifier >> 8) & 0xFF);
break; │ break;
│
case RES_COUNTER: │ case RES_COUNTER:
err = counter_free_res(dev, slave, vhcr->op_modifier, alop, │ err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param); │ vhcr->in_param, &vhcr->out_param, 0);
break; │ break;
│
case RES_XRCD: │ case RES_XRCD:
err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop, │ err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param); │ vhcr->in_param, &vhcr->out_param);
break; │ break;
│
default: │ default:
│ err = -EINVAL;
break; │ break;
} │ }
│
return err; │ return err;
} │
linux/drivers/net/ethernet/chelsio/cxgb4/sge.c:1937 │ linux/drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1276
│
struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); │ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; │ bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
int l3hdr_len = skb_network_header_len(skb); │ int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; │ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
│
wr->op_immdlen = │ wr->op_immdlen =
cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | │ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
FW_WR_IMMDLEN_V(sizeof(*lso) + │ FW_WR_IMMDLEN_V(sizeof(*lso) +
sizeof(*cpl))); │ sizeof(*cpl)));
/* Fill in the LSO CPL message. */ │ /*
│ * Fill in the LSO CPL message.
│ */
lso->lso_ctrl = │ lso->lso_ctrl =
cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) | │ cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
LSO_FIRST_SLICE_F | │ LSO_FIRST_SLICE_F |
LSO_LAST_SLICE_F | │ LSO_LAST_SLICE_F |
LSO_IPV6_V(v6) | │ LSO_IPV6_V(v6) |
LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | │ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
LSO_IPHDR_LEN_V(l3hdr_len / 4) | │ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); │ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
lso->ipid_ofst = cpu_to_be16(0); │ lso->ipid_ofst = cpu_to_be16(0);
lso->mss = cpu_to_be16(ssi->gso_size); │ lso->mss = cpu_to_be16(ssi->gso_size);
lso->seqno_offset = cpu_to_be32(0); │ lso->seqno_offset = cpu_to_be32(0);
if (is_t4(adapter->params.chip)) │ if (is_t4(adapter->params.chip))
lso->len = cpu_to_be32(skb->len); │ lso->len = cpu_to_be32(skb->len);
else │ else
lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); │ lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
│
/* Set up TX Packet CPL pointer, control word and perform │ /*
│ * Set up TX Packet CPL pointer, control word and perform
* accounting. │ * accounting.
*/ │ */
cpl = (void *)(lso + 1); │ cpl = (void *)(lso + 1);
│
if (chip_ver <= CHELSIO_T5) │ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); │ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
else │ else
cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); │ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
│
cntrl |= TXPKT_CSUM_TYPE_V(v6 ? │ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | │ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
TXPKT_IPHDR_LEN_V(l3hdr_len); │ TXPKT_IPHDR_LEN_V(l3hdr_len);
txq->tso++; │ txq->tso++;
txq->tx_cso += ssi->gso_segs; │ txq->tx_cso += ssi->gso_segs;
} │
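
Header geometry for the LSO control word above: eth_xtra_len is any L2 bytes beyond the basic 14-byte Ethernet header (a VLAN tag, for instance), and both it and the L3 header length are programmed in 4-byte units. A worked sketch of that arithmetic:

#include <stdio.h>

#define ETH_HLEN 14

int main(void)
{
	unsigned int network_offset = 18;	/* Ethernet + one VLAN tag */
	unsigned int l3hdr_len = 20;		/* IPv4, no options */
	unsigned int eth_xtra_len = network_offset - ETH_HLEN;

	printf("ETHHDR_LEN field = %u, IPHDR_LEN field = %u\n",
	       eth_xtra_len / 4, l3hdr_len / 4);
	return 0;
}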
linux/drivers/net/ethernet/intel/ice/ice_virtchnl.c:3390 │ linux/drivers/net/ethernet/intel/ice/ice_virtchnl.c:3560
│
.get_ver_msg = ice_vc_get_ver_msg, │ .get_ver_msg = ice_vc_get_ver_msg,
.get_vf_res_msg = ice_vc_get_vf_res_msg, │ .get_vf_res_msg = ice_vc_get_vf_res_msg,
.reset_vf = ice_vc_reset_vf_msg, │ .reset_vf = ice_vc_reset_vf_msg,
.add_mac_addr_msg = ice_vc_add_mac_addr_msg, │ .add_mac_addr_msg = ice_vc_repr_add_mac,
.del_mac_addr_msg = ice_vc_del_mac_addr_msg, │ .del_mac_addr_msg = ice_vc_repr_del_mac,
.cfg_qs_msg = ice_vc_cfg_qs_msg, │ .cfg_qs_msg = ice_vc_cfg_qs_msg,
.ena_qs_msg = ice_vc_ena_qs_msg, │ .ena_qs_msg = ice_vc_ena_qs_msg,
.dis_qs_msg = ice_vc_dis_qs_msg, │ .dis_qs_msg = ice_vc_dis_qs_msg,
.request_qs_msg = ice_vc_request_qs_msg, │ .request_qs_msg = ice_vc_request_qs_msg,
.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, │ .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
.config_rss_key = ice_vc_config_rss_key, │ .config_rss_key = ice_vc_config_rss_key,
.config_rss_lut = ice_vc_config_rss_lut, │ .config_rss_lut = ice_vc_config_rss_lut,
.get_stats_msg = ice_vc_get_stats_msg, │ .get_stats_msg = ice_vc_get_stats_msg,
.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg, │ .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
.add_vlan_msg = ice_vc_add_vlan_msg, │ .add_vlan_msg = ice_vc_repr_add_vlan,
.remove_vlan_msg = ice_vc_remove_vlan_msg, │ .remove_vlan_msg = ice_vc_repr_del_vlan,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping, │ .ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping, │ .dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg, │ .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
.add_fdir_fltr_msg = ice_vc_add_fdir_fltr, │ .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
.del_fdir_fltr_msg = ice_vc_del_fdir_fltr, │ .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps, │ .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg, │ .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg, │ .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg, │ .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg, │ .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg, │ .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg, │ .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
} │
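
The two columns are one ops structure populated twice: the left with the normal VF message handlers, the right with switchdev-representor variants. The dispatch idiom, sketched with hypothetical handler names:

#include <stdio.h>

struct vc_ops {
	void (*add_mac)(void);
};

static void add_mac_default(void) { puts("default add_mac"); }
static void add_mac_repr(void)    { puts("representor add_mac"); }

static const struct vc_ops default_ops = { .add_mac = add_mac_default };
static const struct vc_ops repr_ops    = { .add_mac = add_mac_repr };

int main(void)
{
	int switchdev_mode = 1;
	const struct vc_ops *ops = switchdev_mode ? &repr_ops : &default_ops;

	ops->add_mac();		/* one call site, behavior chosen by the table */
	return 0;
}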
linux/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c:1055 │ linux/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c:987
│
int i, ret, count; │ int i, ret, count;
unsigned char *p_cache, *p_src; │ unsigned char *p_cache, *p_src;
│
p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL); │ p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
if (!p_cache) │ if (!p_cache)
return -ENOMEM; │ return -ENOMEM;
│
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); │ count = size / sizeof(u32);
│ qlcnic_swap32_buffer((u32 *)buf, count);
memcpy(p_cache, buf, size); │ memcpy(p_cache, buf, size);
p_src = p_cache; │ p_src = p_cache;
count = size / sizeof(u32); │
│
if (qlcnic_83xx_lock_flash(adapter) != 0) { │ if (qlcnic_83xx_lock_flash(adapter) != 0) {
kfree(p_cache); │ kfree(p_cache);
return -EIO; │ return -EIO;
} │ }
│
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { │ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
ret = qlcnic_83xx_enable_flash_write(adapter); │ ret = qlcnic_83xx_enable_flash_write(adapter);
if (ret) { │ if (ret) {
kfree(p_cache); │ kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter); │ qlcnic_83xx_unlock_flash(adapter);
return -EIO; │ return -EIO;
} │ }
} │ }
│
for (i = 0; i < count; i++) { │ for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) {
ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src); │ ret = qlcnic_83xx_flash_bulk_write(adapter, offset,
│ (u32 *)p_src,
│ QLC_83XX_FLASH_WRITE_MAX);
│
if (ret) { │ if (ret) {
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { │ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
ret = qlcnic_83xx_disable_flash_write(adapter); │ ret = qlcnic_83xx_disable_flash_write(adapter);
if (ret) { │ if (ret) {
kfree(p_cache); │ kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter); │ qlcnic_83xx_unlock_flash(adapter);
return -EIO; │ return -EIO;
} │ }
} │ }
│
kfree(p_cache); │ kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter); │ qlcnic_83xx_unlock_flash(adapter);
return -EIO; │ return -EIO;
} │ }
│
p_src = p_src + sizeof(u32); │ p_src = p_src + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
offset = offset + sizeof(u32); │ offset = offset + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
} │ }
│
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { │ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
ret = qlcnic_83xx_disable_flash_write(adapter); │ ret = qlcnic_83xx_disable_flash_write(adapter);
if (ret) { │ if (ret) {
kfree(p_cache); │ kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter); │ qlcnic_83xx_unlock_flash(adapter);
return -EIO; │ return -EIO;
} │ }
} │ }
│
kfree(p_cache); │ kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter); │ qlcnic_83xx_unlock_flash(adapter);
│
return 0; │ return 0;
} │
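
Both variants above byte-swap the buffer word-by-word before writing it to flash; the right one additionally writes in QLC_83XX_FLASH_WRITE_MAX-word bursts instead of one u32 at a time. A guess at what the swap helper amounts to, as a minimal sketch (assumed behavior, not copied from the driver):

#include <stdint.h>
#include <stdio.h>

static void swap32_buffer(uint32_t *buf, unsigned int words)
{
	while (words--) {
		uint32_t v = *buf;

		*buf++ = (v >> 24) | ((v >> 8) & 0xff00) |
			 ((v << 8) & 0xff0000) | (v << 24);
	}
}

int main(void)
{
	uint32_t w = 0x11223344;

	swap32_buffer(&w, 1);
	printf("0x%08x\n", w);	/* prints 0x44332211 */
	return 0;
}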
linux/drivers/net/ethernet/freescale/ucc_geth.h:706 │ linux/drivers/net/ethernet/freescale/ucc_geth.h:507
│
u32 frrxfcser; /* frames with crc error */ │ u32 frrxfcser; /* frames with crc error */
u32 fraligner; /* frames with alignment error */ │ u32 fraligner; /* frames with alignment error */
u32 inrangelenrxer; /* in range length error */ │ u32 inrangelenrxer; /* in range length error */
u32 outrangelenrxer; /* out of range length error */ │ u32 outrangelenrxer; /* out of range length error */
u32 frtoolong; /* frame too long */ │ u32 frtoolong; /* frame too long */
u32 runt; /* runt */ │ u32 runt; /* runt */
u32 verylongevent; /* very long event */ │ u32 verylongevent; /* very long event */
u32 symbolerror; /* symbol error */ │ u32 symbolerror; /* symbol error */
u32 dropbsy; /* drop because of BD not ready */ │ u32 dropbsy; /* drop because of BD not ready */
u8 res0[0x8]; │ u8 res0[0x8];
u32 mismatchdrop; /* drop because of MAC filtering (e.g. address │ u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
or type mismatch) */ │ or type mismatch) */
u32 underpkts; /* total frames less than 64 octets */ │ u32 underpkts; /* total frames less than 64 octets */
u32 pkts256; /* total frames (including bad) between 256 and │ u32 pkts256; /* total frames (including bad) between 256 and
511 octets */ │ 511 octets */
u32 pkts512; /* total frames (including bad) between 512 and │ u32 pkts512; /* total frames (including bad) between 512 and
1023 octets */ │ 1023 octets */
u32 pkts1024; /* total frames (including bad) between 1024 │ u32 pkts1024; /* total frames (including bad) between 1024
and 1518 octets */ │ and 1518 octets */
u32 pktsjumbo; /* total frames (including bad) between 1024 │ u32 pktsjumbo; /* total frames (including bad) between 1024
and MAXLength octets */ │ and MAXLength octets */
u32 frlossinmacer; /* frames lost because of internal MAC error │ u32 frlossinmacer; /* frames lost because of internal MAC error
that is not counted in any other counter */ │ that is not counted in any other counter */
u32 pausefr; /* pause frames */ │ u32 pausefr; /* pause frames */
u8 res1[0x4]; │ u8 res1[0x4];
u32 removevlan; /* total frames that had their VLAN tag removed │ u32 removevlan; /* total frames that had their VLAN tag removed
*/ │ */
u32 replacevlan; /* total frames that had their VLAN tag │ u32 replacevlan; /* total frames that had their VLAN tag
replaced */ │ replaced */
u32 insertvlan; /* total frames that had their VLAN tag │ u32 insertvlan; /* total frames that had their VLAN tag
inserted */ │ inserted */
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:6047 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7751
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
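
This and the four MSTORM variants that follow differ only in their symbol prefix; the MASK/SHIFT pairs are consumed through token-pasting accessors in the style of qed's GET_FIELD()/SET_FIELD(). A self-contained sketch of that idiom (macro bodies are illustrative, not copied from qed):

#include <stdint.h>
#include <stdio.h>

#define CTX_CF1_MASK	0x3
#define CTX_CF1_SHIFT	4

#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)
#define SET_FIELD(value, name, val)					\
	do {								\
		(value) &= ~(name##_MASK << name##_SHIFT);		\
		(value) |= ((val) & name##_MASK) << name##_SHIFT;	\
	} while (0)

int main(void)
{
	uint8_t flags0 = 0;

	SET_FIELD(flags0, CTX_CF1, 2);
	printf("flags0=0x%02x CF1=%u\n", flags0, GET_FIELD(flags0, CTX_CF1));
	return 0;
}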
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:6047 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7714
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:6047 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:7677
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:6047 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10813
│
u8 byte0; │ u8 reserved;
u8 byte1; │ u8 state;
u8 flags0; │ u8 flags0;
#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:6047 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:1602
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:6047 │ linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h:10183
│
u8 byte0; │ u8 byte0;
u8 byte1; │ u8 byte1;
u8 flags0; │ u8 flags0;
#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1 │ #define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0 │ #define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 │ #define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 │ #define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 │ #define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 │ #define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 │ #define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 │ #define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 │ #define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 │ #define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1; │ u8 flags1;
#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 │ #define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 │ #define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 │ #define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 │ #define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 │ #define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 │ #define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 │ #define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3 │ #define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 │ #define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 │ #define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 │ #define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 │ #define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 │ #define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 │ #define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 │ #define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7 │ #define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0; │ __le16 word0;
__le16 word1; │ __le16 word1;
__le32 reg0; │ __le32 reg0;
__le32 reg1; │ __le32 reg1;
} │
linux/drivers/net/ethernet/sfc/selftest.c:409 │ linux/drivers/net/ethernet/sfc/falcon/selftest.c:411
│
struct efx_nic *efx = tx_queue->efx; │ struct ef4_nic *efx = tx_queue->efx;
struct efx_loopback_state *state = efx->loopback_selftest; │ struct ef4_loopback_state *state = efx->loopback_selftest;
struct efx_loopback_payload *payload; │ struct ef4_loopback_payload *payload;
struct sk_buff *skb; │ struct sk_buff *skb;
int i; │ int i;
netdev_tx_t rc; │ netdev_tx_t rc;
│
/* Transmit N copies of buffer */ │ /* Transmit N copies of buffer */
for (i = 0; i < state->packet_count; i++) { │ for (i = 0; i < state->packet_count; i++) {
/* Allocate an skb, holding an extra reference for │ /* Allocate an skb, holding an extra reference for
* transmit completion counting */ │ * transmit completion counting */
skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); │ skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
if (!skb) │ if (!skb)
return -ENOMEM; │ return -ENOMEM;
state->skbs[i] = skb; │ state->skbs[i] = skb;
skb_get(skb); │ skb_get(skb);
│
/* Copy the payload in, incrementing the source address to │ /* Copy the payload in, incrementing the source address to
* exercise the rss vectors */ │ * exercise the rss vectors */
payload = skb_put(skb, sizeof(state->payload)); │ payload = skb_put(skb, sizeof(state->payload));
memcpy(payload, &state->payload, sizeof(state->payload)); │ memcpy(payload, &state->payload, sizeof(state->payload));
payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); │ payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
│
/* Ensure everything we've written is visible to the │ /* Ensure everything we've written is visible to the
* interrupt handler. */ │ * interrupt handler. */
smp_wmb(); │ smp_wmb();
│
netif_tx_lock_bh(efx->net_dev); │ netif_tx_lock_bh(efx->net_dev);
rc = efx_enqueue_skb(tx_queue, skb); │ rc = ef4_enqueue_skb(tx_queue, skb);
netif_tx_unlock_bh(efx->net_dev); │ netif_tx_unlock_bh(efx->net_dev);
│
if (rc != NETDEV_TX_OK) { │ if (rc != NETDEV_TX_OK) {
netif_err(efx, drv, efx->net_dev, │ netif_err(efx, drv, efx->net_dev,
"TX queue %d could not transmit packet %d of " │ "TX queue %d could not transmit packet %d of "
"%d in %s loopback test\n", tx_queue->label, │ "%d in %s loopback test\n", tx_queue->queue,
i + 1, state->packet_count, │ i + 1, state->packet_count,
LOOPBACK_MODE(efx)); │ LOOPBACK_MODE(efx));
│
/* Defer cleaning up the other skbs for the caller */ │ /* Defer cleaning up the other skbs for the caller */
kfree_skb(skb); │ kfree_skb(skb);
return -EPIPE; │ return -EPIPE;
} │ }
} │ }
│
return 0; │ return 0;
} │
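
Each copy of the loopback payload above gets a distinct source address (INADDR_LOOPBACK | (i << 2)) so that receive-side hashing has something to spread on. The address generation, standalone:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct in_addr a = {
			.s_addr = htonl(INADDR_LOOPBACK | (i << 2)),
		};

		printf("packet %d saddr %s\n", i, inet_ntoa(a));
	}
	return 0;
}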
linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:2212 │ linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:2109
│
/* The GPIO should be swapped if swap register is set and active */ │ /* The GPIO should be swapped if swap register is set and active */
int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && │ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; │ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
int gpio_shift = gpio_num + │ int gpio_shift = gpio_num +
(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); │ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
u32 gpio_mask = (1 << gpio_shift); │ u32 gpio_mask = (1 << gpio_shift);
u32 gpio_reg; │ u32 gpio_reg;
│
if (gpio_num > MISC_REGISTERS_GPIO_3) { │ if (gpio_num > MISC_REGISTERS_GPIO_3) {
BNX2X_ERR("Invalid GPIO %d\n", gpio_num); │ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
return -EINVAL; │ return -EINVAL;
} │ }
│
bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); │ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
/* read GPIO int */ │ /* read GPIO and mask except the float bits */
gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); │ gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
│
switch (mode) { │ switch (mode) {
case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: │ case MISC_REGISTERS_GPIO_OUTPUT_LOW:
DP(NETIF_MSG_LINK, │ DP(NETIF_MSG_LINK,
"Clear GPIO INT %d (shift %d) -> output low\n", │ "Set GPIO %d (shift %d) -> output low\n",
gpio_num, gpio_shift); │ gpio_num, gpio_shift);
/* clear SET and set CLR */ │ /* clear FLOAT and set CLR */
gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); │ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); │ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
break; │ break;
│
case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: │ case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
DP(NETIF_MSG_LINK, │ DP(NETIF_MSG_LINK,
"Set GPIO INT %d (shift %d) -> output high\n", │ "Set GPIO %d (shift %d) -> output high\n",
gpio_num, gpio_shift); │ gpio_num, gpio_shift);
/* clear CLR and set SET */ │ /* clear FLOAT and set SET */
gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); │ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); │ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
│ break;
│
│ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
│ DP(NETIF_MSG_LINK,
│ "Set GPIO %d (shift %d) -> input\n",
│ gpio_num, gpio_shift);
│ /* set FLOAT */
│ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
break; │ break;
│
default: │ default:
break; │ break;
} │ }
│
REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); │ REG_WR(bp, MISC_REG_GPIO, gpio_reg);
bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); │ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
│
return 0; │ return 0;
} │
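
The shift/mask arithmetic above places the same GPIO number at a different bit position depending on which port owns it (after the swap-register correction), then updates the register read-modify-write under a hardware lock. The position arithmetic, sketched with a hypothetical port shift:

#include <stdint.h>
#include <stdio.h>

#define GPIO_PORT_SHIFT 4	/* hypothetical stand-in for MISC_REGISTERS_GPIO_PORT_SHIFT */

int main(void)
{
	int gpio_num = 2;

	for (int gpio_port = 0; gpio_port <= 1; gpio_port++) {
		int gpio_shift = gpio_num + (gpio_port ? GPIO_PORT_SHIFT : 0);
		uint32_t gpio_mask = 1u << gpio_shift;

		printf("port %d: shift %d mask 0x%x\n",
		       gpio_port, gpio_shift, gpio_mask);
	}
	return 0;
}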
linux/drivers/net/ethernet/intel/igb/igb_main.c:5737 │ linux/drivers/net/ethernet/intel/igc/igc_main.c:3955
│
unsigned int packets = ring_container->total_packets; │ unsigned int packets = ring_container->total_packets;
unsigned int bytes = ring_container->total_bytes; │ unsigned int bytes = ring_container->total_bytes;
u8 itrval = ring_container->itr; │ u8 itrval = ring_container->itr;
│
/* no packets, exit with status unchanged */ │ /* no packets, exit with status unchanged */
if (packets == 0) │ if (packets == 0)
return; │ return;
│
switch (itrval) { │ switch (itrval) {
case lowest_latency: │ case lowest_latency:
/* handle TSO and jumbo frames */ │ /* handle TSO and jumbo frames */
if (bytes/packets > 8000) │ if (bytes / packets > 8000)
itrval = bulk_latency; │ itrval = bulk_latency;
else if ((packets < 5) && (bytes > 512)) │ else if ((packets < 5) && (bytes > 512))
itrval = low_latency; │ itrval = low_latency;
break; │ break;
case low_latency: /* 50 usec aka 20000 ints/s */ │ case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) { │ if (bytes > 10000) {
/* this if handles the TSO accounting */ │ /* this if handles the TSO accounting */
if (bytes/packets > 8000) │ if (bytes / packets > 8000)
itrval = bulk_latency; │ itrval = bulk_latency;
else if ((packets < 10) || ((bytes/packets) > 1200)) │ else if ((packets < 10) || ((bytes / packets) > 1200))
itrval = bulk_latency; │ itrval = bulk_latency;
else if ((packets > 35)) │ else if ((packets > 35))
itrval = lowest_latency; │ itrval = lowest_latency;
} else if (bytes/packets > 2000) { │ } else if (bytes / packets > 2000) {
itrval = bulk_latency; │ itrval = bulk_latency;
} else if (packets <= 2 && bytes < 512) { │ } else if (packets <= 2 && bytes < 512) {
itrval = lowest_latency; │ itrval = lowest_latency;
} │ }
break; │ break;
case bulk_latency: /* 250 usec aka 4000 ints/s */ │ case bulk_latency: /* 250 usec aka 4000 ints/s */
if (bytes > 25000) { │ if (bytes > 25000) {
if (packets > 35) │ if (packets > 35)
itrval = low_latency; │ itrval = low_latency;
} else if (bytes < 1500) { │ } else if (bytes < 1500) {
itrval = low_latency; │ itrval = low_latency;
} │ }
break; │ break;
} │ }
│
/* clear work counters since we have the values we need */ │ /* clear work counters since we have the values we need */
ring_container->total_bytes = 0; │ ring_container->total_bytes = 0;
ring_container->total_packets = 0; │ ring_container->total_packets = 0;
│
/* write updated itr to ring container */ │ /* write updated itr to ring container */
ring_container->itr = itrval; │ ring_container->itr = itrval;
} │
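
The heuristic above steers interrupt moderation by average packet size and packet count: large average frames (TSO, jumbo) push toward bulk_latency (~4000 ints/s), small sparse traffic toward lowest_latency. The same decision logic as a standalone function:

#include <stdio.h>

enum itr_level { lowest_latency, low_latency, bulk_latency };

static enum itr_level update_itr(enum itr_level cur, unsigned int packets,
				 unsigned int bytes)
{
	if (!packets)
		return cur;

	switch (cur) {
	case lowest_latency:
		if (bytes / packets > 8000)
			return bulk_latency;
		if (packets < 5 && bytes > 512)
			return low_latency;
		return cur;
	case low_latency:	/* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			if (bytes / packets > 8000 ||
			    packets < 10 || bytes / packets > 1200)
				return bulk_latency;
			if (packets > 35)
				return lowest_latency;
		} else if (bytes / packets > 2000) {
			return bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			return lowest_latency;
		}
		return cur;
	case bulk_latency:	/* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				return low_latency;
		} else if (bytes < 1500) {
			return low_latency;
		}
		return cur;
	}
	return cur;
}

int main(void)
{
	/* 40 packets averaging 9000 bytes: TSO-sized, drops straight to bulk */
	printf("%d\n", update_itr(lowest_latency, 40, 360000)); /* prints 2 */
	return 0;
}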
linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c:1097 │ linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c:1284
│
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; │ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc = &value->misc; │ struct mlx5dr_match_misc *misc = &value->misc;
│
DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16); │ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0); │ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0);
DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid); │ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi); │ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi);
DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag); │ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag);
DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio); │ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio);
DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype); │ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype);
│
if (misc->vxlan_vni) { │ if (misc->vxlan_vni) {
MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id, │ MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id,
(misc->vxlan_vni << 8)); │ (misc->vxlan_vni << 8));
misc->vxlan_vni = 0; │ misc->vxlan_vni = 0;
} │ }
│
if (spec->cvlan_tag) { │ if (spec->cvlan_tag) {
MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN); │ MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
spec->cvlan_tag = 0; │ spec->cvlan_tag = 0;
} else if (spec->svlan_tag) { │ } else if (spec->svlan_tag) {
MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN); │ MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
spec->svlan_tag = 0; │ spec->svlan_tag = 0;
} │ }
│
if (spec->ip_version) { │ if (spec->ip_version == IP_VERSION_IPV4) {
if (spec->ip_version == IP_VERSION_IPV4) { │ MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4);
MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4); │ spec->ip_version = 0;
spec->ip_version = 0; │ } else if (spec->ip_version == IP_VERSION_IPV6) {
} else if (spec->ip_version == IP_VERSION_IPV6) { │ MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6);
MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6); │ spec->ip_version = 0;
spec->ip_version = 0; │ } else if (spec->ip_version) {
} else { │ return -EINVAL;
return -EINVAL; │
} │
} │ }
│
return 0; │ return 0;
} │
linux/drivers/net/ethernet/intel/e1000/e1000_ethtool.c:856 │ linux/drivers/net/ethernet/intel/e1000e/ethtool.c:1032
│
/* Interrupt to test */ │ /* Interrupt to test */
mask = 1 << i; │ mask = BIT(i);
│
│ if (adapter->flags & FLAG_IS_ICH) {
│ switch (mask) {
│ case E1000_ICR_RXSEQ:
│ continue;
│ case 0x00000100:
│ if (adapter->hw.mac.type == e1000_ich8lan ||
│ adapter->hw.mac.type == e1000_ich9lan)
│ continue;
│ break;
│ default:
│ break;
│ }
│ }
│
if (!shared_int) { │ if (!shared_int) {
/* Disable the interrupt to be reported in │ /* Disable the interrupt to be reported in
* the cause register and then force the same │ * the cause register and then force the same
* interrupt and see if one gets posted. If │ * interrupt and see if one gets posted. If
* an interrupt was posted to the bus, the │ * an interrupt was posted to the bus, the
* test failed. │ * test failed.
*/ │ */
adapter->test_icr = 0; │ adapter->test_icr = 0;
ew32(IMC, mask); │ ew32(IMC, mask);
ew32(ICS, mask); │ ew32(ICS, mask);
E1000_WRITE_FLUSH(); │ e1e_flush();
msleep(10); │ usleep_range(10000, 11000);
│
if (adapter->test_icr & mask) { │ if (adapter->test_icr & mask) {
*data = 3; │ *data = 3;
break; │ break;
} │ }
} │ }
│
/* Enable the interrupt to be reported in │ /* Enable the interrupt to be reported in
* the cause register and then force the same │ * the cause register and then force the same
* interrupt and see if one gets posted. If │ * interrupt and see if one gets posted. If
* an interrupt was not posted to the bus, the │ * an interrupt was not posted to the bus, the
* test failed. │ * test failed.
*/ │ */
adapter->test_icr = 0; │ adapter->test_icr = 0;
ew32(IMS, mask); │ ew32(IMS, mask);
ew32(ICS, mask); │ ew32(ICS, mask);
E1000_WRITE_FLUSH(); │ e1e_flush();
msleep(10); │ usleep_range(10000, 11000);
│
if (!(adapter->test_icr & mask)) { │ if (!(adapter->test_icr & mask)) {
*data = 4; │ *data = 4;
break; │ break;
} │ }
│
if (!shared_int) { │ if (!shared_int) {
/* Disable the other interrupts to be reported in │ /* Disable the other interrupts to be reported in
* the cause register and then force the other │ * the cause register and then force the other
* interrupts and see if any get posted. If │ * interrupts and see if any get posted. If
* an interrupt was posted to the bus, the │ * an interrupt was posted to the bus, the
* test failed. │ * test failed.
*/ │ */
adapter->test_icr = 0; │ adapter->test_icr = 0;
ew32(IMC, ~mask & 0x00007FFF); │ ew32(IMC, ~mask & 0x00007FFF);
ew32(ICS, ~mask & 0x00007FFF); │ ew32(ICS, ~mask & 0x00007FFF);
E1000_WRITE_FLUSH(); │ e1e_flush();
msleep(10); │ usleep_range(10000, 11000);
│
if (adapter->test_icr) { │ if (adapter->test_icr) {
*data = 5; │ *data = 5;
break; │ break;
} │ }
} │ }
} │
linux/drivers/net/ethernet/amd/a2065.c:268 │ linux/drivers/net/ethernet/amd/7990.c:300
│
│
/* We got an incomplete frame? */ │ /* We got an incomplete frame? */
if ((bits & LE_R1_POK) != LE_R1_POK) { │ if ((bits & LE_R1_POK) != LE_R1_POK) {
dev->stats.rx_over_errors++; │ dev->stats.rx_over_errors++;
dev->stats.rx_errors++; │ dev->stats.rx_errors++;
continue; │ continue;
} else if (bits & LE_R1_ERR) { │ } else if (bits & LE_R1_ERR) {
/* Count only the end frame as a rx error, │ /* Count only the end frame as a rx error,
* not the beginning │ * not the beginning
*/ │ */
if (bits & LE_R1_BUF) │ if (bits & LE_R1_BUF)
dev->stats.rx_fifo_errors++; │ dev->stats.rx_fifo_errors++;
if (bits & LE_R1_CRC) │ if (bits & LE_R1_CRC)
dev->stats.rx_crc_errors++; │ dev->stats.rx_crc_errors++;
if (bits & LE_R1_OFL) │ if (bits & LE_R1_OFL)
dev->stats.rx_over_errors++; │ dev->stats.rx_over_errors++;
if (bits & LE_R1_FRA) │ if (bits & LE_R1_FRA)
dev->stats.rx_frame_errors++; │ dev->stats.rx_frame_errors++;
if (bits & LE_R1_EOP) │ if (bits & LE_R1_EOP)
dev->stats.rx_errors++; │ dev->stats.rx_errors++;
} else { │ } else {
int len = (rd->mblength & 0xfff) - 4; │ int len = (rd->mblength & 0xfff) - 4;
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); │ struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
│
if (!skb) { │ if (!skb) {
dev->stats.rx_dropped++; │ dev->stats.rx_dropped++;
rd->mblength = 0; │ rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN; │ rd->rmd1_bits = LE_R1_OWN;
lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; │ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
return 0; │ return 0;
} │ }
│
skb_reserve(skb, 2); /* 16 byte align */ │ skb_reserve(skb, 2); /* 16 byte align */
skb_put(skb, len); /* make room */ │ skb_put(skb, len); /* make room */
skb_copy_to_linear_data(skb, │ skb_copy_to_linear_data(skb,
(unsigned char *)&ib->rx_buf[lp->rx_new][0], │ (unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
len); │ len);
skb->protocol = eth_type_trans(skb, dev); │ skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb); │ netif_rx(skb);
dev->stats.rx_packets++; │ dev->stats.rx_packets++;
dev->stats.rx_bytes += len; │ dev->stats.rx_bytes += len;
} │ }
│
/* Return the packet to the pool */ │ /* Return the packet to the pool */
rd->mblength = 0; │ rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN; │ rd->rmd1_bits = LE_R1_OWN;
lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; │ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
} │
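
Both receive paths above do skb_reserve(skb, 2) before copying: the 14-byte Ethernet header would otherwise leave the IP header 2 bytes short of a 4-byte boundary. The alignment arithmetic, worked out:

#include <stdio.h>

#define ETH_HLEN 14

int main(void)
{
	unsigned int reserve = 2;

	printf("IP header offset without reserve: %u (mod 4 = %u)\n",
	       ETH_HLEN, ETH_HLEN % 4);
	printf("IP header offset with reserve:    %u (mod 4 = %u)\n",
	       reserve + ETH_HLEN, (reserve + ETH_HLEN) % 4);
	return 0;
}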
linux/drivers/net/ethernet/intel/igb/igb_main.c:8219 │ linux/drivers/net/ethernet/intel/igc/igc_main.c:2779
│
struct e1000_hw *hw = &adapter->hw; │ struct igc_hw *hw = &adapter->hw;
│
/* Detect a transmit hang in hardware, this serializes the │ /* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i │ * check with the clearing of time_stamp and movement of i
*/ │ */
clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); │ clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
if (tx_buffer->next_to_watch && │ if (tx_buffer->next_to_watch &&
time_after(jiffies, tx_buffer->time_stamp + │ time_after(jiffies, tx_buffer->time_stamp +
(adapter->tx_timeout_factor * HZ)) && │ (adapter->tx_timeout_factor * HZ)) &&
!(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { │ !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
│
/* detected Tx unit hang */ │ /* detected Tx unit hang */
dev_err(tx_ring->dev, │ netdev_err(tx_ring->netdev,
"Detected Tx Unit Hang\n" │ "Detected Tx Unit Hang\n"
" Tx Queue <%d>\n" │ " Tx Queue <%d>\n"
" TDH <%x>\n" │ " TDH <%x>\n"
" TDT <%x>\n" │ " TDT <%x>\n"
" next_to_use <%x>\n" │ " next_to_use <%x>\n"
" next_to_clean <%x>\n" │ " next_to_clean <%x>\n"
"buffer_info[next_to_clean]\n" │ "buffer_info[next_to_clean]\n"
" time_stamp <%lx>\n" │ " time_stamp <%lx>\n"
" next_to_watch <%p>\n" │ " next_to_watch <%p>\n"
" jiffies <%lx>\n" │ " jiffies <%lx>\n"
" desc.status <%x>\n", │ " desc.status <%x>\n",
tx_ring->queue_index, │ tx_ring->queue_index,
rd32(E1000_TDH(tx_ring->reg_idx)), │ rd32(IGC_TDH(tx_ring->reg_idx)),
readl(tx_ring->tail), │ readl(tx_ring->tail),
tx_ring->next_to_use, │ tx_ring->next_to_use,
tx_ring->next_to_clean, │ tx_ring->next_to_clean,
tx_buffer->time_stamp, │ tx_buffer->time_stamp,
tx_buffer->next_to_watch, │ tx_buffer->next_to_watch,
jiffies, │ jiffies,
tx_buffer->next_to_watch->wb.status); │ tx_buffer->next_to_watch->wb.status);
netif_stop_subqueue(tx_ring->netdev, │ netif_stop_subqueue(tx_ring->netdev,
tx_ring->queue_index); │ tx_ring->queue_index);
│
/* we are about to reset, no point in enabling stuff */ │ /* we are about to reset, no point in enabling stuff */
return true; │ return true;
} │ }
} │
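
The hang check above compares jiffies against a stored timestamp with time_after(), which stays correct across counter wraparound thanks to signed subtraction. A user-space rendition of the macro (the kernel's version adds type checking):

#include <stdio.h>

#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long time_stamp = ~0UL - 8;	/* just before the counter wraps */
	unsigned long now = 16;			/* just after it wrapped */

	printf("timed out: %d\n", time_after(now, time_stamp + 8));
	return 0;
}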
linux/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c:425 │ linux/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c:147
│
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); │ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
void *outer_headers_c; │ void *outer_headers_c;
int ix = 0; │ int ix = 0;
u32 *in; │ u32 *in;
int err; │ int err;
u8 *mc; │ u8 *mc;
│
ft->g = kcalloc(MLX5E_FS_UDP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); │ ft->g = kcalloc(MLX5E_FS_UDP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
in = kvzalloc(inlen, GFP_KERNEL); │ in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) { │ if (!in || !ft->g) {
kfree(ft->g); │ kfree(ft->g);
kvfree(in); │ kvfree(in);
return -ENOMEM; │ return -ENOMEM;
} │ }
│
/* Match on ethertype */ │
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); │ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers); │ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype); │ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
│ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_version);
│
│ switch (type) {
│ case FS_IPV4_UDP:
│ case FS_IPV6_UDP:
│ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
│ break;
│ default:
│ err = -EINVAL;
│ goto out;
│ }
│ /* Match on udp protocol, Ipv4/6 and dport */
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); │ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix); │ MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_FS_ANY_GROUP1_SIZE; │ ix += MLX5E_FS_UDP_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1); │ MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); │ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups])) │ if (IS_ERR(ft->g[ft->num_groups]))
goto err; │ goto err;
ft->num_groups++; │ ft->num_groups++;
│
/* Default Flow Group */ │ /* Default Flow Group */
memset(in, 0, inlen); │ memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix); │ MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_FS_ANY_GROUP2_SIZE; │ ix += MLX5E_FS_UDP_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1); │ MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); │ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups])) │ if (IS_ERR(ft->g[ft->num_groups]))
goto err; │ goto err;
ft->num_groups++; │ ft->num_groups++;
│
kvfree(in); │ kvfree(in);
return 0; │ return 0;
│
err: │ err:
err = PTR_ERR(ft->g[ft->num_groups]); │ err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL; │ ft->g[ft->num_groups] = NULL;
│ out:
kvfree(in); │ kvfree(in);
│
return err; │ return err;
} │
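
Both versions allocate the group array and the scratch inbox together and route every failure through one cleanup path, so neither buffer can leak. A stripped-down user-space sketch of that pattern follows; build_group() and the names around it are illustrative stand-ins, not the mlx5 API.

#include <errno.h>
#include <stdlib.h>

struct table { void **groups; unsigned int num_groups; };

/* stand-in for mlx5_create_flow_group(); always succeeds here */
static int build_group(struct table *t, void *in)
{
	(void)t; (void)in;
	return 0;
}

int table_init(struct table *t, unsigned int ngroups, size_t inlen)
{
	void *in;
	int err = 0;

	/* allocate both up front; one combined check frees whichever succeeded */
	t->groups = calloc(ngroups, sizeof(*t->groups));
	in = calloc(1, inlen);
	if (!in || !t->groups) {
		free(t->groups);
		free(in);
		return -ENOMEM;
	}

	if (build_group(t, in)) { /* any later failure funnels through out */
		err = -EINVAL;
		goto out;
	}
	t->num_groups++;
out:
	free(in);                 /* the scratch inbox is freed on every path */
	return err;
}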
linux/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c:516 │ linux/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c:751
│
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); │ u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
│ u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; │ u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
│
hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx); │ hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
│
hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); │ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
│
hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, │ hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
aq_ring->idx); │ aq_ring->idx);
│
hw_atl_reg_rx_dma_desc_base_addressmswset(self, │ hw_atl_reg_rx_dma_desc_base_addressmswset(self,
dma_desc_addr_msw, │ dma_desc_addr_msw, aq_ring->idx);
aq_ring->idx); │
│
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); │ hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
│
hw_atl_rdm_rx_desc_data_buff_size_set(self, │ hw_atl_rdm_rx_desc_data_buff_size_set(self,
AQ_CFG_RX_FRAME_MAX / 1024U, │ AQ_CFG_RX_FRAME_MAX / 1024U,
aq_ring->idx); │ aq_ring->idx);
│
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); │ hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); │ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx); │ hw_atl_rpo_rx_desc_vlan_stripping_set(self, !!vlan_rx_stripping,
│ aq_ring->idx);
│
/* Rx ring set mode */ │ /* Rx ring set mode */
│
/* Mapping interrupt vector */ │ /* Mapping interrupt vector */
hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); │ hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx); │ hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
│
hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); │ hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); │ hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); │ hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); │ hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
│
return aq_hw_err_from_flags(self); │ return aq_hw_err_from_flags(self);
} │
linux/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c:2142 │ linux/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c:1560
│
mbx->mbx_reg = FM10K_GMBX; │ /* initialize registers */
mbx->mbmem_reg = FM10K_MBMEM_PF(0); │ switch (hw->mac.type) {
│ case fm10k_mac_vf:
│ mbx->mbx_reg = FM10K_VFMBX;
│ mbx->mbmem_reg = FM10K_VFMBMEM(FM10K_VFMBMEM_VF_XOR);
│ break;
│ case fm10k_mac_pf:
│ /* there are only 64 VF <-> PF mailboxes */
│ if (id < 64) {
│ mbx->mbx_reg = FM10K_MBX(id);
│ mbx->mbmem_reg = FM10K_MBMEM_VF(id, 0);
│ break;
│ }
│ fallthrough;
│ default:
│ return FM10K_MBX_ERR_NO_MBX;
│ }
│
/* start out in closed state */ │ /* start out in closed state */
mbx->state = FM10K_STATE_CLOSED; │ mbx->state = FM10K_STATE_CLOSED;
│
/* validate layout of handlers before assigning them */ │ /* validate layout of handlers before assigning them */
if (fm10k_mbx_validate_handlers(msg_data)) │ if (fm10k_mbx_validate_handlers(msg_data))
return FM10K_ERR_PARAM; │ return FM10K_ERR_PARAM;
│
/* initialize the message handlers */ │ /* initialize the message handlers */
mbx->msg_data = msg_data; │ mbx->msg_data = msg_data;
│
/* start mailbox as timed out and let the reset_hw call │ /* start mailbox as timed out and let the reset_hw call
* set the timeout value to begin communications │ * set the timeout value to begin communications
*/ │ */
mbx->timeout = 0; │ mbx->timeout = 0;
mbx->udelay = FM10K_MBX_INIT_DELAY; │ mbx->udelay = FM10K_MBX_INIT_DELAY;
│
│ /* initialize tail and head */
│ mbx->tail = 1;
│ mbx->head = 1;
│
│ /* initialize CRC seeds */
│ mbx->local = FM10K_MBX_CRC_SEED;
│ mbx->remote = FM10K_MBX_CRC_SEED;
│
/* Split buffer for use by Tx/Rx FIFOs */ │ /* Split buffer for use by Tx/Rx FIFOs */
mbx->max_size = FM10K_MBX_MSG_MAX_SIZE; │ mbx->max_size = FM10K_MBX_MSG_MAX_SIZE;
mbx->mbmem_len = FM10K_MBMEM_PF_XOR; │ mbx->mbmem_len = FM10K_VFMBMEM_VF_XOR;
│
/* initialize the FIFOs, sizes are in 4 byte increments */ │ /* initialize the FIFOs, sizes are in 4 byte increments */
fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE); │ fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE);
fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE], │ fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE],
FM10K_MBX_RX_BUFFER_SIZE); │ FM10K_MBX_RX_BUFFER_SIZE);
│
/* initialize function pointers */ │ /* initialize function pointers */
mbx->ops.connect = fm10k_sm_mbx_connect; │ mbx->ops.connect = fm10k_mbx_connect;
mbx->ops.disconnect = fm10k_sm_mbx_disconnect; │ mbx->ops.disconnect = fm10k_mbx_disconnect;
mbx->ops.rx_ready = fm10k_mbx_rx_ready; │ mbx->ops.rx_ready = fm10k_mbx_rx_ready;
mbx->ops.tx_ready = fm10k_mbx_tx_ready; │ mbx->ops.tx_ready = fm10k_mbx_tx_ready;
mbx->ops.tx_complete = fm10k_mbx_tx_complete; │ mbx->ops.tx_complete = fm10k_mbx_tx_complete;
mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx; │ mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx;
mbx->ops.process = fm10k_sm_mbx_process; │ mbx->ops.process = fm10k_mbx_process;
mbx->ops.register_handlers = fm10k_mbx_register_handlers; │ mbx->ops.register_handlers = fm10k_mbx_register_handlers;
│
return 0; │ return 0;
} │
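
Both init paths carve a single flat buffer into back-to-back Tx and Rx FIFOs, sized in 4-byte words. A minimal sketch of that split; the word counts are hypothetical, not the FM10K_MBX_*_BUFFER_SIZE values.

#include <stdint.h>

#define TX_WORDS 512 /* hypothetical Tx FIFO size, in u32 words */
#define RX_WORDS 512 /* hypothetical Rx FIFO size, in u32 words */

struct fifo {
	uint32_t *buffer;
	uint16_t size;
	uint16_t head, tail;
};

static void fifo_init(struct fifo *f, uint32_t *buf, uint16_t size)
{
	f->buffer = buf;
	f->size = size;
	f->head = 0;
	f->tail = 0;
}

void mbx_fifos_init(struct fifo *tx, struct fifo *rx,
		    uint32_t buffer[TX_WORDS + RX_WORDS])
{
	/* Tx takes the front of the buffer, Rx the remainder */
	fifo_init(tx, buffer, TX_WORDS);
	fifo_init(rx, &buffer[TX_WORDS], RX_WORDS);
}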
linux/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c:597 │ linux/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c:699
│
u32 base_add_l, base_add_h; │ u32 base_add_l, base_add_h;
u32 j = 0; │ u32 j = 0;
│
sprintf(result[j++], "%u", index); │ sprintf(result[j++], "%u", index);
│ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
│ HNS3_RING_TX_RING_BD_NUM_REG));
│
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + │ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_NUM_REG)); │ HNS3_RING_TX_RING_TC_REG));
│
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + │ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_LEN_REG)); │ HNS3_RING_TX_RING_TAIL_REG));
│
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + │ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_TAIL_REG)); │ HNS3_RING_TX_RING_HEAD_REG));
│
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + │ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_HEAD_REG)); │ HNS3_RING_TX_RING_FBDNUM_REG));
│
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + │ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_FBDNUM_REG)); │ HNS3_RING_TX_RING_OFFSET_REG));
│
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + │ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_PKTNUM_RECORD_REG)); │ HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
sprintf(result[j++], "%u", ring->rx_copybreak); │
│
sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + │ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_EN_REG) ? "on" : "off"); │ HNS3_RING_EN_REG) ? "on" : "off");
│
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) │ if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + │ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_EN_REG) ? "on" : "off"); │ HNS3_RING_TX_EN_REG) ? "on" : "off");
else │ else
sprintf(result[j++], "%s", "NA"); │ sprintf(result[j++], "%s", "NA");
│
base_add_h = readl_relaxed(ring->tqp->io_base + │ base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_H_REG); │ HNS3_RING_TX_RING_BASEADDR_H_REG);
base_add_l = readl_relaxed(ring->tqp->io_base + │ base_add_l = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_L_REG); │ HNS3_RING_TX_RING_BASEADDR_L_REG);
sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l); │ sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
} │
linux/drivers/net/ethernet/ni/nixge.c:1083 │ linux/drivers/net/ethernet/ni/nixge.c:1132
│
struct nixge_priv *priv = bus->priv; │ struct nixge_priv *priv = bus->priv;
u32 status, tmp; │ u32 status, tmp;
int err; │
u16 device; │ u16 device;
│ int err;
│
if (reg & MII_ADDR_C45) { │ if (reg & MII_ADDR_C45) {
device = (reg >> 16) & 0x1f; │ device = (reg >> 16) & 0x1f;
│
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff); │ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
│
tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) │ tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); │ | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
│
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); │ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); │ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
│
err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, │ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
!status, 10, 1000); │ !status, 10, 1000);
if (err) { │ if (err) {
dev_err(priv->dev, "timeout setting address"); │ dev_err(priv->dev, "timeout setting address");
return err; │ return err;
} │ }
│
tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) | │ tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE)
NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); │ | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
│
│ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
│ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
│ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
│ !status, 10, 1000);
│ if (err)
│ dev_err(priv->dev, "timeout setting write command");
} else { │ } else {
device = reg & 0x1f; │ device = reg & 0x1f;
│
tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) | │ tmp = NIXGE_MDIO_CLAUSE22 |
│ NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); │ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
} │
│
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); │ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); │ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
│ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
│
err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, │ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
!status, 10, 1000); │ !status, 10, 1000);
if (err) { │ if (err)
dev_err(priv->dev, "timeout setting read command"); │ dev_err(priv->dev, "timeout setting write command");
return err; │
} │ }
│
status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA); │ return err;
│
return status; │
} │
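
The read and write paths decode a Clause 45 register argument identically: a flag bit marks the access as C45, the MMD device sits in bits 20:16, and the register address in the low 16 bits. A small sketch of that unpacking; the flag value mirrors the legacy kernel MII_ADDR_C45 convention (bit 30), and the example register is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define MII_ADDR_C45_FLAG (1u << 30) /* C45 marker, as in the legacy kernel API */

static void mdio_decode_c45(uint32_t reg, unsigned int *devad, unsigned int *addr)
{
	*devad = (reg >> 16) & 0x1f; /* MMD device, 5 bits */
	*addr = reg & 0xffff;        /* register address within the MMD */
}

int main(void)
{
	uint32_t reg = MII_ADDR_C45_FLAG | (7u << 16) | 0x0834; /* hypothetical */
	unsigned int devad, addr;

	if (reg & MII_ADDR_C45_FLAG) {
		mdio_decode_c45(reg, &devad, &addr);
		printf("C45 access: devad %u, reg 0x%04x\n", devad, addr);
	}
	return 0;
}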
linux/drivers/net/ethernet/netronome/nfp/nfd3/dp.c:1085 │ linux/drivers/net/ethernet/netronome/nfp/nfdk/dp.c:1209
│
struct nfp_net_r_vector *r_vec = │ struct nfp_net_r_vector *r_vec =
container_of(napi, struct nfp_net_r_vector, napi); │ container_of(napi, struct nfp_net_r_vector, napi);
unsigned int pkts_polled = 0; │ unsigned int pkts_polled = 0;
│
if (r_vec->tx_ring) │ if (r_vec->tx_ring)
nfp_nfd3_tx_complete(r_vec->tx_ring, budget); │ nfp_nfdk_tx_complete(r_vec->tx_ring, budget);
if (r_vec->rx_ring) │ if (r_vec->rx_ring)
pkts_polled = nfp_nfd3_rx(r_vec->rx_ring, budget); │ pkts_polled = nfp_nfdk_rx(r_vec->rx_ring, budget);
│
if (pkts_polled < budget) │ if (pkts_polled < budget)
if (napi_complete_done(napi, pkts_polled)) │ if (napi_complete_done(napi, pkts_polled))
nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); │ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
│
if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) { │ if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
struct dim_sample dim_sample = {}; │ struct dim_sample dim_sample = {};
unsigned int start; │ unsigned int start;
u64 pkts, bytes; │ u64 pkts, bytes;
│
do { │ do {
start = u64_stats_fetch_begin(&r_vec->rx_sync); │ start = u64_stats_fetch_begin(&r_vec->rx_sync);
pkts = r_vec->rx_pkts; │ pkts = r_vec->rx_pkts;
bytes = r_vec->rx_bytes; │ bytes = r_vec->rx_bytes;
} while (u64_stats_fetch_retry(&r_vec->rx_sync, start)); │ } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
│
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample); │ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->rx_dim, dim_sample); │ net_dim(&r_vec->rx_dim, dim_sample);
} │ }
│
if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) { │ if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
struct dim_sample dim_sample = {}; │ struct dim_sample dim_sample = {};
unsigned int start; │ unsigned int start;
u64 pkts, bytes; │ u64 pkts, bytes;
│
do { │ do {
start = u64_stats_fetch_begin(&r_vec->tx_sync); │ start = u64_stats_fetch_begin(&r_vec->tx_sync);
pkts = r_vec->tx_pkts; │ pkts = r_vec->tx_pkts;
bytes = r_vec->tx_bytes; │ bytes = r_vec->tx_bytes;
} while (u64_stats_fetch_retry(&r_vec->tx_sync, start)); │ } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
│
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample); │ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->tx_dim, dim_sample); │ net_dim(&r_vec->tx_dim, dim_sample);
} │ }
│
return pkts_polled; │ return pkts_polled;
} │
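
The fetch_begin/fetch_retry loop is a seqcount read: take a snapshot, then retry if a writer raced it. A simplified user-space sketch of the same discipline, with a plain C11 atomic standing in for the kernel's u64_stats helpers (the real helpers also handle memory ordering and 32-bit architectures, which this sketch glosses over).

#include <stdatomic.h>
#include <stdint.h>

struct stats {
	atomic_uint seq; /* even = stable, odd = write in progress */
	uint64_t pkts, bytes;
};

void stats_read(struct stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		while ((start = atomic_load(&s->seq)) & 1)
			; /* writer active, wait for an even sequence */
		*pkts = s->pkts;
		*bytes = s->bytes;
	} while (atomic_load(&s->seq) != start); /* retry if a write slipped in */
}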
linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c:488 │ linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c:187
│
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); │ u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb); │ u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
char rtdp_pl[MLXSW_REG_RTDP_LEN]; │ char rtdp_pl[MLXSW_REG_RTDP_LEN];
struct __ip6_tnl_parm parms; │ struct ip_tunnel_parm parms;
unsigned int type_check; │ unsigned int type_check;
bool has_ikey; │ bool has_ikey;
│ u32 daddr4;
u32 ikey; │ u32 ikey;
│
parms = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev); │ parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
has_ikey = mlxsw_sp_ipip_parms6_has_ikey(&parms); │ has_ikey = mlxsw_sp_ipip_parms4_has_ikey(&parms);
ikey = mlxsw_sp_ipip_parms6_ikey(&parms); │ ikey = mlxsw_sp_ipip_parms4_ikey(&parms);
│
mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index); │ mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id); │ mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id);
│
type_check = has_ikey ? │ type_check = has_ikey ?
MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY : │ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY :
MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE; │ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE;
│
/* Linux demuxes tunnels based on packet SIP (which must match tunnel │ /* Linux demuxes tunnels based on packet SIP (which must match tunnel
* remote IP). Thus configure decap so that it filters out packets that │ * remote IP). Thus configure decap so that it filters out packets that
* are not IPv6 or have the wrong SIP. IPIP_DECAP_ERROR trap is │ * are not IPv4 or have the wrong SIP. IPIP_DECAP_ERROR trap is
* generated for packets that fail this criterion. Linux then handles │ * generated for packets that fail this criterion. Linux then handles
* such packets in slow path and generates ICMP destination unreachable. │ * such packets in slow path and generates ICMP destination unreachable.
*/ │ */
mlxsw_reg_rtdp_ipip6_pack(rtdp_pl, rif_index, │ daddr4 = be32_to_cpu(mlxsw_sp_ipip_netdev_daddr4(ipip_entry->ol_dev));
MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV6, │ mlxsw_reg_rtdp_ipip4_pack(rtdp_pl, rif_index,
type_check, has_ikey, │ MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV4,
ipip_entry->dip_kvdl_index, ikey); │ type_check, has_ikey, daddr4, ikey);
│
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl); │ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
} │
linux/drivers/net/ethernet/alteon/acenic.c:1623 │ linux/drivers/net/ethernet/alteon/acenic.c:1743
│
struct ace_private *ap = netdev_priv(dev); │ struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs; │ struct ace_regs __iomem *regs = ap->regs;
short i, idx; │ short i, idx;
│
│ idx = ap->rx_jumbo_skbprd;
prefetchw(&ap->cur_rx_bufs); │
│
idx = ap->rx_std_skbprd; │
│
for (i = 0; i < nr_bufs; i++) { │ for (i = 0; i < nr_bufs; i++) {
struct sk_buff *skb; │ struct sk_buff *skb;
struct rx_desc *rd; │ struct rx_desc *rd;
dma_addr_t mapping; │ dma_addr_t mapping;
│
skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE); │ skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
if (!skb) │ if (!skb)
break; │ break;
│
mapping = dma_map_page(&ap->pdev->dev, │ mapping = dma_map_page(&ap->pdev->dev,
virt_to_page(skb->data), │ virt_to_page(skb->data),
offset_in_page(skb->data), │ offset_in_page(skb->data),
ACE_STD_BUFSIZE, DMA_FROM_DEVICE); │ ACE_JUMBO_BUFSIZE, DMA_FROM_DEVICE);
ap->skb->rx_std_skbuff[idx].skb = skb; │ ap->skb->rx_jumbo_skbuff[idx].skb = skb;
dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx], │ dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
mapping, mapping); │ mapping, mapping);
│
rd = &ap->rx_std_ring[idx]; │ rd = &ap->rx_jumbo_ring[idx];
set_aceaddr(&rd->addr, mapping); │ set_aceaddr(&rd->addr, mapping);
rd->size = ACE_STD_BUFSIZE; │ rd->size = ACE_JUMBO_BUFSIZE;
rd->idx = idx; │ rd->idx = idx;
idx = (idx + 1) % RX_STD_RING_ENTRIES; │ idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
} │ }
│
if (!i) │ if (!i)
goto error_out; │ goto error_out;
│
atomic_add(i, &ap->cur_rx_bufs); │ atomic_add(i, &ap->cur_jumbo_bufs);
ap->rx_std_skbprd = idx; │ ap->rx_jumbo_skbprd = idx;
│
if (ACE_IS_TIGON_I(ap)) { │ if (ACE_IS_TIGON_I(ap)) {
struct cmd cmd; │ struct cmd cmd;
cmd.evt = C_SET_RX_PRD_IDX; │ cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
cmd.code = 0; │ cmd.code = 0;
cmd.idx = ap->rx_std_skbprd; │ cmd.idx = ap->rx_jumbo_skbprd;
ace_issue_cmd(regs, &cmd); │ ace_issue_cmd(regs, &cmd);
} else { │ } else {
		writel(idx, &regs->RxStdPrd);                                      │               writel(idx, &regs->RxJumboPrd);
wmb(); │ wmb();
} │ }
│
out: │ out:
clear_bit(0, &ap->std_refill_busy); │ clear_bit(0, &ap->jumbo_refill_busy);
return; │ return;
│
error_out: │ error_out:
printk(KERN_INFO "Out of memory when allocating " │ if (net_ratelimit())
"standard receive buffers\n"); │ printk(KERN_INFO "Out of memory when allocating "
│ "jumbo receive buffers\n");
goto out; │ goto out;
} │
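
Both refill paths advance a producer index around the ring one slot at a time and publish it to hardware only after the whole batch. A compact sketch of that loop; the ring size is a hypothetical stand-in for RX_STD_RING_ENTRIES / RX_JUMBO_RING_ENTRIES.

#include <stdio.h>

#define RING_ENTRIES 512 /* hypothetical ring size */

static unsigned int refill(unsigned int prod, int nr_bufs)
{
	int i;

	for (i = 0; i < nr_bufs; i++) {
		/* ... allocate and map a buffer into slot "prod" ... */
		prod = (prod + 1) % RING_ENTRIES;
	}
	/* caller publishes the new producer index to hardware once */
	return prod;
}

int main(void)
{
	printf("new prod: %u\n", refill(510, 4)); /* wraps: 511, 0, 1, 2 */
	return 0;
}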
linux/drivers/net/ethernet/sfc/falcon/nic.c:330 │ linux/drivers/net/ethernet/sfc/nic.c:342
│
/* DRIVER is not used */ │ /* DRIVER is not used */
/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */ │ /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
REGISTER_TABLE_BB(TX_IPFIL_TBL), │ REGISTER_TABLE_BB(TX_IPFIL_TBL),
REGISTER_TABLE_BB(TX_SRC_MAC_TBL), │ REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER), │ REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL), │ REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER), │ REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), │ REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), │ REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), │ REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
/* We can't reasonably read all of the buffer table (up to 8MB!). │ /* We can't reasonably read all of the buffer table (up to 8MB!).
* However this driver will only use a few entries. Reading │ * However this driver will only use a few entries. Reading
* 1K entries allows for some expansion of queue count and │ * 1K entries allows for some expansion of queue count and
* size before we need to change the version. */ │ * size before we need to change the version. */
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, │ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
F, A, A, 8, 1024), │ F, A, A, 8, 1024),
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, │ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
F, B, Z, 8, 1024), │ F, B, Z, 8, 1024),
REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), │ REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
REGISTER_TABLE_BB_CZ(TIMER_TBL), │ REGISTER_TABLE_BB_CZ(TIMER_TBL),
REGISTER_TABLE_BB_CZ(TX_PACE_TBL), │ REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
REGISTER_TABLE_BZ(RX_INDIRECTION_TBL), │ REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
/* TX_FILTER_TBL0 is huge and not used by this driver */ │ /* TX_FILTER_TBL0 is huge and not used by this driver */
REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0), │ REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
REGISTER_TABLE_CZ(MC_TREG_SMEM), │ REGISTER_TABLE_CZ(MC_TREG_SMEM),
/* MSIX_PBA_TABLE is not mapped */ │ /* MSIX_PBA_TABLE is not mapped */
/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ │ /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
REGISTER_TABLE_BZ(RX_FILTER_TBL0), │ REGISTER_TABLE_BZ(RX_FILTER_TBL0),
│ REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
} │
linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c:1334 │ linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c:1296
│
struct cpl_t6_act_open_req *t6req = NULL; │ struct cpl_t6_act_open_req6 *t6req = NULL;
struct cpl_act_open_req *req = NULL; │ struct cpl_act_open_req6 *req = NULL;
│
t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req)); │ t6req = (struct cpl_t6_act_open_req6 *)__skb_put(skb, sizeof(*t6req));
INIT_TP_WR(t6req, 0); │ INIT_TP_WR(t6req, 0);
req = (struct cpl_act_open_req *)t6req; │ req = (struct cpl_act_open_req6 *)t6req;
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid)); │ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_filterid));
req->local_port = cpu_to_be16(f->fs.val.lport); │ req->local_port = cpu_to_be16(f->fs.val.lport);
req->peer_port = cpu_to_be16(f->fs.val.fport); │ req->peer_port = cpu_to_be16(f->fs.val.fport);
memcpy(&req->local_ip, f->fs.val.lip, 4); │ req->local_ip_hi = *(__be64 *)(&f->fs.val.lip);
memcpy(&req->peer_ip, f->fs.val.fip, 4); │ req->local_ip_lo = *(((__be64 *)&f->fs.val.lip) + 1);
│ req->peer_ip_hi = *(__be64 *)(&f->fs.val.fip);
│ req->peer_ip_lo = *(((__be64 *)&f->fs.val.fip) + 1);
req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE || │ req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
f->fs.newvlan == VLAN_REWRITE) | │ f->fs.newvlan == VLAN_REWRITE) |
DELACK_V(f->fs.hitcnts) | │ DELACK_V(f->fs.hitcnts) |
L2T_IDX_V(f->l2t ? f->l2t->idx : 0) | │ L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
SMAC_SEL_V((cxgb4_port_viid(f->dev) & │ SMAC_SEL_V((cxgb4_port_viid(f->dev) &
0x7F) << 1) | │ 0x7F) << 1) |
TX_CHAN_V(f->fs.eport) | │ TX_CHAN_V(f->fs.eport) |
NO_CONG_V(f->fs.rpttid) | │ NO_CONG_V(f->fs.rpttid) |
ULP_MODE_V(f->fs.nat_mode ? │ ULP_MODE_V(f->fs.nat_mode ?
ULP_MODE_TCPDDP : ULP_MODE_NONE) | │ ULP_MODE_TCPDDP : ULP_MODE_NONE) |
TCAM_BYPASS_F | NON_OFFLOAD_F); │ TCAM_BYPASS_F | NON_OFFLOAD_F);
│
t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs, │ t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
f->dev))); │ f->dev)));
t6req->opt2 = htonl(RSS_QUEUE_VALID_F | │ t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
RSS_QUEUE_V(f->fs.iq) | │ RSS_QUEUE_V(f->fs.iq) |
TX_QUEUE_V(f->fs.nat_mode) | │ TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F | │ T5_OPT_2_VALID_F |
RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) | │ RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
PACE_V((f->fs.maskhash) | │ PACE_V((f->fs.maskhash) |
((f->fs.dirsteerhash) << 1))); │ ((f->fs.dirsteerhash) << 1)));
} │
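
The IPv6 variant loads the 128-bit address into the request as two 64-bit words, high half first; the stored bytes are already in network order, so the split is a straight copy. A sketch of that split over plain byte buffers (names are illustrative):

#include <stdint.h>
#include <string.h>

/* Split a 16-byte (IPv6-sized) address into hi/lo 64-bit words,
 * preserving the network byte order of the source buffer. */
void ip6_to_64bit_pair(const uint8_t addr[16], uint64_t *hi, uint64_t *lo)
{
	memcpy(hi, addr, 8);     /* bytes 0..7: the high half */
	memcpy(lo, addr + 8, 8); /* bytes 8..15: the low half */
}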
linux/drivers/net/ethernet/sfc/rx.c:71 │ linux/drivers/net/ethernet/sfc/falcon/rx.c:477
│
struct efx_nic *efx = channel->efx; │ struct ef4_nic *efx = channel->efx;
struct sk_buff *skb; │ struct sk_buff *skb;
│
/* Allocate an SKB to store the headers */ │ /* Allocate an SKB to store the headers */
skb = netdev_alloc_skb(efx->net_dev, │ skb = netdev_alloc_skb(efx->net_dev,
efx->rx_ip_align + efx->rx_prefix_size + │ efx->rx_ip_align + efx->rx_prefix_size +
hdr_len); │ hdr_len);
if (unlikely(skb == NULL)) { │ if (unlikely(skb == NULL)) {
atomic_inc(&efx->n_rx_noskb_drops); │ atomic_inc(&efx->n_rx_noskb_drops);
return NULL; │ return NULL;
} │ }
│
EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len); │ EF4_BUG_ON_PARANOID(rx_buf->len < hdr_len);
│
memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size, │ memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
efx->rx_prefix_size + hdr_len); │ efx->rx_prefix_size + hdr_len);
skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size); │ skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
__skb_put(skb, hdr_len); │ __skb_put(skb, hdr_len);
│
/* Append the remaining page(s) onto the frag list */ │ /* Append the remaining page(s) onto the frag list */
if (rx_buf->len > hdr_len) { │ if (rx_buf->len > hdr_len) {
rx_buf->page_offset += hdr_len; │ rx_buf->page_offset += hdr_len;
rx_buf->len -= hdr_len; │ rx_buf->len -= hdr_len;
│
for (;;) { │ for (;;) {
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, │ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buf->page, rx_buf->page_offset, │ rx_buf->page, rx_buf->page_offset,
rx_buf->len, efx->rx_buffer_truesize); │ rx_buf->len);
rx_buf->page = NULL; │ rx_buf->page = NULL;
│ skb->len += rx_buf->len;
│ skb->data_len += rx_buf->len;
if (skb_shinfo(skb)->nr_frags == n_frags) │ if (skb_shinfo(skb)->nr_frags == n_frags)
break; │ break;
│
rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); │ rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
} │ }
} else { │ } else {
__free_pages(rx_buf->page, efx->rx_buffer_order); │ __free_pages(rx_buf->page, efx->rx_buffer_order);
rx_buf->page = NULL; │ rx_buf->page = NULL;
n_frags = 0; │ n_frags = 0;
} │ }
│
│ skb->truesize += n_frags * efx->rx_buffer_truesize;
│
/* Move past the ethernet header */ │ /* Move past the ethernet header */
skb->protocol = eth_type_trans(skb, efx->net_dev); │ skb->protocol = eth_type_trans(skb, efx->net_dev);
│
skb_mark_napi_id(skb, &channel->napi_str); │ skb_mark_napi_id(skb, &channel->napi_str);
│
return skb; │ return skb;
} │
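
Both variants copy only the first hdr_len bytes into the skb's linear area and leave the payload in the original page, attached as a fragment. A user-space sketch of the same header/payload split, with plain buffers in place of skbs and pages (sizes hypothetical):

#include <stddef.h>
#include <string.h>

struct split_buf {
	unsigned char hdr[128];       /* linear "header" area, hypothetical size */
	size_t hdr_len;
	const unsigned char *payload; /* points into the original buffer, like a frag */
	size_t payload_len;
};

void split_rx_buf(struct split_buf *s, const unsigned char *buf,
		  size_t len, size_t hdr_len)
{
	memcpy(s->hdr, buf, hdr_len); /* headers copied, as with __skb_put() */
	s->hdr_len = hdr_len;
	s->payload = buf + hdr_len;   /* payload stays in place, zero-copy */
	s->payload_len = len - hdr_len;
}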
linux/drivers/net/ethernet/intel/ice/ice_virtchnl.c:3343 │ linux/drivers/net/ethernet/intel/ice/ice_virtchnl.c:3284
│
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; │ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vlan_supported_caps *insertion_support; │ struct virtchnl_vlan_supported_caps *insertion_support;
struct virtchnl_vlan_setting *insertion_msg = │ struct virtchnl_vlan_setting *insertion_msg =
(struct virtchnl_vlan_setting *)msg; │ (struct virtchnl_vlan_setting *)msg;
u32 ethertype_setting; │ u32 ethertype_setting;
struct ice_vsi *vsi; │ struct ice_vsi *vsi;
│
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { │ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; │ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto out; │ goto out;
} │ }
│
if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) { │ if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; │ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto out; │ goto out;
} │ }
│
vsi = ice_get_vf_vsi(vf); │ vsi = ice_get_vf_vsi(vf);
if (!vsi) { │ if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; │ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto out; │ goto out;
} │ }
│
insertion_support = &vf->vlan_v2_caps.offloads.insertion_support; │ insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) { │ if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; │ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto out; │ goto out;
} │ }
│
ethertype_setting = insertion_msg->outer_ethertype_setting; │ ethertype_setting = insertion_msg->outer_ethertype_setting;
if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) { │ if (ethertype_setting &&
│ ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
│ ethertype_setting)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; │ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto out; │ goto out;
} │ }
│
ethertype_setting = insertion_msg->inner_ethertype_setting; │ ethertype_setting = insertion_msg->inner_ethertype_setting;
if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) { │ if (ethertype_setting &&
│ ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
│ ethertype_setting)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; │ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto out; │ goto out;
} │ }
│
out: │ out:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2, │ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
v_ret, NULL, 0); │ v_ret, NULL, 0);
} │
linux/drivers/net/ethernet/sfc/nic.c:398 │ linux/drivers/net/ethernet/sfc/falcon/nic.c:385
│
const struct efx_nic_reg *reg; │ const struct ef4_nic_reg *reg;
const struct efx_nic_reg_table *table; │ const struct ef4_nic_reg_table *table;
│
for (reg = efx_nic_regs; │ for (reg = ef4_nic_regs;
reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); │ reg < ef4_nic_regs + ARRAY_SIZE(ef4_nic_regs);
reg++) { │ reg++) {
if (efx->type->revision >= reg->min_revision && │ if (efx->type->revision >= reg->min_revision &&
efx->type->revision <= reg->max_revision) { │ efx->type->revision <= reg->max_revision) {
efx_reado(efx, (efx_oword_t *)buf, reg->offset); │ ef4_reado(efx, (ef4_oword_t *)buf, reg->offset);
buf += sizeof(efx_oword_t); │ buf += sizeof(ef4_oword_t);
} │ }
} │ }
│
for (table = efx_nic_reg_tables; │ for (table = ef4_nic_reg_tables;
table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); │ table < ef4_nic_reg_tables + ARRAY_SIZE(ef4_nic_reg_tables);
table++) { │ table++) {
size_t size, i; │ size_t size, i;
│
if (!(efx->type->revision >= table->min_revision && │ if (!(efx->type->revision >= table->min_revision &&
efx->type->revision <= table->max_revision)) │ efx->type->revision <= table->max_revision))
continue; │ continue;
│
size = min_t(size_t, table->step, 16); │ size = min_t(size_t, table->step, 16);
│
for (i = 0; i < table->rows; i++) { │ for (i = 0; i < table->rows; i++) {
switch (table->step) { │ switch (table->step) {
case 4: /* 32-bit SRAM */ │ case 4: /* 32-bit SRAM */
efx_readd(efx, buf, table->offset + 4 * i); │ ef4_readd(efx, buf, table->offset + 4 * i);
break; │ break;
case 8: /* 64-bit SRAM */ │ case 8: /* 64-bit SRAM */
efx_sram_readq(efx, │ ef4_sram_readq(efx,
efx->membase + table->offset, │ efx->membase + table->offset,
buf, i); │ buf, i);
break; │ break;
case 16: /* 128-bit-readable register */ │ case 16: /* 128-bit-readable register */
efx_reado_table(efx, buf, table->offset, i); │ ef4_reado_table(efx, buf, table->offset, i);
break; │ break;
case 32: /* 128-bit register, interleaved */ │ case 32: /* 128-bit register, interleaved */
efx_reado_table(efx, buf, table->offset, 2 * i); │ ef4_reado_table(efx, buf, table->offset, 2 * i);
break; │ break;
default: │ default:
WARN_ON(1); │ WARN_ON(1);
return; │ return;
} │ }
buf += size; │ buf += size;
} │ }
} │ }
} │
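
The dump walks every row of each table and picks an accessor by the register stride, copying at most 16 bytes per row. A reduced sketch of that stride dispatch; the read helpers are stubs standing in for the efx/ef4 MMIO accessors, and the offset math is simplified relative to the table-indexed reads above.

#include <stddef.h>
#include <stdint.h>

static void read32(uint8_t *buf, size_t off)  { (void)buf; (void)off; }
static void read128(uint8_t *buf, size_t off) { (void)buf; (void)off; }

void dump_table(uint8_t *buf, size_t offset, size_t step, size_t rows)
{
	size_t size = step < 16 ? step : 16; /* never copy more than 16 bytes */
	size_t i;

	for (i = 0; i < rows; i++) {
		switch (step) {
		case 4:  /* 32-bit SRAM */
			read32(buf, offset + 4 * i);
			break;
		case 16: /* 128-bit-readable register */
			read128(buf, offset + 16 * i);
			break;
		case 32: /* 128-bit register, interleaved */
			read128(buf, offset + 32 * i);
			break;
		}
		buf += size;
	}
}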
linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:1645 │ linux/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c:323
│
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); │ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_comm_tqp *tqp; │ struct hclge_comm_tqp *tqp;
int i; │ int i;
│
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, │ hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
sizeof(struct hclge_comm_tqp), GFP_KERNEL); │ sizeof(struct hclge_comm_tqp), GFP_KERNEL);
if (!hdev->htqp) │ if (!hdev->htqp)
return -ENOMEM; │ return -ENOMEM;
│
tqp = hdev->htqp; │ tqp = hdev->htqp;
│
for (i = 0; i < hdev->num_tqps; i++) { │ for (i = 0; i < hdev->num_tqps; i++) {
tqp->dev = &hdev->pdev->dev; │ tqp->dev = &hdev->pdev->dev;
tqp->index = i; │ tqp->index = i;
│
tqp->q.ae_algo = &ae_algo; │ tqp->q.ae_algo = &ae_algovf;
tqp->q.buf_size = hdev->rx_buf_len; │ tqp->q.buf_size = hdev->rx_buf_len;
tqp->q.tx_desc_num = hdev->num_tx_desc; │ tqp->q.tx_desc_num = hdev->num_tx_desc;
tqp->q.rx_desc_num = hdev->num_rx_desc; │ tqp->q.rx_desc_num = hdev->num_rx_desc;
│
/* need an extended offset to configure queues >= │ /* need an extended offset to configure queues >=
* HCLGE_TQP_MAX_SIZE_DEV_V2 │ * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
*/ │ */
if (i < HCLGE_TQP_MAX_SIZE_DEV_V2) │ if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
tqp->q.io_base = hdev->hw.hw.io_base + │ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGE_TQP_REG_OFFSET + │ HCLGEVF_TQP_REG_OFFSET +
i * HCLGE_TQP_REG_SIZE; │ i * HCLGEVF_TQP_REG_SIZE;
else │ else
tqp->q.io_base = hdev->hw.hw.io_base + │ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGE_TQP_REG_OFFSET + │ HCLGEVF_TQP_REG_OFFSET +
HCLGE_TQP_EXT_REG_OFFSET + │ HCLGEVF_TQP_EXT_REG_OFFSET +
(i - HCLGE_TQP_MAX_SIZE_DEV_V2) * │ (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
HCLGE_TQP_REG_SIZE; │ HCLGEVF_TQP_REG_SIZE;
│
/* when device supports tx push and has device memory, │ /* when device supports tx push and has device memory,
* the queue can execute push mode or doorbell mode on │ * the queue can execute push mode or doorbell mode on
* device memory. │ * device memory.
*/ │ */
if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) │ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
tqp->q.mem_base = hdev->hw.hw.mem_base + │ tqp->q.mem_base = hdev->hw.hw.mem_base +
HCLGE_TQP_MEM_OFFSET(hdev, i); │ HCLGEVF_TQP_MEM_OFFSET(hdev, i);
│
tqp++; │ tqp++;
} │ }
│
return 0; │ return 0;
} │
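
The PF and VF variants compute a queue's register window identically, switching to an extended region once the index passes the V2 limit. A sketch of that address math; every constant here is a hypothetical placeholder for the corresponding HCLGE/HCLGEVF define.

#include <stdint.h>

#define TQP_REG_OFFSET      0x10000u /* hypothetical */
#define TQP_EXT_REG_OFFSET  0x30000u /* hypothetical */
#define TQP_REG_SIZE        0x200u   /* hypothetical */
#define TQP_MAX_SIZE_DEV_V2 1024u    /* hypothetical */

uintptr_t tqp_io_base(uintptr_t io_base, unsigned int i)
{
	if (i < TQP_MAX_SIZE_DEV_V2)
		return io_base + TQP_REG_OFFSET + i * TQP_REG_SIZE;

	/* queues past the V2 limit live in an extended register region */
	return io_base + TQP_REG_OFFSET + TQP_EXT_REG_OFFSET +
	       (uintptr_t)(i - TQP_MAX_SIZE_DEV_V2) * TQP_REG_SIZE;
}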
linux/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:561 │ linux/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:499
│
│ struct mlx5e_txqsq *icosq = ctx;
struct mlx5_rsc_key key = {}; │ struct mlx5_rsc_key key = {};
struct mlx5e_rq *rq = ctx; │
int err; │ int err;
│
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) │ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0; │ return 0;
│
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice"); │ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
if (err) │ if (err)
return err; │ return err;
│
key.size = PAGE_SIZE; │ key.size = PAGE_SIZE;
key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL; │ key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); │ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); │ err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); │ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); │ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
if (err) │ if (err)
return err; │ return err;
│
key.rsc = MLX5_SGMT_TYPE_FULL_QPC; │ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
key.index1 = rq->rqn; │ key.index1 = icosq->sqn;
key.num_of_obj1 = 1; │ key.num_of_obj1 = 1;
│
err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); │ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); │ err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff"); │ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
if (err) │ if (err)
return err; │ return err;
│
key.rsc = MLX5_SGMT_TYPE_RCV_BUFF; │ key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
key.num_of_obj2 = MLX5_RSC_DUMP_ALL; │ key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
│
err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); │ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); │ err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err) │ if (err)
return err; │ return err;
│
return mlx5e_health_fmsg_named_obj_nest_end(fmsg); │ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
} │
linux/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c:410 │ linux/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:499
│
│ struct mlx5e_txqsq *icosq = ctx;
struct mlx5_rsc_key key = {}; │ struct mlx5_rsc_key key = {};
struct mlx5e_txqsq *sq = ctx; │
int err; │ int err;
│
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) │ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0; │ return 0;
│
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); │ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
if (err) │ if (err)
return err; │ return err;
│
key.size = PAGE_SIZE; │ key.size = PAGE_SIZE;
key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL; │ key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); │ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); │ err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); │ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); │ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
if (err) │ if (err)
return err; │ return err;
│
key.rsc = MLX5_SGMT_TYPE_FULL_QPC; │ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
key.index1 = sq->sqn; │ key.index1 = icosq->sqn;
key.num_of_obj1 = 1; │ key.num_of_obj1 = 1;
│
err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); │ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); │ err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff"); │ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
if (err) │ if (err)
return err; │ return err;
│
key.rsc = MLX5_SGMT_TYPE_SND_BUFF; │ key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
key.num_of_obj2 = MLX5_RSC_DUMP_ALL; │ key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
│
err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); │ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); │ err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err) │ if (err)
return err; │ return err;
│
return mlx5e_health_fmsg_named_obj_nest_end(fmsg); │ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
} │
linux/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c:410 │ linux/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c:561
│
struct mlx5_rsc_key key = {}; │ struct mlx5_rsc_key key = {};
struct mlx5e_txqsq *sq = ctx; │ struct mlx5e_rq *rq = ctx;
int err; │ int err;
│
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) │ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0; │ return 0;
│
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); │ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
if (err) │ if (err)
return err; │ return err;
│
key.size = PAGE_SIZE; │ key.size = PAGE_SIZE;
key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL; │ key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); │ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); │ err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); │ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); │ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
if (err) │ if (err)
return err; │ return err;
│
key.rsc = MLX5_SGMT_TYPE_FULL_QPC; │ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
key.index1 = sq->sqn; │ key.index1 = rq->rqn;
key.num_of_obj1 = 1; │ key.num_of_obj1 = 1;
│
err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); │ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); │ err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff"); │ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff");
if (err) │ if (err)
return err; │ return err;
│
key.rsc = MLX5_SGMT_TYPE_SND_BUFF; │ key.rsc = MLX5_SGMT_TYPE_RCV_BUFF;
key.num_of_obj2 = MLX5_RSC_DUMP_ALL; │ key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); │ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
if (err) │ if (err)
return err; │ return err;
│
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); │ err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err) │ if (err)
return err; │ return err;
│
return mlx5e_health_fmsg_named_obj_nest_end(fmsg); │ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
} │
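
All three dump callbacks above share one shape: open a named nest, emit its contents, close it, and bail out on the first error so the caller sees exactly one failure. A condensed sketch of that propagation chain; the three helpers are stubs, not the devlink fmsg API.

#include <errno.h>

static int nest_start(const char *name) { (void)name; return 0; }
static int dump_obj(void)               { return 0; }
static int nest_end(void)               { return 0; }

int dump_named_obj(const char *name)
{
	int err;

	err = nest_start(name); /* open the named object */
	if (err)
		return err;
	err = dump_obj();       /* emit its contents */
	if (err)
		return err;
	return nest_end();      /* close it; the first failure wins */
}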
linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c:4928 │ linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c:5064
│
struct mlx4_priv *priv = mlx4_priv(dev); │ struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = │ struct mlx4_resource_tracker *tracker =
&priv->mfunc.master.res_tracker; │ &priv->mfunc.master.res_tracker;
struct list_head *mtt_list = │ struct list_head *fs_rule_list =
&tracker->slave_list[slave].res_list[RES_MTT]; │ &tracker->slave_list[slave].res_list[RES_FS_RULE];
struct res_mtt *mtt; │ struct res_fs_rule *fs_rule;
struct res_mtt *tmp; │ struct res_fs_rule *tmp;
int state; │ int state;
int base; │ u64 base;
int err; │ int err;
│
err = move_all_busy(dev, slave, RES_MTT); │ err = move_all_busy(dev, slave, RES_FS_RULE);
if (err) │ if (err)
mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for │ mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for s
slave); │ slave);
│
spin_lock_irq(mlx4_tlock(dev)); │ spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { │ list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev)); │ spin_unlock_irq(mlx4_tlock(dev));
if (mtt->com.owner == slave) { │ if (fs_rule->com.owner == slave) {
base = mtt->com.res_id; │ base = fs_rule->com.res_id;
state = mtt->com.from_state; │ state = fs_rule->com.from_state;
while (state != 0) { │ while (state != 0) {
switch (state) { │ switch (state) {
case RES_MTT_ALLOCATED: │ case RES_FS_RULE_ALLOCATED:
__mlx4_free_mtt_range(dev, base, │ /* detach rule */
mtt->order); │ err = mlx4_cmd(dev, base, 0, 0,
│ MLX4_QP_FLOW_STEERING_DETACH,
│ MLX4_CMD_TIME_CLASS_A,
│ MLX4_CMD_NATIVE);
│
spin_lock_irq(mlx4_tlock(dev)); │ spin_lock_irq(mlx4_tlock(dev));
rb_erase(&mtt->com.node, │ rb_erase(&fs_rule->com.node,
&tracker->res_tree[RES_MTT]); │ &tracker->res_tree[RES_FS_RULE]);
list_del(&mtt->com.list); │ list_del(&fs_rule->com.list);
spin_unlock_irq(mlx4_tlock(dev)); │ spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave, RES_MTT, │ kfree(fs_rule->mirr_mbox);
1 << mtt->order, 0); │ kfree(fs_rule);
kfree(mtt); │
state = 0; │ state = 0;
break; │ break;
│
default: │ default:
state = 0; │ state = 0;
} │ }
} │ }
} │ }
spin_lock_irq(mlx4_tlock(dev)); │ spin_lock_irq(mlx4_tlock(dev));
} │ }
spin_unlock_irq(mlx4_tlock(dev)); │ spin_unlock_irq(mlx4_tlock(dev));
} │
linux/drivers/net/ethernet/intel/igb/e1000_phy.c:1987 │ linux/drivers/net/ethernet/intel/igb/e1000_phy.c:2509
│
struct e1000_phy_info *phy = &hw->phy; │ struct e1000_phy_info *phy = &hw->phy;
s32 ret_val; │ s32 ret_val;
u16 data; │ u16 data;
bool link; │ bool link;
│
ret_val = igb_phy_has_link(hw, 1, 0, &link); │ ret_val = igb_phy_has_link(hw, 1, 0, &link);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
if (!link) { │ if (!link) {
hw_dbg("Phy info is only valid if link is up\n"); │ hw_dbg("Phy info is only valid if link is up\n");
ret_val = -E1000_ERR_CONFIG; │ ret_val = -E1000_ERR_CONFIG;
goto out; │ goto out;
} │ }
│
phy->polarity_correction = true; │ phy->polarity_correction = true;
│
ret_val = igb_check_polarity_igp(hw); │ ret_val = igb_check_polarity_82580(hw);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); │ ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false; │ phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false;
│
if ((data & IGP01E1000_PSSR_SPEED_MASK) == │ if ((data & I82580_PHY_STATUS2_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) { │ I82580_PHY_STATUS2_SPEED_1000MBPS) {
ret_val = phy->ops.get_cable_length(hw); │ ret_val = hw->phy.ops.get_cable_length(hw);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); │ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
if (ret_val) │ if (ret_val)
goto out; │ goto out;
│
phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) │ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
? e1000_1000t_rx_status_ok │ ? e1000_1000t_rx_status_ok
: e1000_1000t_rx_status_not_ok; │ : e1000_1000t_rx_status_not_ok;
│
phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) │ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
? e1000_1000t_rx_status_ok │ ? e1000_1000t_rx_status_ok
: e1000_1000t_rx_status_not_ok; │ : e1000_1000t_rx_status_not_ok;
} else { │ } else {
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; │ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
phy->local_rx = e1000_1000t_rx_status_undefined; │ phy->local_rx = e1000_1000t_rx_status_undefined;
phy->remote_rx = e1000_1000t_rx_status_undefined; │ phy->remote_rx = e1000_1000t_rx_status_undefined;
} │ }
│
out: │ out:
return ret_val; │ return ret_val;
} │
linux/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c:1877 │ linux/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c:1823
│
u16 packed_record[4]; │ u16 packed_record[4];
int ret; │ int ret;
│
if (sa_index >= NUMROWS_EGRESSSARECORD) │ if (sc_index >= NUMROWS_EGRESSSCRECORD)
return -EINVAL; │ return -EINVAL;
│
ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 0); │ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 4);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
counters->sa_hit_drop_redirect[0] = │ counters->sc_protected_pkts[0] =
packed_record[0] | (packed_record[1] << 16); │ packed_record[0] | (packed_record[1] << 16);
counters->sa_hit_drop_redirect[1] = │ counters->sc_protected_pkts[1] =
packed_record[2] | (packed_record[3] << 16); │ packed_record[2] | (packed_record[3] << 16);
│
ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 1); │ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 5);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
counters->sa_protected2_pkts[0] = │ counters->sc_encrypted_pkts[0] =
packed_record[0] | (packed_record[1] << 16); │ packed_record[0] | (packed_record[1] << 16);
counters->sa_protected2_pkts[1] = │ counters->sc_encrypted_pkts[1] =
packed_record[2] | (packed_record[3] << 16); │ packed_record[2] | (packed_record[3] << 16);
│
ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 2); │ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 6);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
counters->sa_protected_pkts[0] = │ counters->sc_protected_octets[0] =
packed_record[0] | (packed_record[1] << 16); │ packed_record[0] | (packed_record[1] << 16);
counters->sa_protected_pkts[1] = │ counters->sc_protected_octets[1] =
packed_record[2] | (packed_record[3] << 16); │ packed_record[2] | (packed_record[3] << 16);
│
ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 3); │ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 7);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
counters->sa_encrypted_pkts[0] = │ counters->sc_encrypted_octets[0] =
packed_record[0] | (packed_record[1] << 16); │ packed_record[0] | (packed_record[1] << 16);
counters->sa_encrypted_pkts[1] = │ counters->sc_encrypted_octets[1] =
packed_record[2] | (packed_record[3] << 16); │ packed_record[2] | (packed_record[3] << 16);
│
return 0; │ return 0;
} │
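
Each counter arrives as four 16-bit words: two words per 32-bit half, low word first, exactly as combined above. Assuming the two u32 halves form one 64-bit value (low half in element [0]), reassembly looks like this small sketch:

#include <stdint.h>
#include <stdio.h>

static uint64_t unpack_counter(const uint16_t rec[4])
{
	uint32_t lo = (uint32_t)rec[0] | ((uint32_t)rec[1] << 16);
	uint32_t hi = (uint32_t)rec[2] | ((uint32_t)rec[3] << 16);

	return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
	uint16_t rec[4] = { 0x5678, 0x1234, 0x0001, 0x0000 };

	/* prints 0x112345678 */
	printf("counter = 0x%llx\n", (unsigned long long)unpack_counter(rec));
	return 0;
}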
linux/drivers/net/ethernet/netronome/nfp/nfd3/dp.c:622 │ linux/drivers/net/ethernet/netronome/nfp/nfdk/dp.c:652
│
skb_checksum_none_assert(skb); │ skb_checksum_none_assert(skb);
│
if (!(dp->netdev->features & NETIF_F_RXCSUM)) │ if (!(dp->netdev->features & NETIF_F_RXCSUM))
return; │ return;
│
if (meta->csum_type) { │ if (meta->csum_type) {
skb->ip_summed = meta->csum_type; │ skb->ip_summed = meta->csum_type;
skb->csum = meta->csum; │ skb->csum = meta->csum;
u64_stats_update_begin(&r_vec->rx_sync); │ u64_stats_update_begin(&r_vec->rx_sync);
r_vec->hw_csum_rx_complete++; │ r_vec->hw_csum_rx_complete++;
u64_stats_update_end(&r_vec->rx_sync); │ u64_stats_update_end(&r_vec->rx_sync);
return; │ return;
} │ }
│
if (nfp_nfd3_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) { │ if (nfp_nfdk_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
u64_stats_update_begin(&r_vec->rx_sync); │ u64_stats_update_begin(&r_vec->rx_sync);
r_vec->hw_csum_rx_error++; │ r_vec->hw_csum_rx_error++;
u64_stats_update_end(&r_vec->rx_sync); │ u64_stats_update_end(&r_vec->rx_sync);
return; │ return;
} │ }
│
/* Assume that the firmware will never report inner CSUM_OK unless outer │ /* Assume that the firmware will never report inner CSUM_OK unless outer
* L4 headers were successfully parsed. FW will always report zero UDP │ * L4 headers were successfully parsed. FW will always report zero UDP
* checksum as CSUM_OK. │ * checksum as CSUM_OK.
*/ │ */
if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK || │ if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) { │ rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
__skb_incr_checksum_unnecessary(skb); │ __skb_incr_checksum_unnecessary(skb);
u64_stats_update_begin(&r_vec->rx_sync); │ u64_stats_update_begin(&r_vec->rx_sync);
r_vec->hw_csum_rx_ok++; │ r_vec->hw_csum_rx_ok++;
u64_stats_update_end(&r_vec->rx_sync); │ u64_stats_update_end(&r_vec->rx_sync);
} │ }
│
if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK || │ if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) { │ rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
__skb_incr_checksum_unnecessary(skb); │ __skb_incr_checksum_unnecessary(skb);
u64_stats_update_begin(&r_vec->rx_sync); │ u64_stats_update_begin(&r_vec->rx_sync);
r_vec->hw_csum_rx_inner_ok++; │ r_vec->hw_csum_rx_inner_ok++;
u64_stats_update_end(&r_vec->rx_sync); │ u64_stats_update_end(&r_vec->rx_sync);
} │ }
} │
linux/drivers/net/ethernet/intel/i40e/i40e_debugfs.c:289 │ linux/drivers/net/ethernet/intel/i40e/i40e_debugfs.c:327
│
struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]); │ for (i = 0; i < vsi->num_queue_pairs; i++) {
│ struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);
│
if (!tx_ring) │ if (!xdp_ring)
continue; │ continue;
│
dev_info(&pf->pdev->dev, │ dev_info(&pf->pdev->dev,
" tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\ │ " xdp_rings[%i]: state = %lu, queue_index = %d, reg_
i, *tx_ring->state, │ i, *xdp_ring->state,
tx_ring->queue_index, │ xdp_ring->queue_index,
tx_ring->reg_idx); │ xdp_ring->reg_idx);
dev_info(&pf->pdev->dev, │ dev_info(&pf->pdev->dev,
" tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_a │ " xdp_rings[%i]: next_to_use = %d, next_to_clean = %
i, │ i,
tx_ring->next_to_use, │ xdp_ring->next_to_use,
tx_ring->next_to_clean, │ xdp_ring->next_to_clean,
tx_ring->ring_active); │ xdp_ring->ring_active);
dev_info(&pf->pdev->dev, │ dev_info(&pf->pdev->dev,
" tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, rest │ " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %
i, tx_ring->stats.packets, │ i, xdp_ring->stats.packets,
tx_ring->stats.bytes, │ xdp_ring->stats.bytes,
tx_ring->tx_stats.restart_queue); │ xdp_ring->tx_stats.restart_queue);
dev_info(&pf->pdev->dev, │ dev_info(&pf->pdev->dev,
" tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld │ " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_o
i, │ i,
tx_ring->tx_stats.tx_busy, │ xdp_ring->tx_stats.tx_busy,
tx_ring->tx_stats.tx_done_old); │ xdp_ring->tx_stats.tx_done_old);
dev_info(&pf->pdev->dev, │ dev_info(&pf->pdev->dev,
" tx_rings[%i]: size = %i\n", │ " xdp_rings[%i]: size = %i\n",
i, tx_ring->size); │ i, xdp_ring->size);
dev_info(&pf->pdev->dev, │ dev_info(&pf->pdev->dev,
" tx_rings[%i]: DCB tc = %d\n", │ " xdp_rings[%i]: DCB tc = %d\n",
i, tx_ring->dcb_tc); │ i, xdp_ring->dcb_tc);
dev_info(&pf->pdev->dev, │ dev_info(&pf->pdev->dev,
" tx_rings[%i]: itr_setting = %d (%s)\n", │ " xdp_rings[%i]: itr_setting = %d (%s)\n",
i, tx_ring->itr_setting, │ i, xdp_ring->itr_setting,
ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed"); │ ITR_IS_DYNAMIC(xdp_ring->itr_setting) ?
│ "dynamic" : "fixed");
│ }
} │
linux/drivers/net/ethernet/cavium/liquidio/lio_main.c:2110 │ linux/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:1250
│
struct hwtstamp_config conf; │
struct lio *lio = GET_LIO(netdev); │ struct lio *lio = GET_LIO(netdev);
│ struct hwtstamp_config conf;
│
if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) │ if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
return -EFAULT; │ return -EFAULT;
│
switch (conf.tx_type) { │ switch (conf.tx_type) {
case HWTSTAMP_TX_ON: │ case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF: │ case HWTSTAMP_TX_OFF:
break; │ break;
default: │ default:
return -ERANGE; │ return -ERANGE;
} │ }
│
switch (conf.rx_filter) { │ switch (conf.rx_filter) {
case HWTSTAMP_FILTER_NONE: │ case HWTSTAMP_FILTER_NONE:
break; │ break;
case HWTSTAMP_FILTER_ALL: │ case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME: │ case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: │ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: │ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: │ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: │ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: │ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: │ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: │ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: │ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: │ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT: │ case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC: │ case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: │ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL: │ case HWTSTAMP_FILTER_NTP_ALL:
conf.rx_filter = HWTSTAMP_FILTER_ALL; │ conf.rx_filter = HWTSTAMP_FILTER_ALL;
break; │ break;
default: │ default:
return -ERANGE; │ return -ERANGE;
} │ }
│
if (conf.rx_filter == HWTSTAMP_FILTER_ALL) │ if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); │ ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
│
else │ else
ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); │ ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
│
return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; │ return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
} │
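
Both columns implement the same SIOCSHWTSTAMP contract: userspace hands in a struct hwtstamp_config through ifr_data, the driver validates tx_type and narrows rx_filter to what the hardware supports (here everything collapses to HWTSTAMP_FILTER_ALL), and the effective config is copied back. A minimal userspace sketch of the calling side — the interface name "eth0" and the chosen filter are illustrative, not taken from the driver:

/* Hedged sketch: enabling hardware timestamping from userspace via
 * SIOCSHWTSTAMP. "eth0" is an assumption; real code should take the
 * interface name as a parameter. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		/* The driver may have widened the filter, e.g. to
		 * HWTSTAMP_FILTER_ALL exactly as the handler above does. */
		printf("effective rx_filter = %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}
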
linux/drivers/net/ethernet/sfc/farch.c:513                                     │ linux/drivers/net/ethernet/sfc/falcon/farch.c:520
│
efx_oword_t rx_desc_ptr; │ ef4_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx; │ struct ef4_nic *efx = rx_queue->efx;
│ bool is_b0 = ef4_nic_rev(efx) >= EF4_REV_FALCON_B0;
│ bool iscsi_digest_en = is_b0;
bool jumbo_en; │ bool jumbo_en;
│
/* For kernel-mode queues in Siena, the JUMBO flag enables scatter. */ │ /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
jumbo_en = efx->rx_scatter; │ * DMA to continue after a PCIe page boundary (and scattering
│ * is not possible). In Falcon B0 and Siena, it enables
│ * scatter.
│ */
│ jumbo_en = !is_b0 || efx->rx_scatter;
│
netif_dbg(efx, hw, efx->net_dev, │ netif_dbg(efx, hw, efx->net_dev,
"RX queue %d ring in special buffers %d-%d\n", │ "RX queue %d ring in special buffers %d-%d\n",
efx_rx_queue_index(rx_queue), rx_queue->rxd.index, │ ef4_rx_queue_index(rx_queue), rx_queue->rxd.index,
rx_queue->rxd.index + rx_queue->rxd.entries - 1); │ rx_queue->rxd.index + rx_queue->rxd.entries - 1);
│
rx_queue->scatter_n = 0; │ rx_queue->scatter_n = 0;
│
/* Pin RX descriptor ring */ │ /* Pin RX descriptor ring */
efx_init_special_buffer(efx, &rx_queue->rxd); │ ef4_init_special_buffer(efx, &rx_queue->rxd);
│
/* Push RX descriptor ring to card */ │ /* Push RX descriptor ring to card */
EFX_POPULATE_OWORD_10(rx_desc_ptr, │ EF4_POPULATE_OWORD_10(rx_desc_ptr,
FRF_AZ_RX_ISCSI_DDIG_EN, true, │ FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
FRF_AZ_RX_ISCSI_HDIG_EN, true, │ FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, │ FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
FRF_AZ_RX_DESCQ_EVQ_ID, │ FRF_AZ_RX_DESCQ_EVQ_ID,
efx_rx_queue_channel(rx_queue)->channel, │ ef4_rx_queue_channel(rx_queue)->channel,
FRF_AZ_RX_DESCQ_OWNER_ID, 0, │ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
FRF_AZ_RX_DESCQ_LABEL, │ FRF_AZ_RX_DESCQ_LABEL,
efx_rx_queue_index(rx_queue), │ ef4_rx_queue_index(rx_queue),
FRF_AZ_RX_DESCQ_SIZE, │ FRF_AZ_RX_DESCQ_SIZE,
__ffs(rx_queue->rxd.entries), │ __ffs(rx_queue->rxd.entries),
FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , │ FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, │ FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
FRF_AZ_RX_DESCQ_EN, 1); │ FRF_AZ_RX_DESCQ_EN, 1);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, │ ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
efx_rx_queue_index(rx_queue)); │ ef4_rx_queue_index(rx_queue));
} │
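
The FRF_AZ_RX_DESCQ_SIZE field takes the ring size as a log2 code, which is why the driver writes __ffs(rx_queue->rxd.entries): for a power-of-two entry count, the index of the lowest set bit equals the exponent. A hedged userspace illustration, with __builtin_ctz() standing in for the kernel's 0-based __ffs():

/* Sketch: why __ffs() of a power-of-two ring size yields the log2
 * encoding the descriptor-queue SIZE field expects. */
#include <stdio.h>

int main(void)
{
	unsigned sizes[] = { 512, 1024, 2048, 4096 };

	for (unsigned i = 0; i < 4; i++)
		/* __builtin_ctz(n) matches the kernel's __ffs(n) for n != 0:
		 * 512 -> 9, 1024 -> 10, 2048 -> 11, 4096 -> 12 */
		printf("%u entries -> SIZE code %d\n",
		       sizes[i], __builtin_ctz(sizes[i]));
	return 0;
}
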
linux/drivers/net/ethernet/intel/igb/igb_main.c:4345                           │ linux/drivers/net/ethernet/intel/igc/igc_main.c:502
│
struct igb_adapter *adapter = netdev_priv(rx_ring->netdev); │ struct net_device *ndev = rx_ring->netdev;
struct device *dev = rx_ring->dev; │ struct device *dev = rx_ring->dev;
int size, res; │ u8 index = rx_ring->queue_index;
│ int size, desc_len, res;
│
/* XDP RX-queue info */ │ /* XDP RX-queue info */
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) │ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq); │ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, │ res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
rx_ring->queue_index, 0); │ rx_ring->q_vector->napi.napi_id);
if (res < 0) { │ if (res < 0) {
dev_err(dev, "Failed to register xdp_rxq index %u\n", │ netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
rx_ring->queue_index); │ index);
return res; │ return res;
} │ }
│
size = sizeof(struct igb_rx_buffer) * rx_ring->count; │ size = sizeof(struct igc_rx_buffer) * rx_ring->count;
│ rx_ring->rx_buffer_info = vzalloc(size);
rx_ring->rx_buffer_info = vmalloc(size); │
if (!rx_ring->rx_buffer_info) │ if (!rx_ring->rx_buffer_info)
goto err; │ goto err;
│
│ desc_len = sizeof(union igc_adv_rx_desc);
│
/* Round up to nearest 4K */ │ /* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); │ rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096); │ rx_ring->size = ALIGN(rx_ring->size, 4096);
│
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, │ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL); │ &rx_ring->dma, GFP_KERNEL);
│
if (!rx_ring->desc) │ if (!rx_ring->desc)
goto err; │ goto err;
│
rx_ring->next_to_alloc = 0; │ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0; │ rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0; │ rx_ring->next_to_use = 0;
│
rx_ring->xdp_prog = adapter->xdp_prog; │
│
return 0; │ return 0;
│
err: │ err:
xdp_rxq_info_unreg(&rx_ring->xdp_rxq); │ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info); │ vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL; │ rx_ring->rx_buffer_info = NULL;
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); │ netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
return -ENOMEM; │ return -ENOMEM;
} │
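
Both allocators compute the descriptor ring size as count times descriptor size and round it up to a 4 KiB multiple before dma_alloc_coherent(). The round-up is the kernel ALIGN() mask trick, valid only for power-of-two alignments; a small arithmetic sketch (the descriptor counts are illustrative):

/* Sketch of the kernel ALIGN() round-up used above; 4096 must be a
 * power of two for the mask trick to hold. */
#include <stdio.h>

#define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	/* e.g. 16-byte advanced RX descriptors */
	unsigned long even = 256 * 16;	/* 4096: already a 4K multiple */
	unsigned long odd  = 320 * 16;	/* 5120: rounds up to 8192     */

	printf("%lu -> %lu\n", even, ALIGN_POW2(even, 4096));
	printf("%lu -> %lu\n", odd,  ALIGN_POW2(odd, 4096));
	return 0;
}
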
linux/drivers/net/ethernet/intel/i40e/i40e_txrx.c:3509                         │ linux/drivers/net/ethernet/intel/ice/ice_txrx.c:1611
│
unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; │ unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
│
if (dma_mapping_error(tx_ring->dev, dma)) │ if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error; │ goto dma_error;
│
/* record length, and DMA address */ │ /* record length, and DMA address */
dma_unmap_len_set(tx_bi, len, size); │ dma_unmap_len_set(tx_buf, len, size);
dma_unmap_addr_set(tx_bi, dma, dma); │ dma_unmap_addr_set(tx_buf, dma, dma);
│
/* align size to end of page */ │ /* align size to end of page */
max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); │ max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
tx_desc->buffer_addr = cpu_to_le64(dma); │ tx_desc->buf_addr = cpu_to_le64(dma);
│
while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { │ /* account for data chunks larger than the hardware
│ * can handle
│ */
│ while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz = │ tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, │ ice_build_ctob(td_cmd, td_offset, max_data,
max_data, td_tag); │ td_tag);
│
tx_desc++; │ tx_desc++;
i++; │ i++;
desc_count++; │
│
if (i == tx_ring->count) { │ if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0); │ tx_desc = ICE_TX_DESC(tx_ring, 0);
i = 0; │ i = 0;
} │ }
│
dma += max_data; │ dma += max_data;
size -= max_data; │ size -= max_data;
│
max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; │ max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
tx_desc->buffer_addr = cpu_to_le64(dma); │ tx_desc->buf_addr = cpu_to_le64(dma);
} │ }
│
if (likely(!data_len)) │ if (likely(!data_len))
break; │ break;
│
tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, │ tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
size, td_tag); │ size, td_tag);
│
tx_desc++; │ tx_desc++;
i++; │ i++;
desc_count++; │
│
if (i == tx_ring->count) { │ if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0); │ tx_desc = ICE_TX_DESC(tx_ring, 0);
i = 0; │ i = 0;
} │ }
│
size = skb_frag_size(frag); │ size = skb_frag_size(frag);
data_len -= size; │ data_len -= size;
│
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, │ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
DMA_TO_DEVICE); │ DMA_TO_DEVICE);
│
tx_bi = &tx_ring->tx_bi[i]; │ tx_buf = &tx_ring->tx_buf[i];
} │
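
The one subtle line above is max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1): for a power-of-two boundary, -addr & (boundary - 1) yields the byte count from addr up to the next boundary, so the first chunk is stretched to end exactly on a read-request boundary and every later chunk starts aligned. A hedged worked example, with 4096 standing in for the read-request size:

/* Sketch: -addr & (align - 1) == bytes remaining to the next
 * align-boundary (align must be a power of two). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t align = 4096;	/* stand-in for the max read-request size */
	uint64_t addrs[] = { 0x1000, 0x1ff0, 0x2a38 };

	for (int i = 0; i < 3; i++) {
		uint64_t pad = (0 - addrs[i]) & (align - 1);
		/* 0x1000 -> 0 (already aligned), 0x1ff0 -> 0x10,
		 * 0x2a38 -> 0x5c8 */
		printf("addr %#llx: %llu bytes to next boundary\n",
		       (unsigned long long)addrs[i],
		       (unsigned long long)pad);
	}
	return 0;
}
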
linux/drivers/net/ethernet/sfc/efx.c:205                                       │ linux/drivers/net/ethernet/sfc/falcon/efx.c:1157
│
struct efx_nic *other, *next; │ struct ef4_nic *other, *next;
│
if (efx->primary == efx) { │ if (efx->primary == efx) {
/* Adding primary function; look for secondaries */ │ /* Adding primary function; look for secondaries */
│
netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n"); │ netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
list_add_tail(&efx->node, &efx_primary_list); │ list_add_tail(&efx->node, &ef4_primary_list);
│
list_for_each_entry_safe(other, next, &efx_unassociated_list, │ list_for_each_entry_safe(other, next, &ef4_unassociated_list,
node) { │ node) {
if (efx_same_controller(efx, other)) { │ if (ef4_same_controller(efx, other)) {
list_del(&other->node); │ list_del(&other->node);
netif_dbg(other, probe, other->net_dev, │ netif_dbg(other, probe, other->net_dev,
"moving to secondary list of %s %s\n", │ "moving to secondary list of %s %s\n",
pci_name(efx->pci_dev), │ pci_name(efx->pci_dev),
efx->net_dev->name); │ efx->net_dev->name);
list_add_tail(&other->node, │ list_add_tail(&other->node,
&efx->secondary_list); │ &efx->secondary_list);
other->primary = efx; │ other->primary = efx;
} │ }
} │ }
} else { │ } else {
/* Adding secondary function; look for primary */ │ /* Adding secondary function; look for primary */
│
list_for_each_entry(other, &efx_primary_list, node) { │ list_for_each_entry(other, &ef4_primary_list, node) {
if (efx_same_controller(efx, other)) { │ if (ef4_same_controller(efx, other)) {
netif_dbg(efx, probe, efx->net_dev, │ netif_dbg(efx, probe, efx->net_dev,
"adding to secondary list of %s %s\n", │ "adding to secondary list of %s %s\n",
pci_name(other->pci_dev), │ pci_name(other->pci_dev),
other->net_dev->name); │ other->net_dev->name);
list_add_tail(&efx->node, │ list_add_tail(&efx->node,
&other->secondary_list); │ &other->secondary_list);
efx->primary = other; │ efx->primary = other;
return; │ return;
} │ }
} │ }
│
netif_dbg(efx, probe, efx->net_dev, │ netif_dbg(efx, probe, efx->net_dev,
"adding to unassociated list\n"); │ "adding to unassociated list\n");
list_add_tail(&efx->node, &efx_unassociated_list); │ list_add_tail(&efx->node, &ef4_unassociated_list);
} │ }
} │
linux/drivers/net/ethernet/nvidia/forcedeth.c:1823                             │ linux/drivers/net/ethernet/nvidia/forcedeth.c:1864
│
struct fe_priv *np = netdev_priv(dev); │ struct fe_priv *np = netdev_priv(dev);
struct ring_desc *less_rx; │ struct ring_desc_ex *less_rx;
│
less_rx = np->get_rx.orig; │ less_rx = np->get_rx.ex;
if (less_rx-- == np->rx_ring.orig) │ if (less_rx-- == np->rx_ring.ex)
less_rx = np->last_rx.orig; │ less_rx = np->last_rx.ex;
│
while (np->put_rx.orig != less_rx) { │ while (np->put_rx.ex != less_rx) {
struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);  │                 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
if (likely(skb)) { │ if (likely(skb)) {
np->put_rx_ctx->skb = skb; │ np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, │ np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data, │ skb->data,
skb_tailroom(skb), │ skb_tailroom(skb),
DMA_FROM_DEVICE); │ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&np->pci_dev->dev, │ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
np->put_rx_ctx->dma))) { │ np->put_rx_ctx->dma))) {
kfree_skb(skb); │ kfree_skb(skb);
goto packet_dropped; │ goto packet_dropped;
} │ }
np->put_rx_ctx->dma_len = skb_tailroom(skb); │ np->put_rx_ctx->dma_len = skb_tailroom(skb);
np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);                       │                 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
                                                                               │                 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
wmb(); │ wmb();
np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);           │                 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) │ if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
np->put_rx.orig = np->rx_ring.orig; │ np->put_rx.ex = np->rx_ring.ex;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) │ if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->rx_skb; │ np->put_rx_ctx = np->rx_skb;
} else { │ } else {
packet_dropped: │ packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp); │ u64_stats_update_begin(&np->swstats_rx_syncp);
nv_txrx_stats_inc(stat_rx_dropped); │ nv_txrx_stats_inc(stat_rx_dropped);
u64_stats_update_end(&np->swstats_rx_syncp); │ u64_stats_update_end(&np->swstats_rx_syncp);
return 1; │ return 1;
} │ }
} │ }
return 0; │ return 0;
} │
linux/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c:3864                   │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c:8085
│
struct sk_buff *skb = first->skb; │ struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0; │ u32 vlan_macip_lens = 0;
u32 fceof_saidx = 0; │ u32 fceof_saidx = 0;
u32 type_tucmd = 0; │ u32 type_tucmd = 0;
│
if (skb->ip_summed != CHECKSUM_PARTIAL) │ if (skb->ip_summed != CHECKSUM_PARTIAL) {
│ csum_failed:
│ if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
│ IXGBE_TX_FLAGS_CC)))
│ return;
goto no_csum; │ goto no_csum;
│ }
│
switch (skb->csum_offset) { │ switch (skb->csum_offset) {
case offsetof(struct tcphdr, check): │ case offsetof(struct tcphdr, check):
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; │ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
fallthrough; │ fallthrough;
case offsetof(struct udphdr, check): │ case offsetof(struct udphdr, check):
break; │ break;
case offsetof(struct sctphdr, checksum): │ case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */ │ /* validate that this is actually an SCTP request */
if (skb_csum_is_sctp(skb)) { │ if (skb_csum_is_sctp(skb)) {
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; │ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break; │ break;
} │ }
fallthrough; │ fallthrough;
default: │ default:
skb_checksum_help(skb); │ skb_checksum_help(skb);
goto no_csum; │ goto csum_failed;
} │ }
│
if (first->protocol == htons(ETH_P_IP)) │
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; │
│
/* update TX checksum flag */ │ /* update TX checksum flag */
first->tx_flags |= IXGBE_TX_FLAGS_CSUM; │ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
vlan_macip_lens = skb_checksum_start_offset(skb) - │ vlan_macip_lens = skb_checksum_start_offset(skb) -
skb_network_offset(skb); │ skb_network_offset(skb);
no_csum: │ no_csum:
/* vlan_macip_lens: MACLEN, VLAN tag */ │ /* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; │ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; │ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
│
fceof_saidx |= itd->pfsa; │ fceof_saidx |= itd->sa_idx;
type_tucmd |= itd->flags | itd->trailer_len; │ type_tucmd |= itd->flags | itd->trailer_len;
│
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, │ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
fceof_saidx, type_tucmd, 0); │
} │
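
The checksum dispatch works because TCP, UDP and SCTP keep their checksum fields at distinct offsets inside their headers, so skb->csum_offset alone identifies the L4 protocol. A sketch printing the offsets the switch is matching, assuming the uapi header layouts:

/* Sketch: the distinct checksum offsets that make the csum_offset
 * dispatch above unambiguous. */
#include <stdio.h>
#include <stddef.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>

int main(void)
{
	printf("tcphdr.check     at %zu\n", offsetof(struct tcphdr, check));	/* 16 */
	printf("udphdr.check     at %zu\n", offsetof(struct udphdr, check));	/*  6 */
	printf("sctphdr.checksum at %zu\n", offsetof(struct sctphdr, checksum)); /* 8 */
	return 0;
}
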
linux/drivers/net/ethernet/sfc/farch.c:104                                     │ linux/drivers/net/ethernet/sfc/falcon/farch.c:101
│
unsigned address = 0; │ unsigned address = 0;
int i, j; │ int i, j;
efx_oword_t mask, imask, original, reg, buf; │ ef4_oword_t mask, imask, original, reg, buf;
│
for (i = 0; i < n_regs; ++i) { │ for (i = 0; i < n_regs; ++i) {
address = regs[i].address; │ address = regs[i].address;
mask = imask = regs[i].mask; │ mask = imask = regs[i].mask;
EFX_INVERT_OWORD(imask); │ EF4_INVERT_OWORD(imask);
│
efx_reado(efx, &original, address); │ ef4_reado(efx, &original, address);
│
/* bit sweep on and off */ │ /* bit sweep on and off */
for (j = 0; j < 128; j++) { │ for (j = 0; j < 128; j++) {
if (!EFX_EXTRACT_OWORD32(mask, j, j)) │ if (!EF4_EXTRACT_OWORD32(mask, j, j))
continue; │ continue;
│
/* Test this testable bit can be set in isolation */ │ /* Test this testable bit can be set in isolation */
EFX_AND_OWORD(reg, original, mask); │ EF4_AND_OWORD(reg, original, mask);
EFX_SET_OWORD32(reg, j, j, 1); │ EF4_SET_OWORD32(reg, j, j, 1);
│
efx_writeo(efx, ®, address); │ ef4_writeo(efx, ®, address);
efx_reado(efx, &buf, address); │ ef4_reado(efx, &buf, address);
│
if (efx_masked_compare_oword(®, &buf, &mask)) │ if (ef4_masked_compare_oword(®, &buf, &mask))
goto fail; │ goto fail;
│
/* Test this testable bit can be cleared in isolation */ │ /* Test this testable bit can be cleared in isolation */
EFX_OR_OWORD(reg, original, mask); │ EF4_OR_OWORD(reg, original, mask);
EFX_SET_OWORD32(reg, j, j, 0); │ EF4_SET_OWORD32(reg, j, j, 0);
│
efx_writeo(efx, ®, address); │ ef4_writeo(efx, ®, address);
efx_reado(efx, &buf, address); │ ef4_reado(efx, &buf, address);
│
if (efx_masked_compare_oword(®, &buf, &mask)) │ if (ef4_masked_compare_oword(®, &buf, &mask))
goto fail; │ goto fail;
} │ }
│
efx_writeo(efx, &original, address); │ ef4_writeo(efx, &original, address);
} │ }
│
return 0; │ return 0;
│
fail: │ fail:
netif_err(efx, hw, efx->net_dev, │ netif_err(efx, hw, efx->net_dev,
"wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT │ "wrote "EF4_OWORD_FMT" read "EF4_OWORD_FMT
" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), │ " at address 0x%x mask "EF4_OWORD_FMT"\n", EF4_OWORD_VAL(reg),
EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); │ EF4_OWORD_VAL(buf), address, EF4_OWORD_VAL(mask));
return -EIO; │ return -EIO;
} │
linux/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c:2111                    │ linux/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c:1399
│
struct qlcnic_adapter *adapter = netdev_priv(netdev); │ struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring; │ struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_host_rds_ring *rds_ring; │ struct qlcnic_host_rds_ring *rds_ring;
int ring; │ u16 adapter_state = adapter->is_up;
│ u8 ring;
int ret; │ int ret;
│
netif_device_detach(netdev); │ netif_device_detach(netdev);
│
if (netif_running(netdev)) │ if (netif_running(netdev))
__qlcnic_down(adapter, netdev); │ __qlcnic_down(adapter, netdev);
│
qlcnic_detach(adapter); │ qlcnic_detach(adapter);
│
adapter->drv_sds_rings = QLCNIC_SINGLE_RING; │ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->ahw->diag_test = test; │ adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0; │ adapter->ahw->linkup = 0;
│
ret = qlcnic_attach(adapter); │ ret = qlcnic_attach(adapter);
if (ret) { │ if (ret) {
netif_device_attach(netdev); │ netif_device_attach(netdev);
return ret; │ return ret;
} │ }
│
ret = qlcnic_fw_create_ctx(adapter); │ ret = qlcnic_fw_create_ctx(adapter);
if (ret) { │ if (ret) {
qlcnic_detach(adapter); │ qlcnic_detach(adapter);
│ if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
│ adapter->drv_sds_rings = num_sds_ring;
│ qlcnic_attach(adapter);
│ }
netif_device_attach(netdev); │ netif_device_attach(netdev);
return ret; │ return ret;
} │ }
│
for (ring = 0; ring < adapter->max_rds_rings; ring++) { │ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx->rds_rings[ring]; │ rds_ring = &adapter->recv_ctx->rds_rings[ring];
qlcnic_post_rx_buffers(adapter, rds_ring, ring); │ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
} │ }
│
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { │ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->drv_sds_rings; ring++) { │ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring]; │ sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_enable_sds_intr(adapter, sds_ring); │ qlcnic_enable_sds_intr(adapter, sds_ring);
} │ }
} │ }
│
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { │ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
adapter->ahw->loopback_state = 0; │ adapter->ahw->loopback_state = 0;
qlcnic_linkevent_request(adapter, 1); │ adapter->ahw->hw_ops->setup_link_event(adapter, 1);
} │ }
│
set_bit(__QLCNIC_DEV_UP, &adapter->state); │ set_bit(__QLCNIC_DEV_UP, &adapter->state);
│
return 0; │ return 0;
} │
linux/drivers/net/ethernet/mellanox/mlx4/fw.c:2356                             │ linux/drivers/net/ethernet/mellanox/mlx4/fw.c:2258
│
struct mlx4_priv *priv = mlx4_priv(dev); │ struct mlx4_priv *priv = mlx4_priv(dev);
int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); │ int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
int err; │ int err;
│
if (port < 0) │ if (port < 0)
return -EINVAL; │ return -EINVAL;
│
if (!(priv->mfunc.master.slave_state[slave].init_port_mask & │ if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
(1 << port))) │
return 0; │ return 0;
│
if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { │ if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
if (priv->mfunc.master.init_port_ref[port] == 1) { │ /* Enable port only if it was previously disabled */
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, │ if (!priv->mfunc.master.init_port_ref[port]) {
│ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); │ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err) │ if (err)
return err; │ return err;
} │ }
priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); │ priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
} else { │ } else {
/* infiniband port */ │
if (slave == mlx4_master_func_num(dev)) { │ if (slave == mlx4_master_func_num(dev)) {
if (!priv->mfunc.master.qp0_state[port].qp0_active && │ if (check_qp0_state(dev, slave, port) &&
priv->mfunc.master.qp0_state[port].port_active) { │ !priv->mfunc.master.qp0_state[port].port_active) {
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, │ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); │ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err) │ if (err)
return err; │ return err;
priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);          │                 priv->mfunc.master.qp0_state[port].port_active = 1;
priv->mfunc.master.qp0_state[port].port_active = 0;                            │                 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
}                                                                              │                 }
} else │ } else
priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);          │                 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
} │ }
--priv->mfunc.master.init_port_ref[port]; │ ++priv->mfunc.master.init_port_ref[port];
return 0; │ return 0;
} │
linux/drivers/net/ethernet/intel/e1000e/phy.c:3146                             │ linux/drivers/net/ethernet/intel/igb/e1000_phy.c:2509
│
struct e1000_phy_info *phy = &hw->phy; │ struct e1000_phy_info *phy = &hw->phy;
s32 ret_val; │ s32 ret_val;
u16 data; │ u16 data;
bool link; │ bool link;
│
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); │ ret_val = igb_phy_has_link(hw, 1, 0, &link);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
if (!link) { │ if (!link) {
e_dbg("Phy info is only valid if link is up\n"); │ hw_dbg("Phy info is only valid if link is up\n");
return -E1000_ERR_CONFIG; │ ret_val = -E1000_ERR_CONFIG;
│ goto out;
} │ }
│
phy->polarity_correction = true; │ phy->polarity_correction = true;
│
ret_val = e1000_check_polarity_82577(hw); │ ret_val = igb_check_polarity_82580(hw);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); │ ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); │ phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false;
│
if ((data & I82577_PHY_STATUS2_SPEED_MASK) == │ if ((data & I82580_PHY_STATUS2_SPEED_MASK) ==
I82577_PHY_STATUS2_SPEED_1000MBPS) { │ I82580_PHY_STATUS2_SPEED_1000MBPS) {
ret_val = hw->phy.ops.get_cable_length(hw); │ ret_val = hw->phy.ops.get_cable_length(hw);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
ret_val = e1e_rphy(hw, MII_STAT1000, &data); │ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
phy->local_rx = (data & LPA_1000LOCALRXOK) │ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; │ ? e1000_1000t_rx_status_ok
│ : e1000_1000t_rx_status_not_ok;
phy->remote_rx = (data & LPA_1000REMRXOK) │
? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; │ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
│ ? e1000_1000t_rx_status_ok
│ : e1000_1000t_rx_status_not_ok;
} else { │ } else {
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; │ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
phy->local_rx = e1000_1000t_rx_status_undefined; │ phy->local_rx = e1000_1000t_rx_status_undefined;
phy->remote_rx = e1000_1000t_rx_status_undefined; │ phy->remote_rx = e1000_1000t_rx_status_undefined;
} │ }
│
return 0; │ out:
│ return ret_val;
} │
linux/drivers/net/ethernet/sfc/farch.c:317                                     │ linux/drivers/net/ethernet/sfc/falcon/farch.c:307
│
struct efx_tx_buffer *buffer; │ struct ef4_tx_buffer *buffer;
efx_qword_t *txd; │ ef4_qword_t *txd;
unsigned write_ptr; │ unsigned write_ptr;
unsigned old_write_count = tx_queue->write_count; │ unsigned old_write_count = tx_queue->write_count;
│
tx_queue->xmit_pending = false; │ tx_queue->xmit_more_available = false;
if (unlikely(tx_queue->write_count == tx_queue->insert_count)) │ if (unlikely(tx_queue->write_count == tx_queue->insert_count))
return; │ return;
│
do { │ do {
write_ptr = tx_queue->write_count & tx_queue->ptr_mask; │ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[write_ptr]; │ buffer = &tx_queue->buffer[write_ptr];
txd = efx_tx_desc(tx_queue, write_ptr); │ txd = ef4_tx_desc(tx_queue, write_ptr);
++tx_queue->write_count; │ ++tx_queue->write_count;
│
EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION); │ EF4_BUG_ON_PARANOID(buffer->flags & EF4_TX_BUF_OPTION);
│
/* Create TX descriptor ring entry */ │ /* Create TX descriptor ring entry */
BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); │ BUILD_BUG_ON(EF4_TX_BUF_CONT != 1);
EFX_POPULATE_QWORD_4(*txd, │ EF4_POPULATE_QWORD_4(*txd,
FSF_AZ_TX_KER_CONT, │ FSF_AZ_TX_KER_CONT,
buffer->flags & EFX_TX_BUF_CONT, │ buffer->flags & EF4_TX_BUF_CONT,
FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, │ FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
FSF_AZ_TX_KER_BUF_REGION, 0, │ FSF_AZ_TX_KER_BUF_REGION, 0,
FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); │ FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
} while (tx_queue->write_count != tx_queue->insert_count); │ } while (tx_queue->write_count != tx_queue->insert_count);
│
wmb(); /* Ensure descriptors are written before they are fetched */ │ wmb(); /* Ensure descriptors are written before they are fetched */
│
if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { │ if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count)) {
txd = efx_tx_desc(tx_queue, │ txd = ef4_tx_desc(tx_queue,
old_write_count & tx_queue->ptr_mask); │ old_write_count & tx_queue->ptr_mask);
efx_farch_push_tx_desc(tx_queue, txd); │ ef4_farch_push_tx_desc(tx_queue, txd);
++tx_queue->pushes; │ ++tx_queue->pushes;
} else { │ } else {
efx_farch_notify_tx_desc(tx_queue); │ ef4_farch_notify_tx_desc(tx_queue);
} │ }
} │
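
write_count and insert_count are free-running counters; write_count & ptr_mask maps them into the ring, which presumes a power-of-two ring size (ptr_mask = entries - 1), and the write_count != insert_count loop condition then survives counter wraparound with no modulo. A small sketch of the idiom:

/* Sketch: free-running counters masked into a power-of-two ring,
 * as in the TX write loop above. */
#include <stdio.h>

#define ENTRIES  8u		/* must be a power of two */
#define PTR_MASK (ENTRIES - 1)

int main(void)
{
	unsigned insert_count = 13;	/* producer has queued 13 buffers total */
	unsigned write_count  = 10;	/* 10 already handed to hardware */

	while (write_count != insert_count) {
		unsigned write_ptr = write_count & PTR_MASK;

		printf("count %u -> slot %u\n", write_count, write_ptr);
		++write_count;	/* counters never wrap back; the mask does */
	}
	return 0;
}
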
linux/drivers/net/ethernet/intel/e1000e/phy.c:1952                             │ linux/drivers/net/ethernet/intel/igb/e1000_phy.c:1987
│
struct e1000_phy_info *phy = &hw->phy; │ struct e1000_phy_info *phy = &hw->phy;
s32 ret_val; │ s32 ret_val;
u16 data; │ u16 data;
bool link; │ bool link;
│
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); │ ret_val = igb_phy_has_link(hw, 1, 0, &link);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
if (!link) { │ if (!link) {
e_dbg("Phy info is only valid if link is up\n"); │ hw_dbg("Phy info is only valid if link is up\n");
return -E1000_ERR_CONFIG; │ ret_val = -E1000_ERR_CONFIG;
│ goto out;
} │ }
│
phy->polarity_correction = true; │ phy->polarity_correction = true;
│
ret_val = e1000_check_polarity_igp(hw); │ ret_val = igb_check_polarity_igp(hw);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); │ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); │ phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false;
│
if ((data & IGP01E1000_PSSR_SPEED_MASK) == │ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) { │ IGP01E1000_PSSR_SPEED_1000MBPS) {
ret_val = phy->ops.get_cable_length(hw); │ ret_val = phy->ops.get_cable_length(hw);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
ret_val = e1e_rphy(hw, MII_STAT1000, &data); │ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
if (ret_val) │ if (ret_val)
return ret_val; │ goto out;
│
phy->local_rx = (data & LPA_1000LOCALRXOK) │ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; │ ? e1000_1000t_rx_status_ok
│ : e1000_1000t_rx_status_not_ok;
phy->remote_rx = (data & LPA_1000REMRXOK) │
? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; │ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
│ ? e1000_1000t_rx_status_ok
│ : e1000_1000t_rx_status_not_ok;
} else { │ } else {
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; │ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
phy->local_rx = e1000_1000t_rx_status_undefined; │ phy->local_rx = e1000_1000t_rx_status_undefined;
phy->remote_rx = e1000_1000t_rx_status_undefined; │ phy->remote_rx = e1000_1000t_rx_status_undefined;
} │ }
│
│ out:
return ret_val; │ return ret_val;
} │
linux/drivers/net/ethernet/intel/e1000e/phy.c:1952                             │ linux/drivers/net/ethernet/intel/e1000e/phy.c:3146
│
struct e1000_phy_info *phy = &hw->phy; │ struct e1000_phy_info *phy = &hw->phy;
s32 ret_val; │ s32 ret_val;
u16 data; │ u16 data;
bool link; │ bool link;
│
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); │ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
if (ret_val) │ if (ret_val)
return ret_val; │ return ret_val;
│
if (!link) { │ if (!link) {
e_dbg("Phy info is only valid if link is up\n"); │ e_dbg("Phy info is only valid if link is up\n");
return -E1000_ERR_CONFIG; │ return -E1000_ERR_CONFIG;
} │ }
│
phy->polarity_correction = true; │ phy->polarity_correction = true;
│
ret_val = e1000_check_polarity_igp(hw); │ ret_val = e1000_check_polarity_82577(hw);
if (ret_val) │ if (ret_val)
return ret_val; │ return ret_val;
│
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); │ ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
if (ret_val) │ if (ret_val)
return ret_val; │ return ret_val;
│
phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); │ phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
│
if ((data & IGP01E1000_PSSR_SPEED_MASK) == │ if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) { │ I82577_PHY_STATUS2_SPEED_1000MBPS) {
ret_val = phy->ops.get_cable_length(hw); │ ret_val = hw->phy.ops.get_cable_length(hw);
if (ret_val) │ if (ret_val)
return ret_val; │ return ret_val;
│
ret_val = e1e_rphy(hw, MII_STAT1000, &data); │ ret_val = e1e_rphy(hw, MII_STAT1000, &data);
if (ret_val) │ if (ret_val)
return ret_val; │ return ret_val;
│
phy->local_rx = (data & LPA_1000LOCALRXOK) │ phy->local_rx = (data & LPA_1000LOCALRXOK)
? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; │ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
│
phy->remote_rx = (data & LPA_1000REMRXOK) │ phy->remote_rx = (data & LPA_1000REMRXOK)
? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; │ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
} else { │ } else {
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; │ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
phy->local_rx = e1000_1000t_rx_status_undefined; │ phy->local_rx = e1000_1000t_rx_status_undefined;
phy->remote_rx = e1000_1000t_rx_status_undefined; │ phy->remote_rx = e1000_1000t_rx_status_undefined;
} │ }
│
return ret_val; │ return 0;
} │
linux/drivers/net/ethernet/chelsio/cxgb4/sge.c:1786                            │ linux/drivers/net/ethernet/chelsio/cxgb4vf/sge.c:852
│
unsigned int flits; │ unsigned int flits;
│
/* If the skb is small enough, we can pump it out as a work request │ /*
│ * If the skb is small enough, we can pump it out as a work request
* with only immediate data. In that case we just have to have the │ * with only immediate data. In that case we just have to have the
* TX Packet header plus the skb data in the Work Request. │ * TX Packet header plus the skb data in the Work Request.
*/ │ */
if (t4vf_is_eth_imm(skb)) │ if (is_eth_imm(skb))
return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), │ return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
sizeof(__be64)); │ sizeof(__be64));
│
/* Otherwise, we're going to have to construct a Scatter gather list │ /*
│ * Otherwise, we're going to have to construct a Scatter gather list
* of the skb body and fragments. We also include the flits necessary │ * of the skb body and fragments. We also include the flits necessary
* for the TX Packet Work Request and CPL. We always have a firmware │ * for the TX Packet Work Request and CPL. We always have a firmware
* Write Header (incorporated as part of the cpl_tx_pkt_lso and │ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
* cpl_tx_pkt structures), followed by either a TX Packet Write CPL │ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
* message or, if we're doing a Large Send Offload, an LSO CPL message │ * message or, if we're doing a Large Send Offload, an LSO CPL message
* with an embedded TX Packet Write CPL message. │ * with an embedded TX Packet Write CPL message.
*/ │ */
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); │ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
if (skb_shinfo(skb)->gso_size) │ if (skb_shinfo(skb)->gso_size)
flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + │ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
sizeof(struct cpl_tx_pkt_lso_core) + │ sizeof(struct cpl_tx_pkt_lso_core) +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); │ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
else │ else
flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + │ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); │ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
return flits; │ return flits;
} │
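
A flit here is one __be64, i.e. 8 bytes, so the immediate-data path is simply DIV_ROUND_UP(header + payload, 8). A hedged arithmetic sketch of that branch — the 16-byte CPL header size is an assumption standing in for sizeof(struct cpl_tx_pkt):

/* Sketch: flit count for the immediate-data TX path above; one flit
 * is one __be64 (8 bytes). */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned skb_len = 60;	/* minimal Ethernet frame */
	unsigned cpl_len = 16;	/* assumed CPL header size */

	/* 76 bytes -> ceil(76 / 8) = 10 flits */
	printf("%u flits\n", DIV_ROUND_UP(skb_len + cpl_len, 8));
	return 0;
}
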
linux/drivers/net/ethernet/intel/i40e/i40e_common.c:6002                       │ linux/drivers/net/ethernet/intel/i40e/i40e_common.c:6096
│
struct i40e_aq_desc desc; │ struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd = │ struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; │ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
i40e_status status; │ i40e_status status;
u16 buff_len; │ u16 buff_len;
int i; │ int i;
│
i40e_fill_default_direct_cmd_desc(&desc, │ i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters); │ i40e_aqc_opc_remove_cloud_filters);
│
buff_len = filter_count * sizeof(*filters); │ buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len); │ desc.datalen = cpu_to_le16(buff_len);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); │ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
cmd->num_filters = filter_count; │ cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid); │ cmd->seid = cpu_to_le16(seid);
cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; │ cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
│
for (i = 0; i < filter_count; i++) { │ for (i = 0; i < filter_count; i++) {
u16 tnl_type; │ u16 tnl_type;
u32 ti; │ u32 ti;
│
tnl_type = (le16_to_cpu(filters[i].element.flags) & │ tnl_type = (le16_to_cpu(filters[i].element.flags) &
I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> │ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; │ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
│
/* Due to hardware eccentricities, the VNI for Geneve is shifted │ /* Due to hardware eccentricities, the VNI for Geneve is shifted
* one more byte further than normally used for Tenant ID in │ * one more byte further than normally used for Tenant ID in
* other tunnel types. │ * other tunnel types.
*/ │ */
if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { │ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
ti = le32_to_cpu(filters[i].element.tenant_id); │ ti = le32_to_cpu(filters[i].element.tenant_id);
filters[i].element.tenant_id = cpu_to_le32(ti << 8); │ filters[i].element.tenant_id = cpu_to_le32(ti << 8);
} │ }
} │ }
│
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); │ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
│
return status; │ return status;
} │
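
The Geneve fix-up is purely positional: the 24-bit VNI must sit one byte higher in the little-endian tenant_id field than the tenant ID of other tunnel types, hence cpu_to_le32(ti << 8). A worked example of the byte movement:

/* Sketch: the one-byte VNI shift applied for Geneve filters above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t vni = 0x00abcdef;	/* 24-bit Geneve VNI */
	uint32_t shifted = vni << 8;	/* 0xabcdef00 */

	/* Bytes as they land in the little-endian descriptor field:
	 * non-Geneve: ef cd ab 00    Geneve: 00 ef cd ab */
	printf("plain   = %08x\n", vni);
	printf("shifted = %08x\n", shifted);
	return 0;
}
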
linux/drivers/net/ethernet/intel/igb/e1000_phy.c:1133                          │ linux/drivers/net/ethernet/intel/e1000e/phy.c:1182
│
struct e1000_phy_info *phy = &hw->phy; │ struct e1000_phy_info *phy = &hw->phy;
s32 ret_val; │ s32 ret_val;
u16 phy_data; │ u16 phy_data;
bool link; │ bool link;
│
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); │ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val) │ if (ret_val)
goto out; │ return ret_val;
│
igb_phy_force_speed_duplex_setup(hw, &phy_data); │ e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
│
ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); │ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val) │ if (ret_val)
goto out; │ return ret_val;
│
/* Clear Auto-Crossover to force MDI manually. IGP requires MDI │ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
* forced whenever speed and duplex are forced. │ * forced whenever speed and duplex are forced.
*/ │ */
ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); │ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
if (ret_val) │ if (ret_val)
goto out; │ return ret_val;
│
phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; │ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; │ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
│
ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); │ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
if (ret_val) │ if (ret_val)
goto out; │ return ret_val;
│
hw_dbg("IGP PSCR: %X\n", phy_data); │ e_dbg("IGP PSCR: %X\n", phy_data);
│
udelay(1); │ udelay(1);
│
if (phy->autoneg_wait_to_complete) { │ if (phy->autoneg_wait_to_complete) {
hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); │ e_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
│
ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); │ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
│ 100000, &link);
if (ret_val) │ if (ret_val)
goto out; │ return ret_val;
│
if (!link) │ if (!link)
hw_dbg("Link taking longer than expected.\n"); │ e_dbg("Link taking longer than expected.\n");
│
/* Try once more */ │ /* Try once more */
ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); │ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
if (ret_val) │ 100000, &link);
goto out; │
} │ }
│
out: │
return ret_val; │ return ret_val;
} │
linux/drivers/net/ethernet/sfc/selftest.c:374                                  │ linux/drivers/net/ethernet/sfc/falcon/selftest.c:376
│
struct efx_loopback_state *state = efx->loopback_selftest; │ struct ef4_loopback_state *state = efx->loopback_selftest;
struct net_device *net_dev = efx->net_dev; │ struct net_device *net_dev = efx->net_dev;
struct efx_loopback_payload *payload = &state->payload; │ struct ef4_loopback_payload *payload = &state->payload;
│
/* Initialise the layerII header */ │ /* Initialise the layerII header */
ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr); │ ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
ether_addr_copy((u8 *)&payload->header.h_source, payload_source); │ ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
payload->header.h_proto = htons(ETH_P_IP); │ payload->header.h_proto = htons(ETH_P_IP);
│
/* saddr set later and used as incrementing count */ │ /* saddr set later and used as incrementing count */
payload->ip.daddr = htonl(INADDR_LOOPBACK); │ payload->ip.daddr = htonl(INADDR_LOOPBACK);
payload->ip.ihl = 5; │ payload->ip.ihl = 5;
payload->ip.check = (__force __sum16) htons(0xdead); │ payload->ip.check = (__force __sum16) htons(0xdead);
payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr)); │ payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
payload->ip.version = IPVERSION; │ payload->ip.version = IPVERSION;
payload->ip.protocol = IPPROTO_UDP; │ payload->ip.protocol = IPPROTO_UDP;
│
/* Initialise udp header */ │ /* Initialise udp header */
payload->udp.source = 0; │ payload->udp.source = 0;
payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - │ payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
sizeof(struct iphdr)); │ sizeof(struct iphdr));
payload->udp.check = 0; /* checksum ignored */ │ payload->udp.check = 0; /* checksum ignored */
│
/* Fill out payload */ │ /* Fill out payload */
payload->iteration = htons(ntohs(payload->iteration) + 1); │ payload->iteration = htons(ntohs(payload->iteration) + 1);
memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); │ memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
│
/* Fill out remaining state members */ │ /* Fill out remaining state members */
atomic_set(&state->rx_good, 0); │ atomic_set(&state->rx_good, 0);
atomic_set(&state->rx_bad, 0); │ atomic_set(&state->rx_bad, 0);
smp_wmb(); │ smp_wmb();
} │
linux/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c:1091                      │ linux/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c:2858
│
struct vf_resources *vfres = &adapter->params.vfres; │ struct pf_resources *pfres = &adapter->params.pfres;
struct fw_pfvf_cmd cmd, rpl; │ struct fw_pfvf_cmd cmd, rpl;
int v; │ int v;
u32 word; │ u32 word;
│
/* │ /* Execute PFVF Read command to get VF resource limits; bail out early
* Execute PFVF Read command to get VF resource limits; bail out early │
* with error on command failure. │ * with error on command failure.
*/ │ */
memset(&cmd, 0, sizeof(cmd)); │ memset(&cmd, 0, sizeof(cmd));
cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | │ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
FW_CMD_REQUEST_F | │ FW_CMD_REQUEST_F |
FW_CMD_READ_F); │ FW_CMD_READ_F |
│ FW_PFVF_CMD_PFN_V(adapter->pf) |
│ FW_PFVF_CMD_VFN_V(0));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); │ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); │ v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
if (v) │ if (v != FW_SUCCESS)
return v; │ return v;
│
/* │ /* Extract PF resource limits and return success.
* Extract VF resource limits and return success. │
*/ │ */
word = be32_to_cpu(rpl.niqflint_niq); │ word = be32_to_cpu(rpl.niqflint_niq);
vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word); │ pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
vfres->niq = FW_PFVF_CMD_NIQ_G(word); │ pfres->niq = FW_PFVF_CMD_NIQ_G(word);
│
word = be32_to_cpu(rpl.type_to_neq); │ word = be32_to_cpu(rpl.type_to_neq);
vfres->neq = FW_PFVF_CMD_NEQ_G(word); │ pfres->neq = FW_PFVF_CMD_NEQ_G(word);
vfres->pmask = FW_PFVF_CMD_PMASK_G(word); │ pfres->pmask = FW_PFVF_CMD_PMASK_G(word);
│
word = be32_to_cpu(rpl.tc_to_nexactf); │ word = be32_to_cpu(rpl.tc_to_nexactf);
vfres->tc = FW_PFVF_CMD_TC_G(word); │ pfres->tc = FW_PFVF_CMD_TC_G(word);
vfres->nvi = FW_PFVF_CMD_NVI_G(word); │ pfres->nvi = FW_PFVF_CMD_NVI_G(word);
vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word); │ pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
│
word = be32_to_cpu(rpl.r_caps_to_nethctrl); │ word = be32_to_cpu(rpl.r_caps_to_nethctrl);
vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word); │ pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word); │ pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word); │ pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
│
return 0; │ return 0;
} │
linux/drivers/net/ethernet/mellanox/mlx4/port.c:897                            │ linux/drivers/net/ethernet/mellanox/mlx4/port.c:1004
│
struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table; │ struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table; │ struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
int ret = 0; │ int ret = 0;
int ret1; │ int ret1;
int i; │ int i;
bool update1 = false; │ bool update1 = false;
bool update2 = false; │ bool update2 = false;
│
mutex_lock(&t1->mutex); │ mutex_lock(&t1->mutex);
mutex_lock(&t2->mutex); │ mutex_lock(&t2->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { │ for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
if (t1->entries[i] != t2->entries[i]) { │ if (t1->entries[i] != t2->entries[i]) {
mlx4_warn(dev, "mac table is in an unexpected state when trying │ mlx4_warn(dev, "vlan table is in an unexpected state when trying
ret = -EINVAL; │ ret = -EINVAL;
goto unlock; │ goto unlock;
} │ }
} │ }
│
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { │ for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
if (!t1->entries[i]) │ if (!t1->entries[i])
continue; │ continue;
t1->is_dup[i] = false; │ t1->is_dup[i] = false;
if (!t1->refs[i]) { │ if (!t1->refs[i]) {
t1->entries[i] = 0; │ t1->entries[i] = 0;
update1 = true; │ update1 = true;
} │ }
t2->is_dup[i] = false; │ t2->is_dup[i] = false;
if (!t2->refs[i]) { │ if (!t2->refs[i]) {
t2->entries[i] = 0; │ t2->entries[i] = 0;
update2 = true; │ update2 = true;
} │ }
} │ }
│
if (update1) { │ if (update1) {
ret = mlx4_set_port_mac_table(dev, 1, t1->entries); │ ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
if (ret) │ if (ret)
mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", │ mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n"
} │ }
if (update2) { │ if (update2) {
ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries); │ ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries);
if (ret1) { │ if (ret1) {
mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", │ mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n"
ret = ret1; │ ret = ret1;
} │ }
} │ }
unlock: │ unlock:
mutex_unlock(&t2->mutex); │ mutex_unlock(&t2->mutex);
mutex_unlock(&t1->mutex); │ mutex_unlock(&t1->mutex);
return ret; │ return ret;
} │
linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c:174              │ linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c:161
│
struct emac_sgmii *phy = &adpt->phy; │ struct emac_sgmii *phy = &adpt->phy;
void __iomem *phy_regs = phy->base; │ void __iomem *phy_regs = phy->base;
void __iomem *laned = phy->digital; │ void __iomem *laned = phy->digital;
unsigned int i; │ unsigned int i;
u32 lnstatus; │ u32 lnstatus;
│
/* PCS lane-x init */ │ /* PCS lane-x init */
emac_reg_write_all(phy->base, physical_coding_sublayer_programming, │ emac_reg_write_all(phy->base, physical_coding_sublayer_programming,
ARRAY_SIZE(physical_coding_sublayer_programming)); │ ARRAY_SIZE(physical_coding_sublayer_programming));
│
/* SGMII lane-x init */ │ /* SGMII lane-x init */
emac_reg_write_all(phy->digital, sgmii_laned, ARRAY_SIZE(sgmii_laned)); │ emac_reg_write_all(phy->digital, sgmii_laned, ARRAY_SIZE(sgmii_laned));
│
/* Power up PCS and start reset lane state machine */ │ /* Power up PCS and start reset lane state machine */
│
writel(0, phy_regs + EMAC_SGMII_PHY_RESET_CTRL); │ writel(0, phy_regs + EMAC_SGMII_PHY_RESET_CTRL);
writel(1, laned + SGMII_LN_RSM_START); │ writel(1, laned + SGMII_LN_RSM_START);
│
/* Wait for c_ready assertion */ │ /* Wait for c_ready assertion */
for (i = 0; i < SERDES_START_WAIT_TIMES; i++) { │ for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
lnstatus = readl(phy_regs + SGMII_PHY_LN_LANE_STATUS); │ lnstatus = readl(phy_regs + SGMII_PHY_LN_LANE_STATUS);
if (lnstatus & BIT(1)) │ if (lnstatus & BIT(1))
break; │ break;
usleep_range(100, 200); │ usleep_range(100, 200);
} │ }
│
if (i == SERDES_START_WAIT_TIMES) { │ if (i == SERDES_START_WAIT_TIMES) {
netdev_err(adpt->netdev, "SGMII failed to start\n"); │ netdev_err(adpt->netdev, "SGMII failed to start\n");
return -EIO; │ return -EIO;
} │ }
│
/* Disable digital and SERDES loopback */ │ /* Disable digital and SERDES loopback */
writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN0); │ writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN0);
writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN2); │ writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN2);
writel(0, phy_regs + SGMII_PHY_LN_CDR_CTRL1); │ writel(0, phy_regs + SGMII_PHY_LN_CDR_CTRL1);
│
/* Mask out all the SGMII Interrupt */ │ /* Mask out all the SGMII Interrupt */
writel(0, phy_regs + EMAC_SGMII_PHY_INTERRUPT_MASK); │ writel(0, phy_regs + EMAC_SGMII_PHY_INTERRUPT_MASK);
│
return 0; │ return 0;
} │
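
The c_ready wait is the usual bounded-poll idiom: read a status register up to SERDES_START_WAIT_TIMES times with a sleep between attempts, and detect timeout by the loop index reaching the bound. A hedged generic sketch of the pattern, where poll_status() is a hypothetical stand-in for the readl() of the lane status register:

/* Sketch: bounded polling with loop-index timeout detection. */
#include <stdio.h>

#define WAIT_TIMES 50

static int poll_status(int attempt)
{
	return attempt >= 3;	/* pretend the ready bit asserts on try 3 */
}

int main(void)
{
	int i;

	for (i = 0; i < WAIT_TIMES; i++) {
		if (poll_status(i))
			break;
		/* real code sleeps here, e.g. usleep_range(100, 200) */
	}
	if (i == WAIT_TIMES) {
		fprintf(stderr, "timed out\n");
		return 1;
	}
	printf("ready after %d polls\n", i + 1);
	return 0;
}
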
linux/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:177       │ linux/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c:228
│
struct inet_sock *inet = inet_sk(sk); │ struct inet_sock *inet = inet_sk(sk);
struct cpl_t6_act_open_req *cpl6; │ struct cpl_t6_act_open_req6 *cpl6;
struct cpl_act_open_req *cpl; │ struct cpl_act_open_req6 *cpl;
struct sk_buff *skb; │ struct sk_buff *skb;
unsigned int len; │ unsigned int len;
int qid_atid; │ int qid_atid;
u64 options; │ u64 options;
│
len = sizeof(*cpl6); │ len = sizeof(*cpl6);
skb = alloc_skb(len, GFP_KERNEL); │ skb = alloc_skb(len, GFP_KERNEL);
if (unlikely(!skb)) │ if (unlikely(!skb))
return -ENOMEM; │ return -ENOMEM;
/* mark it a control pkt */ │ /* mark it a control pkt */
set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id); │ set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
│
cpl6 = __skb_put_zero(skb, len); │ cpl6 = __skb_put_zero(skb, len);
cpl = (struct cpl_act_open_req *)cpl6; │ cpl = (struct cpl_act_open_req6 *)cpl6;
INIT_TP_WR(cpl6, 0); │ INIT_TP_WR(cpl6, 0);
qid_atid = TID_QID_V(tx_info->rx_qid) | │ qid_atid = TID_QID_V(tx_info->rx_qid) | TID_TID_V(atid);
TID_TID_V(atid); │ OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid));
OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid)); │
cpl->local_port = inet->inet_sport; │ cpl->local_port = inet->inet_sport;
cpl->peer_port = inet->inet_dport; │ cpl->peer_port = inet->inet_dport;
cpl->local_ip = inet->inet_rcv_saddr; │ cpl->local_ip_hi = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[0];
cpl->peer_ip = inet->inet_daddr; │ cpl->local_ip_lo = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[8];
│ cpl->peer_ip_hi = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[0];
│ cpl->peer_ip_lo = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[8];
│
/* fill first 64 bit option field. */ │ /* first 64 bit option field. */
options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F | │ options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan); │ SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
cpl->opt0 = cpu_to_be64(options); │ cpl->opt0 = cpu_to_be64(options);
│
/* next 64 bit option field. */ │ /* next 64 bit option field. */
options = │ options =
TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]); │ TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
cpl->opt2 = htonl(options); │ cpl->opt2 = htonl(options);
│
return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te); │ return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
} │
linux/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c:160              │ linux/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c:213
│
struct nfp_cpp_explicit *expl; │ struct nfp_cpp_explicit *expl;
char *tmp = buff; │ const char *tmp = buff;
int err, i, incr; │ int err, i, incr;
u8 byte_mask; │ u8 byte_mask;
│
if (len & (width_read - 1)) │ if (len & (width_write - 1))
return -EINVAL; │ return -EINVAL;
│
expl = nfp_cpp_explicit_acquire(cpp); │ expl = nfp_cpp_explicit_acquire(cpp);
if (!expl) │ if (!expl)
return -EBUSY; │ return -EBUSY;
│
incr = min_t(int, 16 * width_read, 128); │ incr = min_t(int, 16 * width_write, 128);
incr = min_t(int, incr, len); │ incr = min_t(int, incr, len);
│
/* Translate a NFP_CPP_ACTION_RW to action 0 */ │ /* Translate a NFP_CPP_ACTION_RW to action 1 */
if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW) │ if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW)
cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 0, │ cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 1,
NFP_CPP_ID_TOKEN_of(cpp_id)); │ NFP_CPP_ID_TOKEN_of(cpp_id));
│
byte_mask = nfp_bytemask(width_read, addr); │ byte_mask = nfp_bytemask(width_write, addr);
│
nfp_cpp_explicit_set_target(expl, cpp_id, │ nfp_cpp_explicit_set_target(expl, cpp_id,
incr / width_read - 1, byte_mask); │ incr / width_write - 1, byte_mask);
nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PUSH, │ nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PULL,
0, NFP_SIGNAL_NONE); │ 0, NFP_SIGNAL_NONE);
│
for (i = 0; i < len; i += incr, addr += incr, tmp += incr) { │ for (i = 0; i < len; i += incr, addr += incr, tmp += incr) {
if (i + incr > len) { │ if (i + incr > len) {
incr = len - i; │ incr = len - i;
nfp_cpp_explicit_set_target(expl, cpp_id, │ nfp_cpp_explicit_set_target(expl, cpp_id,
incr / width_read - 1, │ incr / width_write - 1,
0xff); │ 0xff);
} │ }
│
err = nfp_cpp_explicit_do(expl, addr); │ err = nfp_cpp_explicit_put(expl, tmp, incr);
if (err < 0) │ if (err < 0)
goto exit_release; │ goto exit_release;
│
err = nfp_cpp_explicit_get(expl, tmp, incr); │ err = nfp_cpp_explicit_do(expl, addr);
if (err < 0) │ if (err < 0)
goto exit_release; │ goto exit_release;
} │ }
err = len; │ err = len;
exit_release: │ exit_release:
nfp_cpp_explicit_release(expl); │ nfp_cpp_explicit_release(expl);
│
return err; │ return err;
} │
linux/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c:413                    │ linux/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c:521
│
struct ice_hw *hw = &vsi->back->hw; │ struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt; │ struct ice_vsi_ctx *ctxt;
u8 tag_type; │ u8 tag_type;
int err; │ int err;
│
/* do not allow modifying VLAN stripping when a port VLAN is configured │
* on this VSI │
*/ │
if (vsi->info.port_based_outer_vlan) │ if (vsi->info.port_based_outer_vlan)
return 0; │ return 0;
│
if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type)) │ if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type))
return -EINVAL; │ return -EINVAL;
│
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); │ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt) │ if (!ctxt)
return -ENOMEM; │ return -ENOMEM;
│
ctxt->info.valid_sections = │ ctxt->info.valid_sections =
cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID); │ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
/* clear current outer VLAN strip settings */ │ /* clear current outer VLAN insertion settings */
ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags & │ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M); │ ~(ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT |
│ ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
│ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M |
│ ICE_AQ_VSI_OUTER_TAG_TYPE_M);
ctxt->info.outer_vlan_flags |= │ ctxt->info.outer_vlan_flags |=
((ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH << │ ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
ICE_AQ_VSI_OUTER_VLAN_EMODE_S) | │ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) & │ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M) |
ICE_AQ_VSI_OUTER_TAG_TYPE_M)); │ ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
│ ICE_AQ_VSI_OUTER_TAG_TYPE_M);
│
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); │ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) │ if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN stripping failed, err %d aq_err %s\n", │ dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN insertion failed, err %d aq_err %s\n",
err, ice_aq_str(hw->adminq.sq_last_status)); │ err, ice_aq_str(hw->adminq.sq_last_status));
else │ else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags; │ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
│
kfree(ctxt); │ kfree(ctxt);
return err; │ return err;
} │
linux/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c:1853                  │ linux/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c:1901
│
│ int num_iq_stats, num_oq_stats, i, j;
struct lio *lio = GET_LIO(netdev); │ struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev; │ struct octeon_device *oct_dev = lio->oct_dev;
int num_iq_stats, num_oq_stats, i, j; │
int num_stats; │ int num_stats;
│
switch (stringset) { │ switch (stringset) {
case ETH_SS_STATS: │ case ETH_SS_STATS:
num_stats = ARRAY_SIZE(oct_stats_strings); │ num_stats = ARRAY_SIZE(oct_vf_stats_strings);
for (j = 0; j < num_stats; j++) { │ for (j = 0; j < num_stats; j++) {
sprintf(data, "%s", oct_stats_strings[j]); │ sprintf(data, "%s", oct_vf_stats_strings[j]);
data += ETH_GSTRING_LEN; │ data += ETH_GSTRING_LEN;
} │ }
│
num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); │ num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { │ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) │ if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
continue; │ continue;
for (j = 0; j < num_iq_stats; j++) { │ for (j = 0; j < num_iq_stats; j++) {
sprintf(data, "tx-%d-%s", i, │ sprintf(data, "tx-%d-%s", i,
oct_iq_stats_strings[j]); │ oct_iq_stats_strings[j]);
data += ETH_GSTRING_LEN; │ data += ETH_GSTRING_LEN;
} │ }
} │ }
│
num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); │ num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { │ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) │ if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
continue; │ continue;
for (j = 0; j < num_oq_stats; j++) { │ for (j = 0; j < num_oq_stats; j++) {
sprintf(data, "rx-%d-%s", i, │ sprintf(data, "rx-%d-%s", i,
oct_droq_stats_strings[j]); │ oct_droq_stats_strings[j]);
data += ETH_GSTRING_LEN; │ data += ETH_GSTRING_LEN;
} │ }
} │ }
break; │ break;
│
case ETH_SS_PRIV_FLAGS: │ case ETH_SS_PRIV_FLAGS:
lio_get_priv_flags_strings(lio, data); │ lio_get_priv_flags_strings(lio, data);
break; │ break;
default: │ default:
netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); │ netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
break; │ break;
} │ }
} │
linux/drivers/net/ethernet/intel/igc/igc_ptp.c:642 │ linux/drivers/net/ethernet/intel/igb/igb_ptp.c:968
│
struct sk_buff *skb = adapter->ptp_tx_skb; │ struct sk_buff *skb = adapter->ptp_tx_skb;
│ struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps; │ struct skb_shared_hwtstamps shhwtstamps;
struct igc_hw *hw = &adapter->hw; │
int adjust = 0; │
u64 regval; │ u64 regval;
│ int adjust = 0;
│
if (WARN_ON_ONCE(!skb)) │ regval = rd32(E1000_TXSTMPL);
return; │ regval |= (u64)rd32(E1000_TXSTMPH) << 32;
│
regval = rd32(IGC_TXSTMPL); │
regval |= (u64)rd32(IGC_TXSTMPH) << 32; │
igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); │
│
switch (adapter->link_speed) { │ igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
case SPEED_10: │ /* adjust timestamp for the TX latency based on link speed */
adjust = IGC_I225_TX_LATENCY_10; │ if (adapter->hw.mac.type == e1000_i210) {
break; │ switch (adapter->link_speed) {
case SPEED_100: │ case SPEED_10:
adjust = IGC_I225_TX_LATENCY_100; │ adjust = IGB_I210_TX_LATENCY_10;
break; │ break;
case SPEED_1000: │ case SPEED_100:
adjust = IGC_I225_TX_LATENCY_1000; │ adjust = IGB_I210_TX_LATENCY_100;
break; │ break;
case SPEED_2500: │ case SPEED_1000:
adjust = IGC_I225_TX_LATENCY_2500; │ adjust = IGB_I210_TX_LATENCY_1000;
break; │ break;
│ }
} │ }
│
shhwtstamps.hwtstamp = │ shhwtstamps.hwtstamp =
ktime_add_ns(shhwtstamps.hwtstamp, adjust); │ ktime_add_ns(shhwtstamps.hwtstamp, adjust);
│
/* Clear the lock early before calling skb_tstamp_tx so that │ /* Clear the lock early before calling skb_tstamp_tx so that
* applications are not woken up before the lock bit is clear. We use │ * applications are not woken up before the lock bit is clear. We use
* a copy of the skb pointer to ensure other threads can't change it │ * a copy of the skb pointer to ensure other threads can't change it
* while we're notifying the stack. │ * while we're notifying the stack.
*/ │ */
adapter->ptp_tx_skb = NULL; │ adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); │ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
│
/* Notify the stack and free the skb after we've unlocked */ │ /* Notify the stack and free the skb after we've unlocked */
skb_tstamp_tx(skb, &shhwtstamps); │ skb_tstamp_tx(skb, &shhwtstamps);
dev_kfree_skb_any(skb); │ dev_kfree_skb_any(skb);
} │
linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:2343 │ linux/drivers/net/ethernet/intel/igc/igc_ethtool.c:763
│
struct igb_adapter *adapter = netdev_priv(netdev); │ struct igc_adapter *adapter = netdev_priv(netdev);
u8 *p = data; │ u8 *p = data;
int i; │ int i;
│
switch (stringset) { │ switch (stringset) {
case ETH_SS_TEST: │ case ETH_SS_TEST:
memcpy(data, igb_gstrings_test, sizeof(igb_gstrings_test)); │ memcpy(data, *igc_gstrings_test,
│ IGC_TEST_LEN * ETH_GSTRING_LEN);
break; │ break;
case ETH_SS_STATS: │ case ETH_SS_STATS:
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) │ for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++)
│ ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string);
│ for (i = 0; i < IGC_NETDEV_STATS_LEN; i++)
ethtool_sprintf(&p, │ ethtool_sprintf(&p,
igb_gstrings_stats[i].stat_string); │ igc_gstrings_net_stats[i].stat_string);
for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) │
ethtool_sprintf(&p, │
igb_gstrings_net_stats[i].stat_string); │
for (i = 0; i < adapter->num_tx_queues; i++) { │ for (i = 0; i < adapter->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i); │ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i); │ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
ethtool_sprintf(&p, "tx_queue_%u_restart", i); │ ethtool_sprintf(&p, "tx_queue_%u_restart", i);
} │ }
for (i = 0; i < adapter->num_rx_queues; i++) { │ for (i = 0; i < adapter->num_rx_queues; i++) {
ethtool_sprintf(&p, "rx_queue_%u_packets", i); │ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
ethtool_sprintf(&p, "rx_queue_%u_bytes", i); │ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
ethtool_sprintf(&p, "rx_queue_%u_drops", i); │ ethtool_sprintf(&p, "rx_queue_%u_drops", i);
ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); │ ethtool_sprintf(&p, "rx_queue_%u_csum_err", i);
ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); │ ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i);
} │ }
/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ │ /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */
break; │ break;
case ETH_SS_PRIV_FLAGS: │ case ETH_SS_PRIV_FLAGS:
memcpy(data, igb_priv_flags_strings, │ memcpy(data, igc_priv_flags_strings,
IGB_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); │ IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
break; │ break;
} │ }
} │
linux/drivers/net/ethernet/sfc/falcon/rx.c:649 │ linux/drivers/net/ethernet/sfc/rx.c:361
│
struct ef4_nic *efx = channel->efx; │ struct efx_nic *efx = channel->efx;
struct ef4_rx_buffer *rx_buf = │ struct efx_rx_buffer *rx_buf =
ef4_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); │ efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
u8 *eh = ef4_rx_buf_va(rx_buf); │ u8 *eh = efx_rx_buf_va(rx_buf);
│
/* Read length from the prefix if necessary. This already │ /* Read length from the prefix if necessary. This already
* excludes the length of the prefix itself. │ * excludes the length of the prefix itself.
*/ │ */
if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN) │ if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
rx_buf->len = le16_to_cpup((__le16 *) │ rx_buf->len = le16_to_cpup((__le16 *)
(eh + efx->rx_packet_len_offset)); │ (eh + efx->rx_packet_len_offset));
│
/* If we're in loopback test, then pass the packet directly to the │ /* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here │ * loopback layer, and free the rx_buf here
*/ │ */
if (unlikely(efx->loopback_selftest)) { │ if (unlikely(efx->loopback_selftest)) {
struct ef4_rx_queue *rx_queue; │ struct efx_rx_queue *rx_queue;
│
ef4_loopback_rx_packet(efx, eh, rx_buf->len); │ efx_loopback_rx_packet(efx, eh, rx_buf->len);
rx_queue = ef4_channel_get_rx_queue(channel); │ rx_queue = efx_channel_get_rx_queue(channel);
ef4_free_rx_buffers(rx_queue, rx_buf, │ efx_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags); │ channel->rx_pkt_n_frags);
goto out; │ goto out;
} │ }
│
│ if (!efx_do_xdp(efx, channel, rx_buf, &eh))
│ goto out;
│
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) │ if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
rx_buf->flags &= ~EF4_RX_PKT_CSUMMED; │ rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
│
if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb) │ if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); │ efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, 0);
else │ else
ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); │ efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out: │ out:
channel->rx_pkt_n_frags = 0; │ channel->rx_pkt_n_frags = 0;
} │
linux/drivers/net/ethernet/stmicro/stmmac/dwmac5.c:509 │ linux/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c:1116
│
u32 tnsec = readl(ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index)); │ u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
u32 val = readl(ioaddr + MAC_PPS_CONTROL); │ u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
u64 period; │ u64 period;
│
if (!cfg->available) │ if (!cfg->available)
return -EINVAL; │ return -EINVAL;
if (tnsec & TRGTBUSY0) │ if (tnsec & XGMAC_TRGTBUSY0)
return -EBUSY; │ return -EBUSY;
if (!sub_second_inc || !systime_flags) │ if (!sub_second_inc || !systime_flags)
return -EINVAL; │ return -EINVAL;
│
val &= ~PPSx_MASK(index); │ val &= ~XGMAC_PPSx_MASK(index);
│
if (!enable) { │ if (!enable) {
val |= PPSCMDx(index, 0x5); │ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
val |= PPSEN0; │ writel(val, ioaddr + XGMAC_PPS_CONTROL);
writel(val, ioaddr + MAC_PPS_CONTROL); │
return 0; │ return 0;
} │ }
│
val |= PPSCMDx(index, 0x2); │ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
val |= TRGTMODSELx(index, 0x2); │ val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
val |= PPSEN0; │ val |= XGMAC_PPSEN0;
│
writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index)); │ writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
│
if (!(systime_flags & PTP_TCR_TSCTRLSSR)) │ if (!(systime_flags & PTP_TCR_TSCTRLSSR))
cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465; │ cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
writel(cfg->start.tv_nsec, ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index)); │ writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
│
period = cfg->period.tv_sec * 1000000000; │ period = cfg->period.tv_sec * 1000000000;
period += cfg->period.tv_nsec; │ period += cfg->period.tv_nsec;
│
do_div(period, sub_second_inc); │ do_div(period, sub_second_inc);
│
if (period <= 1) │ if (period <= 1)
return -EINVAL; │ return -EINVAL;
│
writel(period - 1, ioaddr + MAC_PPSx_INTERVAL(index)); │ writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));
│
period >>= 1; │ period >>= 1;
if (period <= 1) │ if (period <= 1)
return -EINVAL; │ return -EINVAL;
│
writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index)); │ writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));
│
/* Finally, activate it */ │ /* Finally, activate it */
writel(val, ioaddr + MAC_PPS_CONTROL); │ writel(val, ioaddr + XGMAC_PPS_CONTROL);
return 0; │ return 0;
} │
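
The interval/width writes above encode the PPS duty cycle: the requested period in nanoseconds is divided by the PTP sub-second increment to get a tick count, INTERVAL takes the full count minus one, and WIDTH takes half of it minus one, giving a roughly 50% duty cycle. A minimal sketch of that arithmetic in plain C; the 4 ns increment (a 250 MHz PTP clock) is an assumed example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t period_ns = 1000000000ULL;  /* 1 s PPS period */
        uint32_t sub_second_inc = 4;         /* assumed: 4 ns per tick */
        uint64_t ticks = period_ns / sub_second_inc;

        /* INTERVAL = ticks - 1; WIDTH = ticks / 2 - 1 => ~50% duty cycle */
        printf("INTERVAL = %llu\n", (unsigned long long)(ticks - 1));
        printf("WIDTH    = %llu\n", (unsigned long long)(ticks / 2 - 1));
        return 0;
}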
linux/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:661 │ linux/drivers/net/ethernet/cavium/liquidio/lio_main.c:1218
│
struct net_device *netdev = oct->props[ifidx].netdev; │ struct net_device *netdev = oct->props[ifidx].netdev;
struct octeon_device_priv *oct_priv = │ struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv; │ (struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n; │ struct napi_struct *napi, *n;
struct lio *lio; │ struct lio *lio;
│
if (!netdev) { │ if (!netdev) {
dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", │ dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
__func__, ifidx); │ __func__, ifidx);
return; │ return;
} │ }
│
lio = GET_LIO(netdev); │ lio = GET_LIO(netdev);
│
dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); │ dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
│
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) │ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
liquidio_stop(netdev); │ liquidio_stop(netdev);
│
if (oct->props[lio->ifidx].napi_enabled == 1) { │ if (oct->props[lio->ifidx].napi_enabled == 1) {
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) │ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_disable(napi); │ napi_disable(napi);
│
oct->props[lio->ifidx].napi_enabled = 0; │ oct->props[lio->ifidx].napi_enabled = 0;
│
oct->droq[0]->ops.poll_mode = 0; │ if (OCTEON_CN23XX_PF(oct))
│ oct->droq[0]->ops.poll_mode = 0;
} │ }
│
/* Delete NAPI */ │ /* Delete NAPI */
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) │ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
netif_napi_del(napi); │ netif_napi_del(napi);
│
tasklet_enable(&oct_priv->droq_tasklet); │ tasklet_enable(&oct_priv->droq_tasklet);
│
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) │ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev); │ unregister_netdev(netdev);
│
cleanup_rx_oom_poll_fn(netdev); │ cleanup_sync_octeon_time_wq(netdev);
│
cleanup_link_status_change_wq(netdev); │ cleanup_link_status_change_wq(netdev);
│
│ cleanup_rx_oom_poll_fn(netdev);
│
lio_delete_glists(lio); │ lio_delete_glists(lio);
│
free_netdev(netdev); │ free_netdev(netdev);
│
oct->props[ifidx].gmxport = -1; │ oct->props[ifidx].gmxport = -1;
│
oct->props[ifidx].netdev = NULL; │ oct->props[ifidx].netdev = NULL;
} │
linux/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c:2017 │ linux/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c:8676
│
/* something changed */ │ /* something changed */
if (!link_ok && lc->link_ok) { │ if (!link_ok && lc->link_ok) {
lc->link_down_rc = linkdnrc; │ lc->link_down_rc = linkdnrc;
dev_warn_ratelimited(adapter->pdev_dev, │ dev_warn_ratelimited(adapter->pdev_dev,
"Port %d link down, reason: %s\n", │ "Port %d link down, reason: %s\n",
pi->port_id, │ pi->tx_chan,
t4vf_link_down_rc_str(linkdnrc)); │ t4_link_down_rc_str(linkdnrc));
} │ }
lc->link_ok = link_ok; │ lc->link_ok = link_ok;
lc->speed = speed; │ lc->speed = speed;
lc->advertised_fc = adv_fc; │ lc->advertised_fc = adv_fc;
lc->fc = fc; │ lc->fc = fc;
lc->fec = fec; │ lc->fec = fec;
│
lc->pcaps = pcaps; │
lc->lpacaps = lpacaps; │ lc->lpacaps = lpacaps;
lc->acaps = acaps & ADVERT_MASK; │ lc->acaps = acaps & ADVERT_MASK;
│
/* If we're not physically capable of Auto-Negotiation, note │ /* If we're not physically capable of Auto-Negotiation, note
* this as Auto-Negotiation disabled. Otherwise, we track │ * this as Auto-Negotiation disabled. Otherwise, we track
* what Auto-Negotiation settings we have. Note parallel │ * what Auto-Negotiation settings we have. Note parallel
* structure in init_link_config(). │ * structure in t4_link_l1cfg_core() and init_link_config().
*/ │ */
if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { │ if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
lc->autoneg = AUTONEG_DISABLE; │ lc->autoneg = AUTONEG_DISABLE;
} else if (lc->acaps & FW_PORT_CAP32_ANEG) { │ } else if (lc->acaps & FW_PORT_CAP32_ANEG) {
lc->autoneg = AUTONEG_ENABLE; │ lc->autoneg = AUTONEG_ENABLE;
} else { │ } else {
/* When Autoneg is disabled, user needs to set │ /* When Autoneg is disabled, user needs to set
* single speed. │ * single speed.
* Similar to cxgb4_ethtool.c: set_link_ksettings │ * Similar to cxgb4_ethtool.c: set_link_ksettings
*/ │ */
lc->acaps = 0; │ lc->acaps = 0;
lc->speed_caps = fwcap_to_speed(acaps); │ lc->speed_caps = fwcap_to_fwspeed(acaps);
lc->autoneg = AUTONEG_DISABLE; │ lc->autoneg = AUTONEG_DISABLE;
} │ }
│
t4vf_os_link_changed(adapter, pi->pidx, link_ok); │ t4_os_link_changed(adapter, pi->port_id, link_ok);
} │
linux/drivers/net/ethernet/intel/ice/ice_common.c:4072 │ linux/drivers/net/ethernet/intel/ice/ice_common.c:4123
│
u32 src_dword, mask; │ u64 src_qword, mask;
__le32 dest_dword; │ __le64 dest_qword;
u8 *from, *dest; │ u8 *from, *dest;
u16 shift_width; │ u16 shift_width;
│
/* copy from the next struct field */ │ /* copy from the next struct field */
from = src_ctx + ce_info->offset; │ from = src_ctx + ce_info->offset;
│
/* prepare the bits and mask */ │ /* prepare the bits and mask */
shift_width = ce_info->lsb % 8; │ shift_width = ce_info->lsb % 8;
│
/* if the field width is exactly 32 on an x86 machine, then the shift │ /* if the field width is exactly 64 on an x86 machine, then the shift
 * operation will not work because the SHL instruction's count is masked │  * operation will not work because the SHL instruction's count is masked
* to 5 bits so the shift will do nothing │ * to 6 bits so the shift will do nothing
*/ │ */
if (ce_info->width < 32) │ if (ce_info->width < 64)
mask = BIT(ce_info->width) - 1; │ mask = BIT_ULL(ce_info->width) - 1;
else │ else
mask = (u32)~0; │ mask = (u64)~0;
│
/* don't swizzle the bits until after the mask because the mask bits │ /* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines │ * will be in a different bit position on big endian machines
*/ │ */
src_dword = *(u32 *)from; │ src_qword = *(u64 *)from;
src_dword &= mask; │ src_qword &= mask;
│
/* shift to correct alignment */ │ /* shift to correct alignment */
mask <<= shift_width; │ mask <<= shift_width;
src_dword <<= shift_width; │ src_qword <<= shift_width;
│
/* get the current bits from the target bit string */ │ /* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8); │ dest = dest_ctx + (ce_info->lsb / 8);
│
memcpy(&dest_dword, dest, sizeof(dest_dword)); │ memcpy(&dest_qword, dest, sizeof(dest_qword));
│
dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ │ dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ │ dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
│
/* put it all back */ │ /* put it all back */
memcpy(dest, &dest_dword, sizeof(dest_dword)); │ memcpy(dest, &dest_qword, sizeof(dest_qword));
} │
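
Both variants guard the mask construction because shifting a value by its full width is undefined in C, and on x86 the SHL count is masked (to 5 bits for 32-bit operands, 6 bits for 64-bit), so an unguarded BIT(width) - 1 would compute 1 << 32 == 1 << 0 == 1 and yield a mask of 0 rather than all-ones. A minimal standalone sketch of the guarded 32-bit case:

#include <stdint.h>

static uint32_t field_mask32(unsigned int width)
{
        if (width < 32)                          /* e.g. width 12 -> 0xfff */
                return ((uint32_t)1 << width) - 1;
        return ~(uint32_t)0;                     /* full-width field */
}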
linux/drivers/net/ethernet/mellanox/mlxsw/i2c.c:183 │ linux/drivers/net/ethernet/mellanox/mlxsw/i2c.c:229
│
__be32 push_cmd_buf[MLXSW_I2C_PUSH_CMD_SIZE / 4] = { │ __be32 push_cmd_buf[MLXSW_I2C_PUSH_CMD_SIZE / 4] = {
0, cpu_to_be32(MLXSW_I2C_PUSH_IMM_CMD) │ 0, cpu_to_be32(MLXSW_I2C_PUSH_EVENT_CMD)
}; │ };
__be32 prep_cmd_buf[MLXSW_I2C_PREP_SIZE / 4] = { │ __be32 prep_cmd_buf[MLXSW_I2C_PREP_SIZE / 4] = {
0, 0, 0, 0, 0, 0, │ 0, 0, 0, 0, 0, 0,
cpu_to_be32(client->adapter->nr & 0xffff), │ cpu_to_be32(client->adapter->nr & 0xffff),
cpu_to_be32(MLXSW_I2C_SET_IMM_CMD) │ cpu_to_be32(MLXSW_I2C_SET_EVENT_CMD)
}; │ };
struct i2c_msg push_cmd = │ struct i2c_msg push_cmd =
MLXSW_I2C_WRITE_MSG(client, push_cmd_buf, │ MLXSW_I2C_WRITE_MSG(client, push_cmd_buf,
MLXSW_I2C_PUSH_CMD_SIZE); │ MLXSW_I2C_PUSH_CMD_SIZE);
struct i2c_msg prep_cmd = │ struct i2c_msg prep_cmd =
MLXSW_I2C_WRITE_MSG(client, prep_cmd_buf, MLXSW_I2C_PREP_SIZE); │ MLXSW_I2C_WRITE_MSG(client, prep_cmd_buf, MLXSW_I2C_PREP_SIZE);
│ u8 status;
int err; │ int err;
│
if (!immediate) { │ push_cmd_buf[1] = cpu_to_be32(MLXSW_I2C_PUSH_EVENT_CMD | opcode);
push_cmd_buf[1] = cpu_to_be32(MLXSW_I2C_PUSH_CMD); │ prep_cmd_buf[3] = cpu_to_be32(in_mod);
prep_cmd_buf[7] = cpu_to_be32(MLXSW_I2C_SET_CMD); │ prep_cmd_buf[7] = cpu_to_be32(MLXSW_I2C_GO_BIT | opcode);
} │
mlxsw_i2c_set_slave_addr((u8 *)prep_cmd_buf, │ mlxsw_i2c_set_slave_addr((u8 *)prep_cmd_buf,
MLXSW_I2C_CIR2_BASE); │ MLXSW_I2C_CIR2_BASE);
mlxsw_i2c_set_slave_addr((u8 *)push_cmd_buf, │ mlxsw_i2c_set_slave_addr((u8 *)push_cmd_buf,
MLXSW_I2C_CIR2_OFF_STATUS); │ MLXSW_I2C_CIR2_OFF_STATUS);
│
/* Prepare Command Interface Register for transaction */ │ /* Prepare Command Interface Register for transaction */
err = i2c_transfer(client->adapter, &prep_cmd, 1); │ err = i2c_transfer(client->adapter, &prep_cmd, 1);
if (err < 0) │ if (err < 0)
return err; │ return err;
else if (err != 1) │ else if (err != 1)
return -EIO; │ return -EIO;
│
/* Write out Command Interface Register GO bit to push transaction */ │ /* Write out Command Interface Register GO bit to push transaction */
err = i2c_transfer(client->adapter, &push_cmd, 1); │ err = i2c_transfer(client->adapter, &push_cmd, 1);
if (err < 0) │ if (err < 0)
return err; │ return err;
else if (err != 1) │ else if (err != 1)
return -EIO; │ return -EIO;
│
│ /* Wait until go bit is cleared. */
│ err = mlxsw_i2c_wait_go_bit(client, mlxsw_i2c, &status);
│ if (err) {
│ dev_err(&client->dev, "HW semaphore is not released");
│ return err;
│ }
│
│ /* Validate transaction completion status. */
│ if (status) {
│ dev_err(&client->dev, "Bad transaction completion status %x\n",
│ status);
│ return -EIO;
│ }
│
return 0; │ return 0;
} │
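
The right-hand version completes the usual command-interface handshake: program the inputs, set the GO bit to start the transaction, poll until the hardware clears GO, then check the completion status it latched. A minimal sketch of the polling step under assumed names; the bit positions and the read callback are hypothetical, not the mlxsw register layout:

#include <stdint.h>

#define CIR_GO_BIT       (1u << 23)     /* hypothetical bit layout */
#define CIR_STATUS_SHIFT 24
#define CIR_STATUS_MASK  0xffu

static int cir_wait_go(uint32_t (*read_ctrl)(void), unsigned int tries)
{
        while (tries--) {
                uint32_t ctrl = read_ctrl();

                if (!(ctrl & CIR_GO_BIT))        /* device finished */
                        return (int)((ctrl >> CIR_STATUS_SHIFT) &
                                     CIR_STATUS_MASK);
        }
        return -1;      /* GO never cleared: HW held the semaphore */
}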
linux/drivers/net/ethernet/amazon/ena/ena_netdev.c:2362 │ linux/drivers/net/ethernet/amazon/ena/ena_netdev.c:2430
│
struct ena_com_create_io_ctx ctx; │
struct ena_com_dev *ena_dev; │ struct ena_com_dev *ena_dev;
struct ena_ring *tx_ring; │ struct ena_com_create_io_ctx ctx;
│ struct ena_ring *rx_ring;
u32 msix_vector; │ u32 msix_vector;
u16 ena_qid; │ u16 ena_qid;
int rc; │ int rc;
│
ena_dev = adapter->ena_dev; │ ena_dev = adapter->ena_dev;
│
tx_ring = &adapter->tx_ring[qid]; │ rx_ring = &adapter->rx_ring[qid];
msix_vector = ENA_IO_IRQ_IDX(qid); │ msix_vector = ENA_IO_IRQ_IDX(qid);
ena_qid = ENA_IO_TXQ_IDX(qid); │ ena_qid = ENA_IO_RXQ_IDX(qid);
│
memset(&ctx, 0x0, sizeof(ctx)); │ memset(&ctx, 0x0, sizeof(ctx));
│
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; │
ctx.qid = ena_qid; │ ctx.qid = ena_qid;
ctx.mem_queue_type = ena_dev->tx_mem_queue_type; │ ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
│ ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
ctx.msix_vector = msix_vector; │ ctx.msix_vector = msix_vector;
ctx.queue_size = tx_ring->ring_size; │ ctx.queue_size = rx_ring->ring_size;
ctx.numa_node = cpu_to_node(tx_ring->cpu); │ ctx.numa_node = cpu_to_node(rx_ring->cpu);
│
rc = ena_com_create_io_queue(ena_dev, &ctx); │ rc = ena_com_create_io_queue(ena_dev, &ctx);
if (rc) { │ if (rc) {
netif_err(adapter, ifup, adapter->netdev, │ netif_err(adapter, ifup, adapter->netdev,
"Failed to create I/O TX queue num %d rc: %d\n", │ "Failed to create I/O RX queue num %d rc: %d\n",
qid, rc); │ qid, rc);
return rc; │ return rc;
} │ }
│
rc = ena_com_get_io_handlers(ena_dev, ena_qid, │ rc = ena_com_get_io_handlers(ena_dev, ena_qid,
&tx_ring->ena_com_io_sq, │ &rx_ring->ena_com_io_sq,
&tx_ring->ena_com_io_cq); │ &rx_ring->ena_com_io_cq);
if (rc) { │ if (rc) {
netif_err(adapter, ifup, adapter->netdev, │ netif_err(adapter, ifup, adapter->netdev,
"Failed to get TX queue handlers. TX queue num %d rc: %d\n", │ "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
qid, rc); │ qid, rc);
ena_com_destroy_io_queue(ena_dev, ena_qid); │ goto err;
return rc; │
} │ }
│
ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); │ ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
│
│ return rc;
│ err:
│ ena_com_destroy_io_queue(ena_dev, ena_qid);
return rc; │ return rc;
} │
linux/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c:817 │ linux/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c:869
│
u32 src_dword, mask; │ u64 src_qword, mask;
u8 *from, *dest; │ u8 *from, *dest;
u16 shift_width; │ u16 shift_width;
__le32 dest_dword; │ __le64 dest_qword;
│
/* copy from the next struct field */ │ /* copy from the next struct field */
from = src + ce_info->offset; │ from = src + ce_info->offset;
│
/* prepare the bits and mask */ │ /* prepare the bits and mask */
shift_width = ce_info->lsb % 8; │ shift_width = ce_info->lsb % 8;
│
/* if the field width is exactly 32 on an x86 machine, then the shift │ /* if the field width is exactly 64 on an x86 machine, then the shift
 * operation will not work because the SHL instruction's count is masked │  * operation will not work because the SHL instruction's count is masked
* to 5 bits so the shift will do nothing │ * to 6 bits so the shift will do nothing
*/ │ */
if (ce_info->width < 32) │ if (ce_info->width < 64)
mask = BIT(ce_info->width) - 1; │ mask = BIT_ULL(ce_info->width) - 1;
else │ else
mask = ~(u32)0; │ mask = ~(u64)0;
│
/* don't swizzle the bits until after the mask because the mask bits │ /* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines │ * will be in a different bit position on big endian machines
*/ │ */
src_dword = *(u32 *)from; │ src_qword = *(u64 *)from;
src_dword &= mask; │ src_qword &= mask;
│
/* shift to correct alignment */ │ /* shift to correct alignment */
mask <<= shift_width; │ mask <<= shift_width;
src_dword <<= shift_width; │ src_qword <<= shift_width;
│
/* get the current bits from the target bit string */ │ /* get the current bits from the target bit string */
dest = hmc_bits + (ce_info->lsb / 8); │ dest = hmc_bits + (ce_info->lsb / 8);
│
memcpy(&dest_dword, dest, sizeof(dest_dword)); │ memcpy(&dest_qword, dest, sizeof(dest_qword));
│
dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ │ dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ │ dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
│
/* put it all back */ │ /* put it all back */
memcpy(dest, &dest_dword, sizeof(dest_dword)); │ memcpy(dest, &dest_qword, sizeof(dest_qword));
} │
linux/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:1132 │ linux/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:1075
│
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; │ struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
int num; │ int num;
│
if (pool > port->nrxqs * 2) { │ if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
│ (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
netdev_err(port->dev, "Invalid pool %d\n", pool); │ netdev_err(port->dev, "Invalid pool %d\n", pool);
return NULL; │ return NULL;
} │ }
│
/* Allocate buffers in case BM pool is used as long pool, but packet │ /* Allocate buffers in case BM pool is used as long pool, but packet
 * size doesn't match MTU or BM pool hasn't been used yet │  * size doesn't match MTU or BM pool hasn't been used yet
*/ │ */
if (new_pool->pkt_size == 0) { │ if (new_pool->pkt_size == 0) {
int pkts_num; │ int pkts_num;
│
/* Set default buffer number or free all the buffers in case │ /* Set default buffer number or free all the buffers in case
* the pool is not empty │ * the pool is not empty
*/ │ */
pkts_num = new_pool->buf_num; │ pkts_num = new_pool->buf_num;
if (pkts_num == 0) │ if (pkts_num == 0) {
pkts_num = mvpp2_pools[type].buf_num; │ if (port->priv->percpu_pools) {
else │ if (pool < port->nrxqs)
│ pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
│ else
│ pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
│ } else {
│ pkts_num = mvpp2_pools[pool].buf_num;
│ }
│ } else {
mvpp2_bm_bufs_free(port->dev->dev.parent, │ mvpp2_bm_bufs_free(port->dev->dev.parent,
port->priv, new_pool, pkts_num); │ port->priv, new_pool, pkts_num);
│ }
│
new_pool->pkt_size = pkt_size; │ new_pool->pkt_size = pkt_size;
new_pool->frag_size = │ new_pool->frag_size =
SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + │ SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
MVPP2_SKB_SHINFO_SIZE; │ MVPP2_SKB_SHINFO_SIZE;
│
/* Allocate buffers for this pool */ │ /* Allocate buffers for this pool */
num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); │ num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
if (num != pkts_num) { │ if (num != pkts_num) {
WARN(1, "pool %d: %d of %d allocated\n", │ WARN(1, "pool %d: %d of %d allocated\n",
new_pool->id, num, pkts_num); │ new_pool->id, num, pkts_num);
return NULL; │ return NULL;
} │ }
} │ }
│
mvpp2_bm_pool_bufsize_set(port->priv, new_pool, │ mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); │ MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
│
return new_pool; │ return new_pool;
} │
linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c:583 │ linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c:535
│
{ "CplCmdIn", 56, 8 }, │ { "CplCmdIn", 56, 8 },
{ "MpsVfVld", 55, 1 }, │ { "CplCmdOut", 48, 8 },
{ "MpsPf", 52, 3 }, │ { "ESynOut", 47, 1 },
{ "MpsVf", 44, 8 }, │ { "EAckOut", 46, 1 },
│ { "EFinOut", 45, 1 },
│ { "ERstOut", 44, 1 },
{ "SynIn", 43, 1 }, │ { "SynIn", 43, 1 },
{ "AckIn", 42, 1 }, │ { "AckIn", 42, 1 },
{ "FinIn", 41, 1 }, │ { "FinIn", 41, 1 },
{ "RstIn", 40, 1 }, │ { "RstIn", 40, 1 },
{ "DataIn", 39, 1 }, │ { "DataIn", 39, 1 },
{ "DataInVld", 38, 1 }, │ { "DataInVld", 38, 1 },
{ "PadIn", 37, 1 }, │ { "PadIn", 37, 1 },
{ "RxBufEmpty", 36, 1 }, │ { "RxBufEmpty", 36, 1 },
{ "RxDdp", 35, 1 }, │ { "RxDdp", 35, 1 },
{ "RxFbCongestion", 34, 1 }, │ { "RxFbCongestion", 34, 1 },
{ "TxFbCongestion", 33, 1 }, │ { "TxFbCongestion", 33, 1 },
{ "TxPktSumSrdy", 32, 1 }, │ { "TxPktSumSrdy", 32, 1 },
{ "RcfUlpType", 28, 4 }, │ { "RcfUlpType", 28, 4 },
{ "Eread", 27, 1 }, │ { "Eread", 27, 1 },
{ "Ebypass", 26, 1 }, │ { "Ebypass", 26, 1 },
{ "Esave", 25, 1 }, │ { "Esave", 25, 1 },
{ "Static0", 24, 1 }, │ { "Static0", 24, 1 },
{ "Cread", 23, 1 }, │ { "Cread", 23, 1 },
{ "Cbypass", 22, 1 }, │ { "Cbypass", 22, 1 },
{ "Csave", 21, 1 }, │ { "Csave", 21, 1 },
{ "CPktOut", 20, 1 }, │ { "CPktOut", 20, 1 },
{ "RxPagePoolFull", 18, 2 }, │ { "RxPagePoolFull", 18, 2 },
{ "RxLpbkPkt", 17, 1 }, │ { "RxLpbkPkt", 17, 1 },
{ "TxLpbkPkt", 16, 1 }, │ { "TxLpbkPkt", 16, 1 },
{ "RxVfValid", 15, 1 }, │ { "RxVfValid", 15, 1 },
{ "SynLearned", 14, 1 }, │ { "SynLearned", 14, 1 },
{ "SetDelEntry", 13, 1 }, │ { "SetDelEntry", 13, 1 },
{ "SetInvEntry", 12, 1 }, │ { "SetInvEntry", 12, 1 },
{ "CpcmdDvld", 11, 1 }, │ { "CpcmdDvld", 11, 1 },
{ "CpcmdSave", 10, 1 }, │ { "CpcmdSave", 10, 1 },
{ "RxPstructsFull", 8, 2 }, │ { "RxPstructsFull", 8, 2 },
{ "EpcmdDvld", 7, 1 }, │ { "EpcmdDvld", 7, 1 },
{ "EpcmdFlush", 6, 1 }, │ { "EpcmdFlush", 6, 1 },
{ "EpcmdTrimPrefix", 5, 1 }, │ { "EpcmdTrimPrefix", 5, 1 },
{ "EpcmdTrimPostfix", 4, 1 }, │ { "EpcmdTrimPostfix", 4, 1 },
{ "ERssIp4Pkt", 3, 1 }, │ { "ERssIp4Pkt", 3, 1 },
{ "ERssIp6Pkt", 2, 1 }, │ { "ERssIp6Pkt", 2, 1 },
{ "ERssTcpUdpPkt", 1, 1 }, │ { "ERssTcpUdpPkt", 1, 1 },
{ "ERssFceFipPkt", 0, 1 }, │ { "ERssFceFipPkt", 0, 1 },
{ NULL } │ { NULL }
} │
linux/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c:949 │ linux/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c:875
│
char data_str[ARRAY_SIZE(tx_bd_info_items)][HNS3_DBG_DATA_STR_LEN]; │ char data_str[ARRAY_SIZE(rx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
struct hns3_nic_priv *priv = d->handle->priv; │ struct hns3_nic_priv *priv = d->handle->priv;
char *result[ARRAY_SIZE(tx_bd_info_items)]; │ char *result[ARRAY_SIZE(rx_bd_info_items)];
char content[HNS3_DBG_INFO_LEN]; │ char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring; │ struct hns3_enet_ring *ring;
struct hns3_desc *desc; │ struct hns3_desc *desc;
unsigned int i; │ unsigned int i;
int pos = 0; │ int pos = 0;
│
if (d->qid >= d->handle->kinfo.num_tqps) { │ if (d->qid >= d->handle->kinfo.num_tqps) {
dev_err(&d->handle->pdev->dev, │ dev_err(&d->handle->pdev->dev,
"queue%u is not in use\n", d->qid); │ "queue%u is not in use\n", d->qid);
return -EINVAL; │ return -EINVAL;
} │ }
│
for (i = 0; i < ARRAY_SIZE(tx_bd_info_items); i++) │ for (i = 0; i < ARRAY_SIZE(rx_bd_info_items); i++)
result[i] = &data_str[i][0]; │ result[i] = &data_str[i][0];
│
pos += scnprintf(buf + pos, len - pos, │ pos += scnprintf(buf + pos, len - pos,
"Queue %u tx bd info:\n", d->qid); │ "Queue %u rx bd info:\n", d->qid);
hns3_dbg_fill_content(content, sizeof(content), tx_bd_info_items, │ hns3_dbg_fill_content(content, sizeof(content), rx_bd_info_items,
NULL, ARRAY_SIZE(tx_bd_info_items)); │ NULL, ARRAY_SIZE(rx_bd_info_items));
pos += scnprintf(buf + pos, len - pos, "%s", content); │ pos += scnprintf(buf + pos, len - pos, "%s", content);
│
ring = &priv->ring[d->qid]; │ ring = &priv->ring[d->qid + d->handle->kinfo.num_tqps];
for (i = 0; i < ring->desc_num; i++) { │ for (i = 0; i < ring->desc_num; i++) {
desc = &ring->desc[i]; │ desc = &ring->desc[i];
│
hns3_dump_tx_bd_info(priv, desc, result, i); │ hns3_dump_rx_bd_info(priv, desc, result, i);
hns3_dbg_fill_content(content, sizeof(content), │ hns3_dbg_fill_content(content, sizeof(content),
tx_bd_info_items, (const char **)result, │ rx_bd_info_items, (const char **)result,
ARRAY_SIZE(tx_bd_info_items)); │ ARRAY_SIZE(rx_bd_info_items));
pos += scnprintf(buf + pos, len - pos, "%s", content); │ pos += scnprintf(buf + pos, len - pos, "%s", content);
} │ }
│
return 0; │ return 0;
} │
linux/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c:18 │ linux/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c:20
│
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); │ u32 value = readl(ioaddr + DMA_AXI_BUS_MODE);
int i; │ int i;
│
pr_info("dwmac4: Master AXI performs %s burst length\n", │ pr_info("dwmac1000: Master AXI performs %s burst length\n",
(value & DMA_SYS_BUS_FB) ? "fixed" : "any"); │ !(value & DMA_AXI_UNDEF) ? "fixed" : "any");
│
if (axi->axi_lpi_en) │ if (axi->axi_lpi_en)
value |= DMA_AXI_EN_LPI; │ value |= DMA_AXI_EN_LPI;
if (axi->axi_xit_frm) │ if (axi->axi_xit_frm)
value |= DMA_AXI_LPI_XIT_FRM; │ value |= DMA_AXI_LPI_XIT_FRM;
│
value &= ~DMA_AXI_WR_OSR_LMT; │ value &= ~DMA_AXI_WR_OSR_LMT;
value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) << │ value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) <<
DMA_AXI_WR_OSR_LMT_SHIFT; │ DMA_AXI_WR_OSR_LMT_SHIFT;
│
value &= ~DMA_AXI_RD_OSR_LMT; │ value &= ~DMA_AXI_RD_OSR_LMT;
value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) << │ value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) <<
DMA_AXI_RD_OSR_LMT_SHIFT; │ DMA_AXI_RD_OSR_LMT_SHIFT;
│
/* Depending on the UNDEF bit the Master AXI will perform any burst │ /* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are │ * length according to the BLEN programmed (by default all BLEN are
* set). │ * set).
*/ │ */
for (i = 0; i < AXI_BLEN; i++) { │ for (i = 0; i < AXI_BLEN; i++) {
switch (axi->axi_blen[i]) { │ switch (axi->axi_blen[i]) {
case 256: │ case 256:
value |= DMA_AXI_BLEN256; │ value |= DMA_AXI_BLEN256;
break; │ break;
case 128: │ case 128:
value |= DMA_AXI_BLEN128; │ value |= DMA_AXI_BLEN128;
break; │ break;
case 64: │ case 64:
value |= DMA_AXI_BLEN64; │ value |= DMA_AXI_BLEN64;
break; │ break;
case 32: │ case 32:
value |= DMA_AXI_BLEN32; │ value |= DMA_AXI_BLEN32;
break; │ break;
case 16: │ case 16:
value |= DMA_AXI_BLEN16; │ value |= DMA_AXI_BLEN16;
break; │ break;
case 8: │ case 8:
value |= DMA_AXI_BLEN8; │ value |= DMA_AXI_BLEN8;
break; │ break;
case 4: │ case 4:
value |= DMA_AXI_BLEN4; │ value |= DMA_AXI_BLEN4;
break; │ break;
} │ }
} │ }
│
writel(value, ioaddr + DMA_SYS_BUS_MODE); │ writel(value, ioaddr + DMA_AXI_BUS_MODE);
} │
linux/drivers/net/ethernet/intel/e1000e/mac.c:1575 │ linux/drivers/net/ethernet/intel/igb/e1000_mac.c:1481
│
u32 ledctl_blink = 0; │ u32 ledctl_blink = 0;
u32 i; │ u32 i;
│
if (hw->phy.media_type == e1000_media_type_fiber) { │ if (hw->phy.media_type == e1000_media_type_fiber) {
/* always blink LED0 for PCI-E fiber */ │ /* always blink LED0 for PCI-E fiber */
ledctl_blink = E1000_LEDCTL_LED0_BLINK | │ ledctl_blink = E1000_LEDCTL_LED0_BLINK |
(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); │ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
} else { │ } else {
/* Set the blink bit for each LED that's "on" (0x0E) │ /* Set the blink bit for each LED that's "on" (0x0E)
* (or "off" if inverted) in ledctl_mode2. The blink │ * (or "off" if inverted) in ledctl_mode2. The blink
* logic in hardware only works when mode is set to "on" │ * logic in hardware only works when mode is set to "on"
* so it must be changed accordingly when the mode is │ * so it must be changed accordingly when the mode is
* "off" and inverted. │ * "off" and inverted.
*/ │ */
ledctl_blink = hw->mac.ledctl_mode2; │ ledctl_blink = hw->mac.ledctl_mode2;
for (i = 0; i < 32; i += 8) { │ for (i = 0; i < 32; i += 8) {
u32 mode = (hw->mac.ledctl_mode2 >> i) & │ u32 mode = (hw->mac.ledctl_mode2 >> i) &
E1000_LEDCTL_LED0_MODE_MASK; │ E1000_LEDCTL_LED0_MODE_MASK;
u32 led_default = hw->mac.ledctl_default >> i; │ u32 led_default = hw->mac.ledctl_default >> i;
│
if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && │ if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
(mode == E1000_LEDCTL_MODE_LED_ON)) || │ (mode == E1000_LEDCTL_MODE_LED_ON)) ||
((led_default & E1000_LEDCTL_LED0_IVRT) && │ ((led_default & E1000_LEDCTL_LED0_IVRT) &&
(mode == E1000_LEDCTL_MODE_LED_OFF))) { │ (mode == E1000_LEDCTL_MODE_LED_OFF))) {
ledctl_blink &= │ ledctl_blink &=
~(E1000_LEDCTL_LED0_MODE_MASK << i); │ ~(E1000_LEDCTL_LED0_MODE_MASK << i);
ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | │ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
E1000_LEDCTL_MODE_LED_ON) << i; │ E1000_LEDCTL_MODE_LED_ON) << i;
} │ }
} │ }
} │ }
│
ew32(LEDCTL, ledctl_blink); │ wr32(E1000_LEDCTL, ledctl_blink);
│
return 0; │ return 0;
} │
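
The loop walks LEDCTL as four 8-bit per-LED fields and rewrites a field to blink-plus-on whenever its effective state is "on" (mode ON with invert clear, or mode OFF with invert set). A standalone sketch of that walk; the field constants follow the e1000 LEDCTL layout as commonly documented, but treat them as illustrative:

#include <stdint.h>

#define LED_MODE_MASK 0x0Fu   /* bits 0-3 of each 8-bit LED field */
#define LED_MODE_ON   0x0Eu
#define LED_MODE_OFF  0x0Fu
#define LED_IVRT      0x40u   /* bit 6: output inverted */
#define LED_BLINK     0x80u   /* bit 7: blink enable */

static uint32_t blinkify(uint32_t mode2, uint32_t dflt_reg)
{
        uint32_t out = mode2;
        int i;

        for (i = 0; i < 32; i += 8) {
                uint32_t mode = (mode2 >> i) & LED_MODE_MASK;
                uint32_t dflt = dflt_reg >> i;
                int on = (dflt & LED_IVRT) ? (mode == LED_MODE_OFF)
                                           : (mode == LED_MODE_ON);

                if (on) {       /* force the field to "on" and blink */
                        out &= ~(LED_MODE_MASK << i);
                        out |= (LED_BLINK | LED_MODE_ON) << i;
                }
        }
        return out;
}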
linux/drivers/net/ethernet/intel/igc/igc_main.c:1525 │ linux/drivers/net/ethernet/intel/igb/igb_main.c:8548
│
skb_checksum_none_assert(skb); │ skb_checksum_none_assert(skb);
│
/* Ignore Checksum bit is set */ │ /* Ignore Checksum bit is set */
if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) │ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
return; │ return;
│
/* Rx checksum disabled via ethtool */ │ /* Rx checksum disabled via ethtool */
if (!(ring->netdev->features & NETIF_F_RXCSUM)) │ if (!(ring->netdev->features & NETIF_F_RXCSUM))
return; │ return;
│
/* TCP/UDP checksum error bit is set */ │ /* TCP/UDP checksum error bit is set */
if (igc_test_staterr(rx_desc, │ if (igb_test_staterr(rx_desc,
IGC_RXDEXT_STATERR_L4E | │ E1000_RXDEXT_STATERR_TCPE |
IGC_RXDEXT_STATERR_IPE)) { │ E1000_RXDEXT_STATERR_IPE)) {
/* work around errata with sctp packets where the TCPE aka │ /* work around errata with sctp packets where the TCPE aka
* L4E bit is set incorrectly on 64 byte (60 byte w/o crc) │ * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
* packets (aka let the stack check the crc32c) │ * packets, (aka let the stack check the crc32c)
*/ │ */
if (!(skb->len == 60 && │ if (!((skb->len == 60) &&
test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { │ test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
u64_stats_update_begin(&ring->rx_syncp); │ u64_stats_update_begin(&ring->rx_syncp);
ring->rx_stats.csum_err++; │ ring->rx_stats.csum_err++;
u64_stats_update_end(&ring->rx_syncp); │ u64_stats_update_end(&ring->rx_syncp);
} │ }
/* let the stack verify checksum errors */ │ /* let the stack verify checksum errors */
return; │ return;
} │ }
/* It must be a TCP or UDP packet with a valid checksum */ │ /* It must be a TCP or UDP packet with a valid checksum */
if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | │ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
IGC_RXD_STAT_UDPCS)) │ E1000_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY; │ skb->ip_summed = CHECKSUM_UNNECESSARY;
│
netdev_dbg(ring->netdev, "cksum success: bits %08X\n", │ dev_dbg(ring->dev, "cksum success: bits %08X\n",
le32_to_cpu(rx_desc->wb.upper.status_error)); │ le32_to_cpu(rx_desc->wb.upper.status_error));
} │
linux/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c:181 │ linux/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c:142
│
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); │ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg; │ struct mlx5_flow_group *fg;
u32 *in, *match; │ u32 *in, *match;
│
in = kvzalloc(inlen, GFP_KERNEL); │ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) │ if (!in)
return ERR_PTR(-ENOMEM); │ return ERR_PTR(-ENOMEM);
│
MLX5_SET(create_flow_group_in, in, match_criteria_enable, │ MLX5_SET(create_flow_group_in, in, match_criteria_enable,
MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2); │ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); │ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
│
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16); │ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0); │ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag); │ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
│ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
│
MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0, │ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask()); │ mlx5_eswitch_get_vport_metadata_mask());
│
MLX5_SET(create_flow_group_in, in, start_flow_index, │ MLX5_SET(create_flow_group_in, in, start_flow_index,
MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM); │ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM);
MLX5_SET(create_flow_group_in, in, end_flow_index, │ MLX5_SET(create_flow_group_in, in, end_flow_index,
MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO); │ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO);
│
fg = mlx5_create_flow_group(ingress_ft, in); │ fg = mlx5_create_flow_group(ingress_ft, in);
│ kvfree(in);
if (IS_ERR(fg)) │ if (IS_ERR(fg))
esw_warn(esw->dev, │ esw_warn(esw->dev,
"Failed to create bridge ingress table VLAN filter flow group ( │ "Failed to create VLAN flow group for bridge ingress table (err
PTR_ERR(fg)); │ PTR_ERR(fg));
│
kvfree(in); │
return fg; │ return fg;
} │
linux/drivers/net/ethernet/renesas/sh_eth.c:933 │ linux/drivers/net/ethernet/renesas/sh_eth.c:588
│
.soft_reset = sh_eth_soft_reset_gether, │ .soft_reset = sh_eth_soft_reset_gether,
│
.chip_reset = sh_eth_chip_reset_giga, │ .chip_reset = sh_eth_chip_reset_r8a7740,
.set_duplex = sh_eth_set_duplex, │ .set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_giga, │ .set_rate = sh_eth_set_rate_gether,
│
.register_type = SH_ETH_REG_GIGABIT, │ .register_type = SH_ETH_REG_GIGABIT,
│
.edtrr_trns = EDTRR_TRNS_GETHER, │ .edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD | ECSR_MPD, │ .ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, │ .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | │ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | │ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | │ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | │ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | │ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
EESIPR_CEEFIP | EESIPR_CELFIP | │ EESIPR_CEEFIP | EESIPR_CELFIP |
EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | │ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP, │ EESIPR_PREIP | EESIPR_CERFIP,
│
.tx_check = EESR_TC1 | EESR_FTC, │ .tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | │ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | │ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE, │ EESR_TDE,
.fdr_value = 0x0000072f, │ .fdr_value = 0x0000070f,
│
.irq_flags = IRQF_SHARED, │
.apr = 1, │ .apr = 1,
.mpr = 1, │ .mpr = 1,
.tpauser = 1, │ .tpauser = 1,
.gecmr = 1, │ .gecmr = 1,
.bculr = 1, │ .bculr = 1,
.hw_swap = 1, │ .hw_swap = 1,
.rpadir = 1, │ .rpadir = 1,
.no_trimd = 1, │ .no_trimd = 1,
.no_ade = 1, │ .no_ade = 1,
.xdfar_rw = 1, │ .xdfar_rw = 1,
│ .csmr = 1,
│ .rx_csum = 1,
.tsu = 1, │ .tsu = 1,
│ .select_mii = 1,
│ .magic = 1,
.cexcr = 1, │ .cexcr = 1,
.dual_port = 1, │
} │
linux/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c:1565 │ linux/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c:1602
│
struct mac_device_info *mac = priv->hw; │ struct mac_device_info *mac = priv->hw;
│
dev_info(priv->device, "\tXGMAC2\n"); │ dev_info(priv->device, "\tXLGMAC\n");
│
priv->dev->priv_flags |= IFF_UNICAST_FLT; │ priv->dev->priv_flags |= IFF_UNICAST_FLT;
mac->pcsr = priv->ioaddr; │ mac->pcsr = priv->ioaddr;
mac->multicast_filter_bins = priv->plat->multicast_filter_bins; │ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
mac->unicast_filter_entries = priv->plat->unicast_filter_entries; │ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
mac->mcast_bits_log2 = 0; │ mac->mcast_bits_log2 = 0;
│
if (mac->multicast_filter_bins) │ if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); │ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
│
mac->link.duplex = 0; │ mac->link.duplex = 0;
mac->link.speed10 = XGMAC_CONFIG_SS_10_MII; │ mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
mac->link.speed100 = XGMAC_CONFIG_SS_100_MII; │ mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII; │ mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII; │ mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500; │ mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000; │ mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000; │ mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
mac->link.speed_mask = XGMAC_CONFIG_SS_MASK; │ mac->link.speed_mask = XLGMAC_CONFIG_SS;
│
mac->mii.addr = XGMAC_MDIO_ADDR; │ mac->mii.addr = XGMAC_MDIO_ADDR;
mac->mii.data = XGMAC_MDIO_DATA; │ mac->mii.data = XGMAC_MDIO_DATA;
mac->mii.addr_shift = 16; │ mac->mii.addr_shift = 16;
mac->mii.addr_mask = GENMASK(20, 16); │ mac->mii.addr_mask = GENMASK(20, 16);
mac->mii.reg_shift = 0; │ mac->mii.reg_shift = 0;
mac->mii.reg_mask = GENMASK(15, 0); │ mac->mii.reg_mask = GENMASK(15, 0);
mac->mii.clk_csr_shift = 19; │ mac->mii.clk_csr_shift = 19;
mac->mii.clk_csr_mask = GENMASK(21, 19); │ mac->mii.clk_csr_mask = GENMASK(21, 19);
│
return 0; │ return 0;
} │
linux/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c:273 │ linux/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c:207
│
/* RX Abnormal Interrupt Summary */ │ /* TX Abnormal Interrupt Summary */
if (int_status & SXGBE_DMA_INT_STATUS_RBU) { │ if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
ret_val |= rx_bump_tc; │ ret_val |= tx_hard_error;
clear_val |= SXGBE_DMA_INT_STATUS_RBU; │ clear_val |= SXGBE_DMA_INT_STATUS_TPS;
x->rx_underflow_irq++; │ x->tx_process_stopped_irq++;
} │
│
if (int_status & SXGBE_DMA_INT_STATUS_RPS) { │
ret_val |= rx_hard_error; │
clear_val |= SXGBE_DMA_INT_STATUS_RPS; │
x->rx_process_stopped_irq++; │
} │ }
│
if (int_status & SXGBE_DMA_INT_STATUS_FBE) { │ if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
ret_val |= rx_hard_error; │ ret_val |= tx_hard_error;
x->fatal_bus_error_irq++; │ x->fatal_bus_error_irq++;
│
/* Assumption: FBE bit is the combination of │ /* Assumption: FBE bit is the combination of
 * all the bus access errors and cleared when │  * all the bus access errors and cleared when
* the respective error bits cleared │ * the respective error bits cleared
*/ │ */
│
/* check for actual cause */ │ /* check for actual cause */
if (int_status & SXGBE_DMA_INT_STATUS_REB0) { │ if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
x->rx_read_transfer_err++; │ x->tx_read_transfer_err++;
clear_val |= SXGBE_DMA_INT_STATUS_REB0; │ clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
} else { │ } else {
x->rx_write_transfer_err++; │ x->tx_write_transfer_err++;
} │ }
│
if (int_status & SXGBE_DMA_INT_STATUS_REB1) { │ if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
x->rx_desc_access_err++; │ x->tx_desc_access_err++;
clear_val |= SXGBE_DMA_INT_STATUS_REB1; │ clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
} else { │ } else {
x->rx_buffer_access_err++; │ x->tx_buffer_access_err++;
} │ }
│
if (int_status & SXGBE_DMA_INT_STATUS_REB2) { │ if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
x->rx_data_transfer_err++; │ x->tx_data_transfer_err++;
clear_val |= SXGBE_DMA_INT_STATUS_REB2; │ clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
} │ }
│ }
│
│ /* context descriptor error */
│ if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
│ x->tx_ctxt_desc_err++;
│ clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
} │ }
} │
linux/drivers/net/ethernet/intel/igc/igc_main.c:6640 │ linux/drivers/net/ethernet/intel/igb/igb_main.c:9336
│
struct pci_dev *pdev = to_pci_dev(dev); │ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev); │ struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev); │ struct igb_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw; │ struct e1000_hw *hw = &adapter->hw;
u32 err, val; │ u32 err, val;
│
pci_set_power_state(pdev, PCI_D0); │ pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev); │ pci_restore_state(pdev);
pci_save_state(pdev); │ pci_save_state(pdev);
│
if (!pci_device_is_present(pdev)) │ if (!pci_device_is_present(pdev))
return -ENODEV; │ return -ENODEV;
err = pci_enable_device_mem(pdev); │ err = pci_enable_device_mem(pdev);
if (err) { │ if (err) {
netdev_err(netdev, "Cannot enable PCI device from suspend\n"); │ dev_err(&pdev->dev,
│ "igb: Cannot enable PCI device from suspend\n");
return err; │ return err;
} │ }
pci_set_master(pdev); │ pci_set_master(pdev);
│
pci_enable_wake(pdev, PCI_D3hot, 0); │ pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0); │ pci_enable_wake(pdev, PCI_D3cold, 0);
│
if (igc_init_interrupt_scheme(adapter, true)) { │ if (igb_init_interrupt_scheme(adapter, true)) {
netdev_err(netdev, "Unable to allocate memory for queues\n"); │ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM; │ return -ENOMEM;
} │ }
│
igc_reset(adapter); │ igb_reset(adapter);
│
/* let the f/w know that the h/w is now under the control of the │ /* let the f/w know that the h/w is now under the control of the
* driver. │ * driver.
*/ │ */
igc_get_hw_control(adapter); │ igb_get_hw_control(adapter);
│
val = rd32(IGC_WUS); │ val = rd32(E1000_WUS);
if (val & WAKE_PKT_WUS) │ if (val & WAKE_PKT_WUS)
igc_deliver_wake_packet(netdev); │ igb_deliver_wake_packet(netdev);
│
wr32(IGC_WUS, ~0); │ wr32(E1000_WUS, ~0);
│
rtnl_lock(); │ if (!rpm)
│ rtnl_lock();
if (!err && netif_running(netdev)) │ if (!err && netif_running(netdev))
err = __igc_open(netdev, true); │ err = __igb_open(netdev, true);
│
if (!err) │ if (!err)
netif_device_attach(netdev); │ netif_device_attach(netdev);
rtnl_unlock(); │ if (!rpm)
│ rtnl_unlock();
│
return err; │ return err;
} │
linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:12098 │ linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:12044
│
#define HCLGE_64_BIT_REG_RTN_DATANUM 4 │ #define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_64_BIT_DESC_NODATA_LEN 1 │ #define HCLGE_32_BIT_DESC_NODATA_LEN 2
│
struct hclge_desc *desc; │ struct hclge_desc *desc;
u64 *reg_val = data; │ u32 *reg_val = data;
__le64 *desc_data; │ __le32 *desc_data;
int nodata_len; │ int nodata_num;
int cmd_num; │ int cmd_num;
int i, k, n; │ int i, k, n;
int ret; │ int ret;
│
if (regs_num == 0) │ if (regs_num == 0)
return 0; │ return 0;
│
nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN; │ nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
cmd_num = DIV_ROUND_UP(regs_num + nodata_len, │ cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
HCLGE_64_BIT_REG_RTN_DATANUM); │ HCLGE_32_BIT_REG_RTN_DATANUM);
desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); │ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc) │ if (!desc)
return -ENOMEM; │ return -ENOMEM;
│
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true); │ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); │ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
if (ret) { │ if (ret) {
dev_err(&hdev->pdev->dev, │ dev_err(&hdev->pdev->dev,
"Query 64 bit register cmd failed, ret = %d.\n", ret); │ "Query 32 bit register cmd failed, ret = %d.\n", ret);
kfree(desc); │ kfree(desc);
return ret; │ return ret;
} │ }
│
for (i = 0; i < cmd_num; i++) { │ for (i = 0; i < cmd_num; i++) {
if (i == 0) { │ if (i == 0) {
desc_data = (__le64 *)(&desc[i].data[0]); │ desc_data = (__le32 *)(&desc[i].data[0]);
n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len; │ n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
} else { │ } else {
desc_data = (__le64 *)(&desc[i]); │ desc_data = (__le32 *)(&desc[i]);
n = HCLGE_64_BIT_REG_RTN_DATANUM; │ n = HCLGE_32_BIT_REG_RTN_DATANUM;
} │ }
for (k = 0; k < n; k++) { │ for (k = 0; k < n; k++) {
*reg_val++ = le64_to_cpu(*desc_data++); │ *reg_val++ = le32_to_cpu(*desc_data++);
│
regs_num--; │ regs_num--;
if (!regs_num) │ if (!regs_num)
break; │ break;
} │ }
} │ }
│
kfree(desc); │ kfree(desc);
return 0; │ return 0;
} │
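
The descriptor-count arithmetic is the same in both variants: each command descriptor returns HCLGE_*_REG_RTN_DATANUM register words, and the first descriptor loses nodata_num of them to the header. A quick worked example for the 32-bit variant, with regs_num of 100 as an assumed input:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int regs_num = 100;     /* assumed register count */
        int datanum  = 8;       /* 32-bit words per descriptor */
        int nodata   = 2;       /* header overhead in the first one */

        /* DIV_ROUND_UP(100 + 2, 8) = 13 descriptors */
        printf("cmd_num = %d\n", DIV_ROUND_UP(regs_num + nodata, datanum));
        return 0;
}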
linux/drivers/net/ethernet/intel/e1000e/phy.c:1419 │ linux/drivers/net/ethernet/intel/igb/e1000_phy.c:1334
│
struct e1000_mac_info *mac = &hw->mac; │ struct e1000_mac_info *mac = &hw->mac;
u32 ctrl; │ u32 ctrl;
│
/* Turn off flow control when forcing speed/duplex */ │ /* Turn off flow control when forcing speed/duplex */
hw->fc.current_mode = e1000_fc_none; │ hw->fc.current_mode = e1000_fc_none;
│
/* Force speed/duplex on the mac */ │ /* Force speed/duplex on the mac */
ctrl = er32(CTRL); │ ctrl = rd32(E1000_CTRL);
ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); │ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ctrl &= ~E1000_CTRL_SPD_SEL; │ ctrl &= ~E1000_CTRL_SPD_SEL;
│
/* Disable Auto Speed Detection */ │ /* Disable Auto Speed Detection */
ctrl &= ~E1000_CTRL_ASDE; │ ctrl &= ~E1000_CTRL_ASDE;
│
/* Disable autoneg on the phy */ │ /* Disable autoneg on the phy */
*phy_ctrl &= ~BMCR_ANENABLE; │ *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
│
/* Forcing Full or Half Duplex? */ │ /* Forcing Full or Half Duplex? */
if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { │ if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
ctrl &= ~E1000_CTRL_FD; │ ctrl &= ~E1000_CTRL_FD;
*phy_ctrl &= ~BMCR_FULLDPLX; │ *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
e_dbg("Half Duplex\n"); │ hw_dbg("Half Duplex\n");
} else { │ } else {
ctrl |= E1000_CTRL_FD; │ ctrl |= E1000_CTRL_FD;
*phy_ctrl |= BMCR_FULLDPLX; │ *phy_ctrl |= MII_CR_FULL_DUPLEX;
e_dbg("Full Duplex\n"); │ hw_dbg("Full Duplex\n");
} │ }
│
/* Forcing 10mb or 100mb? */ │ /* Forcing 10mb or 100mb? */
if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { │ if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
ctrl |= E1000_CTRL_SPD_100; │ ctrl |= E1000_CTRL_SPD_100;
*phy_ctrl |= BMCR_SPEED100; │ *phy_ctrl |= MII_CR_SPEED_100;
*phy_ctrl &= ~BMCR_SPEED1000; │ *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
e_dbg("Forcing 100mb\n"); │ hw_dbg("Forcing 100mb\n");
} else { │ } else {
ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); │ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
*phy_ctrl &= ~(BMCR_SPEED1000 | BMCR_SPEED100); │ *phy_ctrl |= MII_CR_SPEED_10;
e_dbg("Forcing 10mb\n"); │ *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
│ hw_dbg("Forcing 10mb\n");
} │ }
│
hw->mac.ops.config_collision_dist(hw); │ igb_config_collision_dist(hw);
│
ew32(CTRL, ctrl); │ wr32(E1000_CTRL, ctrl);
} │
linux/drivers/net/ethernet/intel/e1000e/phy.c:2396 │ linux/drivers/net/ethernet/intel/e1000e/phy.c:2338
│
s32 ret_val; │ s32 ret_val;
u32 page = offset >> IGP_PAGE_SHIFT; │ u32 page = offset >> IGP_PAGE_SHIFT;
│
ret_val = hw->phy.ops.acquire(hw); │ ret_val = hw->phy.ops.acquire(hw);
if (ret_val) │ if (ret_val)
return ret_val; │ return ret_val;
│
/* Page 800 works differently than the rest so it has its own func */ │ /* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) { │ if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, │ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
true, false); │ false, false);
goto release; │ goto release;
} │ }
│
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); │ hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
│
if (offset > MAX_PHY_MULTI_PAGE_REG) { │ if (offset > MAX_PHY_MULTI_PAGE_REG) {
u32 page_shift, page_select; │ u32 page_shift, page_select;
│
/* Page select is register 31 for phy address 1 and 22 for │ /* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for │ * phy address 2 and 3. Page select is shifted only for
* phy address 1. │ * phy address 1.
*/ │ */
if (hw->phy.addr == 1) { │ if (hw->phy.addr == 1) {
page_shift = IGP_PAGE_SHIFT; │ page_shift = IGP_PAGE_SHIFT;
page_select = IGP01E1000_PHY_PAGE_SELECT; │ page_select = IGP01E1000_PHY_PAGE_SELECT;
} else { │ } else {
page_shift = 0; │ page_shift = 0;
page_select = BM_PHY_PAGE_SELECT; │ page_select = BM_PHY_PAGE_SELECT;
} │ }
│
/* Page is shifted left, PHY expects (page x 32) */ │ /* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, page_select, │ ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
(page << page_shift)); │ (page << page_shift));
if (ret_val) │ if (ret_val)
goto release; │ goto release;
} │ }
│
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, │ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data); │ data);
│
release: │ release:
hw->phy.ops.release(hw); │ hw->phy.ops.release(hw);
return ret_val; │ return ret_val;
} │
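
On these multi-page PHYs the register offset carries the page in its upper bits: page = offset >> IGP_PAGE_SHIFT, and only the low five bits address a register within the page, which is why the page-select write happens first. A small sketch of that decomposition, assuming the e1000e values IGP_PAGE_SHIFT == 5 and MAX_PHY_REG_ADDRESS == 0x1F.

        #include <stdint.h>
        #include <stdio.h>

        #define IGP_PAGE_SHIFT      5     /* page lives above the 5 register bits */
        #define MAX_PHY_REG_ADDRESS 0x1F  /* registers 0..31 within a page */

        int main(void)
        {
                uint32_t offset = (769 << IGP_PAGE_SHIFT) | 17; /* page 769, reg 17 */
                uint32_t page = offset >> IGP_PAGE_SHIFT;
                uint32_t reg  = offset & MAX_PHY_REG_ADDRESS;

                /* For PHY address 1 the page-select register expects page << 5
                 * ("page x 32"); for addresses 2 and 3 it is written unshifted.
                 */
                printf("page=%u reg=%u shifted_page=%u\n",
                       page, reg, page << IGP_PAGE_SHIFT);
                return 0;
        }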
linux/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c:216      │ linux/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c:123
│
struct mss_egress_lut_addr_ctl_register lut_sel_reg; │ struct mss_ingress_lut_addr_ctl_register lut_sel_reg;
struct mss_egress_lut_ctl_register lut_op_reg; │ struct mss_ingress_lut_ctl_register lut_op_reg;
int ret; │ int ret;
│
unsigned int i; │ unsigned int i;
│
/* Select the table and row index to read */ │ /* Select the table and row index to read */
lut_sel_reg.bits_0.lut_select = table_id; │ lut_sel_reg.bits_0.lut_select = table_id;
lut_sel_reg.bits_0.lut_addr = table_index; │ lut_sel_reg.bits_0.lut_addr = table_index;
│
lut_op_reg.bits_0.lut_read = 1; │ lut_op_reg.bits_0.lut_read = 1;
lut_op_reg.bits_0.lut_write = 0; │ lut_op_reg.bits_0.lut_write = 0;
│
ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1, │ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
MSS_EGRESS_LUT_ADDR_CTL_REGISTER_ADDR, │ MSS_INGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
lut_sel_reg.word_0); │ lut_sel_reg.word_0);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1, │ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
MSS_EGRESS_LUT_CTL_REGISTER_ADDR, │ MSS_INGRESS_LUT_CTL_REGISTER_ADDR,
lut_op_reg.word_0); │ lut_op_reg.word_0);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
│
memset(packed_record, 0, sizeof(u16) * num_words); │ memset(packed_record, 0, sizeof(u16) * num_words);
│
for (i = 0; i < num_words; i += 2) { │ for (i = 0; i < num_words; i += 2) {
ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1, │ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + │ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR +
i, │ i,
&packed_record[i]); │ &packed_record[i]);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1, │ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + │ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR +
i + 1, │ i + 1,
&packed_record[i + 1]); │ &packed_record[i + 1]);
if (unlikely(ret)) │ if (unlikely(ret))
return ret; │ return ret;
} │ }
│
return 0; │ return 0;
} │
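
The egress and ingress variants drive the same LUT protocol: select table and row, latch a read, then drain the packed record from consecutive data registers, two 16-bit words per loop pass. Here is a sketch of the readout loop against a stubbed accessor; mdio_read16() and LUT_DATA_BASE are assumptions for illustration, not the driver's API, and num_words is taken to be even as in the driver's callers.

        #include <stdint.h>
        #include <string.h>

        #define LUT_DATA_BASE 0x8020 /* hypothetical data-register base */

        /* Stub standing in for the MDIO read helper; returns 0 on success. */
        static int mdio_read16(uint16_t reg, uint16_t *val)
        {
                *val = (uint16_t)reg; /* fake data: echo the address */
                return 0;
        }

        static int lut_read_record(uint16_t *rec, unsigned int num_words)
        {
                unsigned int i;
                int ret;

                memset(rec, 0, sizeof(*rec) * num_words);
                /* Data registers are consumed in adjacent pairs, as above. */
                for (i = 0; i < num_words; i += 2) {
                        ret = mdio_read16(LUT_DATA_BASE + i, &rec[i]);
                        if (ret)
                                return ret;
                        ret = mdio_read16(LUT_DATA_BASE + i + 1, &rec[i + 1]);
                        if (ret)
                                return ret;
                }
                return 0;
        }

        int main(void)
        {
                uint16_t rec[16];

                return lut_read_record(rec, 16);
        }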
linux/drivers/net/ethernet/sfc/farch.c:602                                │ linux/drivers/net/ethernet/sfc/falcon/farch.c:615
│
bool i = true; │ bool i = true;
efx_oword_t txd_ptr_tbl; │ ef4_oword_t txd_ptr_tbl;
struct efx_channel *channel; │ struct ef4_channel *channel;
struct efx_tx_queue *tx_queue; │ struct ef4_tx_queue *tx_queue;
│
efx_for_each_channel(channel, efx) { │ ef4_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) { │ ef4_for_each_channel_tx_queue(tx_queue, channel) {
efx_reado_table(efx, &txd_ptr_tbl, │ ef4_reado_table(efx, &txd_ptr_tbl,
FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); │ FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
if (EFX_OWORD_FIELD(txd_ptr_tbl, │ if (EF4_OWORD_FIELD(txd_ptr_tbl,
FRF_AZ_TX_DESCQ_FLUSH) || │ FRF_AZ_TX_DESCQ_FLUSH) ||
EFX_OWORD_FIELD(txd_ptr_tbl, │ EF4_OWORD_FIELD(txd_ptr_tbl,
FRF_AZ_TX_DESCQ_EN)) { │ FRF_AZ_TX_DESCQ_EN)) {
netif_dbg(efx, hw, efx->net_dev, │ netif_dbg(efx, hw, efx->net_dev,
"flush did not complete on TXQ %d\n", │ "flush did not complete on TXQ %d\n",
tx_queue->queue); │ tx_queue->queue);
i = false; │ i = false;
} else if (atomic_cmpxchg(&tx_queue->flush_outstanding, │ } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
1, 0)) { │ 1, 0)) {
/* The flush is complete, but we didn't │ /* The flush is complete, but we didn't
* receive a flush completion event │ * receive a flush completion event
*/ │ */
netif_dbg(efx, hw, efx->net_dev, │ netif_dbg(efx, hw, efx->net_dev,
"flush complete on TXQ %d, so drain " │ "flush complete on TXQ %d, so drain "
"the queue\n", tx_queue->queue); │ "the queue\n", tx_queue->queue);
/* Don't need to increment active_queues as it │ /* Don't need to increment active_queues as it
* has already been incremented for the queues │ * has already been incremented for the queues
* which did not drain │ * which did not drain
*/ │ */
efx_farch_magic_event(channel, │ ef4_farch_magic_event(channel,
EFX_CHANNEL_MAGIC_TX_DRAIN( │ EF4_CHANNEL_MAGIC_TX_DRAIN(
tx_queue)); │ tx_queue));
} │ }
} │ }
} │ }
│
return i; │ return i;
} │
linux/drivers/net/ethernet/intel/ixgbevf/ipsec.c:213                      │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c:429
│
struct net_device *dev = xs->xso.real_dev; │ struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data; │ unsigned char *key_data;
char *alg_name = NULL; │ char *alg_name = NULL;
int key_len; │ int key_len;
│
if (!xs->aead) { │ if (!xs->aead) {
netdev_err(dev, "Unsupported IPsec algorithm\n"); │ netdev_err(dev, "Unsupported IPsec algorithm\n");
return -EINVAL; │ return -EINVAL;
} │ }
│
if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) { │ if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
netdev_err(dev, "IPsec offload requires %d bit authentication\n", │ netdev_err(dev, "IPsec offload requires %d bit authentication\n",
IXGBE_IPSEC_AUTH_BITS); │ IXGBE_IPSEC_AUTH_BITS);
return -EINVAL; │ return -EINVAL;
} │ }
│
key_data = &xs->aead->alg_key[0]; │ key_data = &xs->aead->alg_key[0];
key_len = xs->aead->alg_key_len; │ key_len = xs->aead->alg_key_len;
alg_name = xs->aead->alg_name; │ alg_name = xs->aead->alg_name;
│
if (strcmp(alg_name, aes_gcm_name)) { │ if (strcmp(alg_name, aes_gcm_name)) {
netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", │ netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
aes_gcm_name); │ aes_gcm_name);
return -EINVAL; │ return -EINVAL;
} │ }
│
/* The key bytes come down in a big endian array of bytes, so │ /* The key bytes come down in a bigendian array of bytes, so
* we don't need to do any byte swapping. │ * we don't need to do any byteswapping.
* 160 accounts for 16 byte key and 4 byte salt │ * 160 accounts for 16 byte key and 4 byte salt
*/ │ */
if (key_len > IXGBE_IPSEC_KEY_BITS) { │ if (key_len == IXGBE_IPSEC_KEY_BITS) {
*mysalt = ((u32 *)key_data)[4]; │ *mysalt = ((u32 *)key_data)[4];
} else if (key_len == IXGBE_IPSEC_KEY_BITS) { │ } else if (key_len != (IXGBE_IPSEC_KEY_BITS - (sizeof(*mysalt) * 8))) {
*mysalt = 0; │
} else { │
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n"); │ 		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
return -EINVAL; │ return -EINVAL;
│ } else {
                                                                          │ 		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
│ *mysalt = 0;
} │ }
memcpy(mykey, key_data, 16); │ memcpy(mykey, key_data, 16);
│
return 0; │ return 0;
} │
linux/drivers/net/ethernet/amd/amd8111e.c:1032                            │ linux/drivers/net/ethernet/amd/amd8111e.c:984
│
│
tx_pkt_size = tx_data_rate/tx_pkt_rate; │ rx_pkt_size = rx_data_rate/rx_pkt_rate;
if (tx_pkt_size < 128) { │ if (rx_pkt_size < 128) {
│ if (coal_conf->rx_coal_type != NO_COALESCE) {
if (coal_conf->tx_coal_type != NO_COALESCE) { │
│ coal_conf->rx_timeout = 0;
coal_conf->tx_timeout = 0; │ coal_conf->rx_event_count = 0;
coal_conf->tx_event_count = 0; │ amd8111e_set_coalesce(dev, RX_INTR_COAL);
amd8111e_set_coalesce(dev, TX_INTR_COAL); │ coal_conf->rx_coal_type = NO_COALESCE;
coal_conf->tx_coal_type = NO_COALESCE; │
} │ }
│
} else if ((tx_pkt_size >= 128) && (tx_pkt_size < 512)) { │ } else if ((rx_pkt_size >= 128) && (rx_pkt_size < 512)) {
│
if (coal_conf->tx_coal_type != LOW_COALESCE) { │
coal_conf->tx_timeout = 1; │
coal_conf->tx_event_count = 2; │
amd8111e_set_coalesce(dev, TX_INTR_COAL); │
coal_conf->tx_coal_type = LOW_COALESCE; │
│
│ if (coal_conf->rx_coal_type != LOW_COALESCE) {
│ coal_conf->rx_timeout = 1;
│ coal_conf->rx_event_count = 4;
│ amd8111e_set_coalesce(dev, RX_INTR_COAL);
│ coal_conf->rx_coal_type = LOW_COALESCE;
} │ }
} else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)) { │ } else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)) {
│
if (coal_conf->tx_coal_type != MEDIUM_COALESCE) { │ if (coal_conf->rx_coal_type != MEDIUM_COALESCE) {
coal_conf->tx_timeout = 2; │ coal_conf->rx_timeout = 1;
coal_conf->tx_event_count = 5; │ coal_conf->rx_event_count = 4;
amd8111e_set_coalesce(dev, TX_INTR_COAL); │ amd8111e_set_coalesce(dev, RX_INTR_COAL);
coal_conf->tx_coal_type = MEDIUM_COALESCE; │ coal_conf->rx_coal_type = MEDIUM_COALESCE;
} │ }
} else if (tx_pkt_size >= 1024) { │
if (coal_conf->tx_coal_type != HIGH_COALESCE) { │ } else if (rx_pkt_size >= 1024) {
coal_conf->tx_timeout = 4; │
coal_conf->tx_event_count = 8; │ if (coal_conf->rx_coal_type != HIGH_COALESCE) {
amd8111e_set_coalesce(dev, TX_INTR_COAL); │ coal_conf->rx_timeout = 2;
coal_conf->tx_coal_type = HIGH_COALESCE; │ coal_conf->rx_event_count = 3;
│ amd8111e_set_coalesce(dev, RX_INTR_COAL);
│ coal_conf->rx_coal_type = HIGH_COALESCE;
} │ }
} │ }
} │
linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3075                 │ linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3044
│
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); │ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
│
mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; │ mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; │ mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; │ mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; │ mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; │ mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; │ mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; │ mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; │ mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; │ mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; │ mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; │ mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops; │ mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; │ mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; │ mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; │ mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; │ mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; │ mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; │ mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; │ mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; │ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
│ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
│
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); │ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
} │
linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3105                 │ linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3044
│
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); │ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
│
mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; │ mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; │ mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; │ mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; │ mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; │ mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; │ mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; │ mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; │ mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; │ mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; │ mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; │ mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; │ mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; │ mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; │ mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; │ mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; │ mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; │ mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; │ mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; │ mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; │ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
│ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
│
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); │ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
} │
linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3135                 │ linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3044
│
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); │ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
│
mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; │ mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; │ mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; │ mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops; │ mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; │ mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; │ mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; │ mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops; │ mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; │ mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; │ mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; │ mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; │ mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; │ mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; │ mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; │ mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; │ mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; │ mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; │ mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; │ mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4; │ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
│ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
│
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); │ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
} │
linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3105                 │ linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3075
│
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); │ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
│
mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; │ mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; │ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; │ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; │ mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; │ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; │ mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; │ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; │ mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; │ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; │ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; │ mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; │ mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; │ mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; │ mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; │ mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; │ mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; │ mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; │ mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; │ mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; │ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
│
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); │ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
} │
linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3135                 │ linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3075
│
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); │ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
│
mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; │ mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; │ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; │ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops; │ mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; │ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; │ mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; │ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops; │ mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; │ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; │ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; │ mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; │ mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; │ mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; │ mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; │ mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; │ mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; │ mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; │ mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; │ mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4; │ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
│
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); │ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
} │
linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3135                 │ linux/drivers/net/ethernet/mellanox/mlxsw/spectrum.c:3105
│
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); │ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
│
mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; │ mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; │ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; │ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops; │ mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; │ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; │ mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; │ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops; │ mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; │ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; │ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; │ mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; │ mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; │ mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; │ mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; │ mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; │ mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; │ mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; │ mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; │ mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4; │ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
│
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); │ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
} │
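
All of these init variants differ only in which generation's ops tables they hang off the shared mlxsw_sp structure before calling the common mlxsw_sp_init(). The sketch below shows the general ops-bundle idea in miniature; it is a generic illustration of the dispatch pattern, not mlxsw's actual layout, which assigns each ops pointer individually per ASIC generation.

        #include <stdio.h>

        /* Hypothetical per-generation bundle of ops identifiers. */
        struct chip_ops {
                const char *kvdl;
                const char *sb;
                const char *span;
        };

        static const struct chip_ops sp2_ops = { "sp2_kvdl", "sp2_sb", "sp2_span" };
        static const struct chip_ops sp3_ops = { "sp2_kvdl", "sp3_sb", "sp3_span" };

        struct chip {
                const struct chip_ops *ops;
        };

        /* One assignment installs the whole generation-specific surface. */
        static void chip_init(struct chip *c, const struct chip_ops *ops)
        {
                c->ops = ops;
        }

        int main(void)
        {
                struct chip c;

                chip_init(&c, &sp3_ops);
                printf("%s %s %s\n", c.ops->kvdl, c.ops->sb, c.ops->span);
                return 0;
        }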
linux/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:1360             │ linux/drivers/net/ethernet/cavium/liquidio/lio_main.c:2234
│
│ int retval;
struct octeon_soft_command *sc; │ struct octeon_soft_command *sc;
int ring_doorbell; │
struct lio *lio; │ struct lio *lio;
int retval; │ int ring_doorbell;
u32 len; │ u32 len;
│
lio = finfo->lio; │ lio = finfo->lio;
│
sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, │ sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
sizeof(struct oct_timestamp_resp)); │ sizeof(struct oct_timestamp_resp));
finfo->sc = sc; │ finfo->sc = sc;
│
if (!sc) { │ if (!sc) {
dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); │ dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
return IQ_SEND_FAILED; │ return IQ_SEND_FAILED;
} │ }
│
if (ndata->reqtype == REQTYPE_NORESP_NET) │ if (ndata->reqtype == REQTYPE_NORESP_NET)
ndata->reqtype = REQTYPE_RESP_NET; │ ndata->reqtype = REQTYPE_RESP_NET;
else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) │ else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
ndata->reqtype = REQTYPE_RESP_NET_SG; │ ndata->reqtype = REQTYPE_RESP_NET_SG;
│
sc->callback = handle_timestamp; │ sc->callback = handle_timestamp;
sc->callback_arg = finfo->skb; │ sc->callback_arg = finfo->skb;
sc->iq_no = ndata->q_no; │ sc->iq_no = ndata->q_no;
│
len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz; │ if (OCTEON_CN23XX_PF(oct))
│ len = (u32)((struct octeon_instr_ih3 *)
│ (&sc->cmd.cmd3.ih3))->dlengsz;
│ else
│ len = (u32)((struct octeon_instr_ih2 *)
│ (&sc->cmd.cmd2.ih2))->dlengsz;
│
ring_doorbell = !xmit_more; │ ring_doorbell = !xmit_more;
│
retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, │ retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
sc, len, ndata->reqtype); │ sc, len, ndata->reqtype);
│
if (retval == IQ_SEND_FAILED) { │ if (retval == IQ_SEND_FAILED) {
dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", │ dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
retval); │ retval);
octeon_free_soft_command(oct, sc); │ octeon_free_soft_command(oct, sc);
} else { │ } else {
netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); │ netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
} │ }
│
return retval; │ return retval;
} │
linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:2322        │ linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:2366
│
struct hclge_rx_priv_wl_buf *req; │ struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
struct hclge_priv_buf *priv; │ struct hclge_rx_com_thrd *req;
struct hclge_desc desc[2]; │ struct hclge_desc desc[2];
│ struct hclge_tc_thrd *tc;
int i, j; │ int i, j;
int ret; │ int ret;
│
for (i = 0; i < 2; i++) { │ for (i = 0; i < 2; i++) {
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, │ hclge_cmd_setup_basic_desc(&desc[i],
false); │ HCLGE_OPC_RX_COM_THRD_ALLOC, false);
req = (struct hclge_rx_priv_wl_buf *)desc[i].data; │ req = (struct hclge_rx_com_thrd *)&desc[i].data;
│
		/* The first descriptor sets the NEXT bit to 1 */                │ 		/* The first descriptor sets the NEXT bit to 1 */
if (i == 0) │ if (i == 0)
desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); │ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
else │ else
desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); │ desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
│
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { │ for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; │ tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
│
priv = &buf_alloc->priv_buf[idx]; │ req->com_thrd[j].high =
req->tc_wl[j].high = │ cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); │ req->com_thrd[j].high |=
req->tc_wl[j].high |= │ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); │ req->com_thrd[j].low =
req->tc_wl[j].low = │ cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); │ req->com_thrd[j].low |=
req->tc_wl[j].low |= │
cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); │ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
} │ }
} │ }
│
	/* Send 2 descriptors at one time */                                  │ 	/* Send 2 descriptors at one time */
ret = hclge_cmd_send(&hdev->hw, desc, 2); │ ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret) │ if (ret)
dev_err(&hdev->pdev->dev, │ dev_err(&hdev->pdev->dev,
"rx private waterline config cmd failed %d\n", │ "common threshold config cmd failed %d\n", ret);
ret); │
return ret; │ return ret;
} │
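
Both commands encode each per-TC threshold the same way: scale the byte count down by the buffer-unit shift, set the enable bit on the half-word, and store the result little-endian across two chained descriptors. A sketch of the per-field encoding follows; BUF_UNIT_SHIFT and RX_PRIV_EN_BIT are assumed stand-ins for HCLGE_BUF_UNIT_S and HCLGE_RX_PRIV_EN_B, whose actual values are not shown in this excerpt.

        #include <stdint.h>
        #include <stdio.h>

        #define BUF_UNIT_SHIFT 7   /* assumed stand-in for HCLGE_BUF_UNIT_S */
        #define RX_PRIV_EN_BIT 15  /* assumed stand-in for HCLGE_RX_PRIV_EN_B */

        /* Scale a byte threshold into hardware units and tag it enabled. */
        static uint16_t encode_thrd(uint32_t bytes)
        {
                return (uint16_t)((bytes >> BUF_UNIT_SHIFT) |
                                  (1u << RX_PRIV_EN_BIT));
        }

        int main(void)
        {
                printf("0x%04x\n", encode_thrd(0x3000)); /* 0x8060 with these bits */
                return 0;
        }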
linux/drivers/net/ethernet/sfc/farch.c:2859                               │ linux/drivers/net/ethernet/sfc/falcon/farch.c:2779
│
struct efx_farch_filter_state *state = efx->filter_state; │ struct ef4_farch_filter_state *state = efx->filter_state;
enum efx_farch_filter_table_id table_id; │ enum ef4_farch_filter_table_id table_id;
struct efx_farch_filter_table *table; │ struct ef4_farch_filter_table *table;
efx_oword_t filter; │ ef4_oword_t filter;
unsigned int filter_idx; │ unsigned int filter_idx;
│
down_write(&state->lock); │ spin_lock_bh(&efx->filter_lock);
│
for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; │ for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; │ table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
table_id++) { │ table_id++) {
table = &state->table[table_id]; │ table = &state->table[table_id];
│
for (filter_idx = 0; filter_idx < table->size; filter_idx++) { │ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
if (!test_bit(filter_idx, table->used_bitmap) || │ if (!test_bit(filter_idx, table->used_bitmap) ||
table->spec[filter_idx].dmaq_id >= │ table->spec[filter_idx].dmaq_id >=
efx->n_rx_channels) │ efx->n_rx_channels)
continue; │ continue;
│
if (efx->rx_scatter) │ if (efx->rx_scatter)
table->spec[filter_idx].flags |= │ table->spec[filter_idx].flags |=
EFX_FILTER_FLAG_RX_SCATTER; │ EF4_FILTER_FLAG_RX_SCATTER;
else │ else
table->spec[filter_idx].flags &= │ table->spec[filter_idx].flags &=
~EFX_FILTER_FLAG_RX_SCATTER; │ ~EF4_FILTER_FLAG_RX_SCATTER;
│
if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF) │ if (table_id == EF4_FARCH_FILTER_TABLE_RX_DEF)
/* Pushed by efx_farch_filter_push_rx_config() */ │ /* Pushed by ef4_farch_filter_push_rx_config() */
continue; │ continue;
│
efx_farch_filter_build(&filter, &table->spec[filter_idx]); │ ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
efx_writeo(efx, &filter, │ ef4_writeo(efx, &filter,
table->offset + table->step * filter_idx); │ table->offset + table->step * filter_idx);
} │ }
} │ }
│
efx_farch_filter_push_rx_config(efx); │ ef4_farch_filter_push_rx_config(efx);
│
up_write(&state->lock); │ spin_unlock_bh(&efx->filter_lock);
} │
linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c:1804   │ linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c:1648
│
struct mlx5dr_match_misc *misc = &value->misc; │ struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap; │ struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn; │ struct mlx5dr_domain *dmn = sb->dmn;
struct mlx5dr_domain *vport_dmn; │ struct mlx5dr_domain *vport_dmn;
u8 *bit_mask = sb->bit_mask; │ u8 *bit_mask = sb->bit_mask;
│ bool source_gvmi_set;
│
DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn); │ DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
│
if (sb->vhca_id_valid) { │ if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */ │ /* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi) │ if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
vport_dmn = dmn; │ vport_dmn = dmn;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id == │ else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
dmn->peer_dmn->info.caps.gvmi)) │ dmn->peer_dmn->info.caps.gvmi))
vport_dmn = dmn->peer_dmn; │ vport_dmn = dmn->peer_dmn;
else │ else
return -EINVAL; │ return -EINVAL;
│
misc->source_eswitch_owner_vhca_id = 0; │ misc->source_eswitch_owner_vhca_id = 0;
} else { │ } else {
vport_dmn = dmn; │ vport_dmn = dmn;
} │ }
│
if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi)) │ source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
return 0; │ if (source_gvmi_set) {
│ vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn,
│ misc->source_port);
│ if (!vport_cap) {
│ mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
│ misc->source_port);
│ return -EINVAL;
│ }
│
vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port); │ if (vport_cap->vport_gvmi)
	if (!vport_cap) {                                                     │ 			MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n", │
misc->source_port); │
return -EINVAL; │
} │
│
if (vport_cap->vport_gvmi) │ misc->source_port = 0;
MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi); │ }
│
misc->source_port = 0; │
return 0; │ return 0;
} │
linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c:171               │ linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c:205
│
if (f->fs.type) { │ if (f->fs.type) {
set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W, │ set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W,
WORD_MASK, f->fs.nat_lip[15] | │ WORD_MASK, f->fs.nat_fip[15] |
f->fs.nat_lip[14] << 8 | │ f->fs.nat_fip[14] << 8 |
f->fs.nat_lip[13] << 16 | │ f->fs.nat_fip[13] << 16 |
(u64)f->fs.nat_lip[12] << 24, 1); │ (u64)f->fs.nat_fip[12] << 24, 1);
│
set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1, │ set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
WORD_MASK, f->fs.nat_lip[11] | │ WORD_MASK, f->fs.nat_fip[11] |
f->fs.nat_lip[10] << 8 | │ f->fs.nat_fip[10] << 8 |
f->fs.nat_lip[9] << 16 | │ f->fs.nat_fip[9] << 16 |
(u64)f->fs.nat_lip[8] << 24, 1); │ (u64)f->fs.nat_fip[8] << 24, 1);
│
set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2, │ set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
WORD_MASK, f->fs.nat_lip[7] | │ WORD_MASK, f->fs.nat_fip[7] |
f->fs.nat_lip[6] << 8 | │ f->fs.nat_fip[6] << 8 |
f->fs.nat_lip[5] << 16 | │ f->fs.nat_fip[5] << 16 |
(u64)f->fs.nat_lip[4] << 24, 1); │ (u64)f->fs.nat_fip[4] << 24, 1);
│
│ set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
│ WORD_MASK, f->fs.nat_fip[3] |
│ f->fs.nat_fip[2] << 8 |
│ f->fs.nat_fip[1] << 16 |
│ (u64)f->fs.nat_fip[0] << 24, 1);
│
set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3, │
WORD_MASK, f->fs.nat_lip[3] | │
f->fs.nat_lip[2] << 8 | │
f->fs.nat_lip[1] << 16 | │
(u64)f->fs.nat_lip[0] << 24, 1); │
} else { │ } else {
set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W, │ set_tcb_field(adap, f, tid,
WORD_MASK, f->fs.nat_lip[3] | │ TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W,
f->fs.nat_lip[2] << 8 | │ WORD_MASK, f->fs.nat_fip[3] |
f->fs.nat_lip[1] << 16 | │ f->fs.nat_fip[2] << 8 |
(u64)f->fs.nat_lip[0] << 24, 1); │ f->fs.nat_fip[1] << 16 |
│ (u64)f->fs.nat_fip[0] << 24, 1);
} │ }
} │
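
Each set_tcb_field() call above packs four bytes of the 16-byte address into one 32-bit word, with the highest-indexed byte of the group in the low bits, walking from byte 15 down to byte 0 across four consecutive TCB words. A standalone sketch of that packing:

        #include <stdint.h>
        #include <stdio.h>

        /* Pack addr[15..12], addr[11..8], addr[7..4], addr[3..0] into four
         * words exactly as the filter code does for nat_lip/nat_fip.
         */
        static void pack_ip6_words(const uint8_t addr[16], uint32_t words[4])
        {
                int w;

                for (w = 0; w < 4; w++) {
                        const uint8_t *p = &addr[12 - 4 * w];

                        words[w] = (uint32_t)p[3] | (uint32_t)p[2] << 8 |
                                   (uint32_t)p[1] << 16 | (uint32_t)p[0] << 24;
                }
        }

        int main(void)
        {
                uint8_t ip[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x01 };
                uint32_t w[4];

                pack_ip6_words(ip, w);
                printf("%08x %08x %08x %08x\n", w[0], w[1], w[2], w[3]);
                return 0;
        }

Writing w[0]..w[3] to the base TCB word and the next three reproduces the four calls in the f->fs.type (IPv6) branch.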
linux/drivers/net/ethernet/intel/igb/e1000_phy.c:170                      │ linux/drivers/net/ethernet/intel/igb/e1000_phy.c:114
│
struct e1000_phy_info *phy = &hw->phy; │ struct e1000_phy_info *phy = &hw->phy;
u32 i, mdic = 0; │ u32 i, mdic = 0;
s32 ret_val = 0; │ s32 ret_val = 0;
│
if (offset > MAX_PHY_REG_ADDRESS) { │ if (offset > MAX_PHY_REG_ADDRESS) {
hw_dbg("PHY Address %d is out of range\n", offset); │ hw_dbg("PHY Address %d is out of range\n", offset);
ret_val = -E1000_ERR_PARAM; │ ret_val = -E1000_ERR_PARAM;
goto out; │ goto out;
} │ }
│
/* Set up Op-code, Phy Address, and register offset in the MDI │ /* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the │ * Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data. │ * PHY to retrieve the desired data.
*/ │ */
mdic = (((u32)data) | │ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
(offset << E1000_MDIC_REG_SHIFT) | │
(phy->addr << E1000_MDIC_PHY_SHIFT) | │ (phy->addr << E1000_MDIC_PHY_SHIFT) |
(E1000_MDIC_OP_WRITE)); │ (E1000_MDIC_OP_READ));
│
wr32(E1000_MDIC, mdic); │ wr32(E1000_MDIC, mdic);
│
	/* Poll the ready bit to see if the MDI write completed              │ 	/* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with │ * Increasing the time out as testing showed failures with
* the lower time out │ * the lower time out
*/ │ */
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { │ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
udelay(50); │ udelay(50);
mdic = rd32(E1000_MDIC); │ mdic = rd32(E1000_MDIC);
if (mdic & E1000_MDIC_READY) │ if (mdic & E1000_MDIC_READY)
break; │ break;
} │ }
if (!(mdic & E1000_MDIC_READY)) { │ if (!(mdic & E1000_MDIC_READY)) {
hw_dbg("MDI Write did not complete\n"); │ hw_dbg("MDI Read did not complete\n");
ret_val = -E1000_ERR_PHY; │ ret_val = -E1000_ERR_PHY;
goto out; │ goto out;
} │ }
if (mdic & E1000_MDIC_ERROR) { │ if (mdic & E1000_MDIC_ERROR) {
hw_dbg("MDI Error\n"); │ hw_dbg("MDI Error\n");
ret_val = -E1000_ERR_PHY; │ ret_val = -E1000_ERR_PHY;
goto out; │ goto out;
} │ }
│ *data = (u16) mdic;
│
out: │ out:
return ret_val; │ return ret_val;
} │
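
Read and write build the same MDIC command word; only the opcode differs and the write ORs the data into the low 16 bits, while completion is a poll on READY followed by an ERROR check (a read's result comes back in those same low 16 bits). A host-side sketch of the word layout, using the e1000 MDIC field positions (register at bit 16, PHY address at bit 21):

        #include <stdint.h>
        #include <stdio.h>

        #define E1000_MDIC_REG_SHIFT 16
        #define E1000_MDIC_PHY_SHIFT 21
        #define E1000_MDIC_OP_WRITE  0x04000000
        #define E1000_MDIC_OP_READ   0x08000000
        #define E1000_MDIC_READY     0x10000000
        #define E1000_MDIC_ERROR     0x40000000

        static uint32_t mdic_read_cmd(uint32_t phy_addr, uint32_t reg)
        {
                return (reg << E1000_MDIC_REG_SHIFT) |
                       (phy_addr << E1000_MDIC_PHY_SHIFT) | E1000_MDIC_OP_READ;
        }

        static uint32_t mdic_write_cmd(uint32_t phy_addr, uint32_t reg,
                                       uint16_t data)
        {
                return (uint32_t)data | (reg << E1000_MDIC_REG_SHIFT) |
                       (phy_addr << E1000_MDIC_PHY_SHIFT) | E1000_MDIC_OP_WRITE;
        }

        /* Completion check applied after each poll iteration:
         * 0 = keep polling, 1 = done, -1 = MDI error.
         */
        static int mdic_done(uint32_t mdic, uint16_t *data)
        {
                if (!(mdic & E1000_MDIC_READY))
                        return 0;
                if (mdic & E1000_MDIC_ERROR)
                        return -1;
                if (data)
                        *data = (uint16_t)mdic; /* low 16 bits hold the result */
                return 1;
        }

        int main(void)
        {
                uint16_t val;

                printf("read cmd:  0x%08x\n", mdic_read_cmd(1, 2));
                printf("write cmd: 0x%08x\n", mdic_write_cmd(1, 2, 0xBEEF));
                printf("done: %d\n", mdic_done(E1000_MDIC_READY | 0x1234, &val));
                return 0;
        }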
linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c:1262     │ linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c:1298
│
struct hclge_rx_priv_wl_buf *rx_priv_wl; │ struct hclge_rx_com_thrd *rx_com_thrd;
struct hclge_desc desc[2]; │ struct hclge_desc desc[2];
int pos = 0; │ int pos = 0;
int i, ret; │ int i, ret;
│
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); │ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); │ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); │ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2); │ ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret) { │ if (ret) {
dev_err(&hdev->pdev->dev, │ dev_err(&hdev->pdev->dev,
"failed to dump rx priv wl buf, ret = %d\n", ret); │ "failed to dump rx common threshold, ret = %d\n", ret);
return ret; │ return ret;
} │ }
│
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; │ pos += scnprintf(buf + pos, len - pos, "\n");
│ rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) │ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
pos += scnprintf(buf + pos, len - pos, │ pos += scnprintf(buf + pos, len - pos,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, │ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
le16_to_cpu(rx_priv_wl->tc_wl[i].high), │ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
le16_to_cpu(rx_priv_wl->tc_wl[i].low)); │ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
│
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; │ rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) │ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
pos += scnprintf(buf + pos, len - pos, │ pos += scnprintf(buf + pos, len - pos,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", │ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC, │ i + HCLGE_TC_NUM_ONE_DESC,
le16_to_cpu(rx_priv_wl->tc_wl[i].high), │ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
le16_to_cpu(rx_priv_wl->tc_wl[i].low)); │ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
│
return pos; │ return pos;
} │
linux/drivers/net/ethernet/intel/igb/igb_ethtool.c:735                    │ linux/drivers/net/ethernet/intel/igc/igc_ethtool.c:458
│
struct igb_adapter *adapter = netdev_priv(netdev); │ struct igc_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; │ struct igc_hw *hw = &adapter->hw;
u16 *eeprom_buff; │
int first_word, last_word; │ int first_word, last_word;
│ u16 *eeprom_buff;
int ret_val = 0; │ int ret_val = 0;
u16 i; │ u16 i;
│
if (eeprom->len == 0) │ if (eeprom->len == 0)
return -EINVAL; │ return -EINVAL;
│
eeprom->magic = hw->vendor_id | (hw->device_id << 16); │ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
│
first_word = eeprom->offset >> 1; │ first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1; │ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
│
eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), │ eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
GFP_KERNEL); │ GFP_KERNEL);
if (!eeprom_buff) │ if (!eeprom_buff)
return -ENOMEM; │ return -ENOMEM;
│
if (hw->nvm.type == e1000_nvm_eeprom_spi) │ if (hw->nvm.type == igc_nvm_eeprom_spi) {
ret_val = hw->nvm.ops.read(hw, first_word, │ ret_val = hw->nvm.ops.read(hw, first_word,
last_word - first_word + 1, │ last_word - first_word + 1,
eeprom_buff); │ eeprom_buff);
else { │ } else {
for (i = 0; i < last_word - first_word + 1; i++) { │ for (i = 0; i < last_word - first_word + 1; i++) {
ret_val = hw->nvm.ops.read(hw, first_word + i, 1, │ ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
&eeprom_buff[i]); │ &eeprom_buff[i]);
if (ret_val) │ if (ret_val)
break; │ break;
} │ }
} │ }
│
/* Device's eeprom is always little-endian, word addressable */ │ /* Device's eeprom is always little-endian, word addressable */
for (i = 0; i < last_word - first_word + 1; i++) │ for (i = 0; i < last_word - first_word + 1; i++)
le16_to_cpus(&eeprom_buff[i]); │ le16_to_cpus(&eeprom_buff[i]);
│
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), │ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
eeprom->len); │ eeprom->len);
kfree(eeprom_buff); │ kfree(eeprom_buff);
│
return ret_val; │ return ret_val;
} │
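
The NVM is word addressable while the ethtool request is byte granular, hence the >> 1 conversion on both ends of the range and the (offset & 1) skew applied when copying out of the word buffer. A quick sketch of the index math:

        #include <stdio.h>

        int main(void)
        {
                unsigned int offset = 3, len = 6;      /* byte-granular request */
                unsigned int first_word = offset >> 1; /* word holding first byte */
                unsigned int last_word = (offset + len - 1) >> 1;
                unsigned int nwords = last_word - first_word + 1;

                /* The copy starts (offset & 1) bytes into the word buffer. */
                printf("words %u..%u (%u words), byte skew %u\n",
                       first_word, last_word, nwords, offset & 1);
                return 0;
        }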
linux/drivers/net/ethernet/netronome/nfp/bpf/jit.c:2171                   │ linux/drivers/net/ethernet/netronome/nfp/bpf/jit.c:2059
│
const struct bpf_insn *insn = &meta->insn; │ const struct bpf_insn *insn = &meta->insn;
u64 umin, umax; │ u64 umin, umax;
u8 dst, src; │ u8 dst, src;
│
dst = insn->dst_reg * 2; │ dst = insn->dst_reg * 2;
umin = meta->umin_src; │ umin = meta->umin_src;
umax = meta->umax_src; │ umax = meta->umax_src;
if (umin == umax) │ if (umin == umax)
return __shr_imm64(nfp_prog, dst, umin); │ return __shl_imm64(nfp_prog, dst, umin);
│
src = insn->src_reg * 2; │ src = insn->src_reg * 2;
if (umax < 32) { │ if (umax < 32) {
shr_reg64_lt32(nfp_prog, dst, src); │ shl_reg64_lt32(nfp_prog, dst, src);
} else if (umin >= 32) { │ } else if (umin >= 32) {
shr_reg64_ge32(nfp_prog, dst, src); │ shl_reg64_ge32(nfp_prog, dst, src);
} else { │ } else {
/* Generate different instruction sequences depending on runtime │ /* Generate different instruction sequences depending on runtime
* value of shift amount. │ * value of shift amount.
*/ │ */
u16 label_ge32, label_end; │ u16 label_ge32, label_end;
│
label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; │ label_ge32 = nfp_prog_current_offset(nfp_prog) + 7;
emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); │ emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
shr_reg64_lt32_low(nfp_prog, dst, src); │
│ shl_reg64_lt32_high(nfp_prog, dst, src);
label_end = nfp_prog_current_offset(nfp_prog) + 6; │ label_end = nfp_prog_current_offset(nfp_prog) + 6;
emit_br(nfp_prog, BR_UNC, label_end, 2); │ emit_br(nfp_prog, BR_UNC, label_end, 2);
/* shr_reg64_lt32_high packed in delay slot. */ │ /* shl_reg64_lt32_low packed in delay slot. */
shr_reg64_lt32_high(nfp_prog, dst, src); │ shl_reg64_lt32_low(nfp_prog, dst, src);
│
if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) │ if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
return -EINVAL; │ return -EINVAL;
shr_reg64_ge32(nfp_prog, dst, src); │ shl_reg64_ge32(nfp_prog, dst, src);
│
if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) │ if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
return -EINVAL; │ return -EINVAL;
} │ }
│
return 0; │ return 0;
} │
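
The JIT picks between three shapes: a constant-amount sequence when the verifier proves umin == umax, dedicated <32 or >=32 sequences when the bounds settle the question, and otherwise a runtime branch on bit 5 of the shift amount. The split exists because a 64-bit shift built from 32-bit halves needs different register plumbing on each side of 32; here is a C model of the left-shift case:

        #include <stdint.h>
        #include <stdio.h>

        /* 64-bit left shift assembled from 32-bit halves, with the same
         * lt32/ge32 split the JIT branches on (bit 5 of the amount).
         */
        static uint64_t shl64_from_halves(uint32_t lo, uint32_t hi,
                                          unsigned int amt)
        {
                uint32_t new_lo, new_hi;

                if (amt == 0) {
                        new_lo = lo;
                        new_hi = hi;
                } else if (amt < 32) {  /* low bits spill into the high half */
                        new_hi = (hi << amt) | (lo >> (32 - amt));
                        new_lo = lo << amt;
                } else {                /* amt >= 32: the low half moves high */
                        new_hi = lo << (amt - 32);
                        new_lo = 0;
                }
                return ((uint64_t)new_hi << 32) | new_lo;
        }

        int main(void)
        {
                uint64_t v = 0x00000001deadbeefULL;

                printf("%016llx\n", (unsigned long long)
                       shl64_from_halves((uint32_t)v, (uint32_t)(v >> 32), 36));
                return 0;
        }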
linux/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c:928               │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c:2154
│
unsigned int metasize = xdp->data - xdp->data_meta; │ unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192) │ #if (PAGE_SIZE < 8192)
unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; │ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else │ #else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + │ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
SKB_DATA_ALIGN(xdp->data_end - │ SKB_DATA_ALIGN(xdp->data_end -
xdp->data_hard_start); │ xdp->data_hard_start);
#endif │ #endif
struct sk_buff *skb; │ struct sk_buff *skb;
│
/* Prefetch first cache line of first page. If xdp->data_meta │ /* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points to xdp->data, otherwise, we likely         │ 	 * is unused, this points exactly at xdp->data, otherwise we
* have a consumer accessing first few bytes of meta data, │ * likely have a consumer accessing first few bytes of meta
* and then actual data. │ * data, and then actual data.
*/ │ */
net_prefetch(xdp->data_meta); │ net_prefetch(xdp->data_meta);
│
	/* build an skb around the page buffer */                            │ 	/* build an skb around the page buffer */
skb = napi_build_skb(xdp->data_hard_start, truesize); │ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb)) │ if (unlikely(!skb))
return NULL; │ return NULL;
│
/* update pointers within the skb to store the data */ │ /* update pointers within the skb to store the data */
skb_reserve(skb, xdp->data - xdp->data_hard_start); │ skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, xdp->data_end - xdp->data); │ __skb_put(skb, xdp->data_end - xdp->data);
if (metasize) │ if (metasize)
skb_metadata_set(skb, metasize); │ skb_metadata_set(skb, metasize);
│
│ /* record DMA address if this is the start of a chain of buffers */
│ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
│ IXGBE_CB(skb)->dma = rx_buffer->dma;
│
/* update buffer offset */ │ /* update buffer offset */
#if (PAGE_SIZE < 8192) │ #if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize; │ rx_buffer->page_offset ^= truesize;
#else │ #else
rx_buffer->page_offset += truesize; │ rx_buffer->page_offset += truesize;
#endif │ #endif
│
return skb; │ return skb;
} │
linux/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c:758      │ linux/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c:1706
│
u16 packed_record[18]; │ u16 packed_record[16];
│ int ret;
│
if (table_index >= NUMROWS_INGRESSSAKEYRECORD) │ if (table_index >= NUMROWS_EGRESSSAKEYRECORD)
return -EINVAL; │ return -EINVAL;
│
memset(packed_record, 0, sizeof(u16) * 18); │ memset(packed_record, 0, sizeof(u16) * 16);
│
packed_record[0] = rec->key[0] & 0xFFFF; │ packed_record[0] = rec->key[0] & 0xFFFF;
packed_record[1] = (rec->key[0] >> 16) & 0xFFFF; │ packed_record[1] = (rec->key[0] >> 16) & 0xFFFF;
│
packed_record[2] = rec->key[1] & 0xFFFF; │ packed_record[2] = rec->key[1] & 0xFFFF;
packed_record[3] = (rec->key[1] >> 16) & 0xFFFF; │ packed_record[3] = (rec->key[1] >> 16) & 0xFFFF;
│
packed_record[4] = rec->key[2] & 0xFFFF; │ packed_record[4] = rec->key[2] & 0xFFFF;
packed_record[5] = (rec->key[2] >> 16) & 0xFFFF; │ packed_record[5] = (rec->key[2] >> 16) & 0xFFFF;
│
packed_record[6] = rec->key[3] & 0xFFFF; │ packed_record[6] = rec->key[3] & 0xFFFF;
packed_record[7] = (rec->key[3] >> 16) & 0xFFFF; │ packed_record[7] = (rec->key[3] >> 16) & 0xFFFF;
│
packed_record[8] = rec->key[4] & 0xFFFF; │ packed_record[8] = rec->key[4] & 0xFFFF;
packed_record[9] = (rec->key[4] >> 16) & 0xFFFF; │ packed_record[9] = (rec->key[4] >> 16) & 0xFFFF;
│
packed_record[10] = rec->key[5] & 0xFFFF; │ packed_record[10] = rec->key[5] & 0xFFFF;
packed_record[11] = (rec->key[5] >> 16) & 0xFFFF; │ packed_record[11] = (rec->key[5] >> 16) & 0xFFFF;
│
packed_record[12] = rec->key[6] & 0xFFFF; │ packed_record[12] = rec->key[6] & 0xFFFF;
packed_record[13] = (rec->key[6] >> 16) & 0xFFFF; │ packed_record[13] = (rec->key[6] >> 16) & 0xFFFF;
│
packed_record[14] = rec->key[7] & 0xFFFF; │ packed_record[14] = rec->key[7] & 0xFFFF;
packed_record[15] = (rec->key[7] >> 16) & 0xFFFF; │ packed_record[15] = (rec->key[7] >> 16) & 0xFFFF;
│
packed_record[16] = rec->key_len & 0x3; │ ret = set_raw_egress_record(hw, packed_record, 8, 2,
│ ROWOFFSET_EGRESSSAKEYRECORD + table_index);
│ if (unlikely(ret))
│ return ret;
│ ret = set_raw_egress_record(hw, packed_record + 8, 8, 2,
│ ROWOFFSET_EGRESSSAKEYRECORD + table_index -
│ 32);
│ if (unlikely(ret))
│ return ret;
│
return set_raw_ingress_record(hw, packed_record, 18, 2, │ return 0;
ROWOFFSET_INGRESSSAKEYRECORD + │
table_index); │
} │
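
Both record writers serialize the 256-bit key 16 bits at a time, low half-word before high half-word for each 32-bit key word; the ingress variant then appends key_len as a 17th entry, while the egress variant splits its 16 words across two raw-record writes. A sketch of the half-word split:

        #include <stdint.h>
        #include <stdio.h>

        /* Split 32-bit key words into the low-half-first 16-bit layout used
         * by the packed SA key records.
         */
        static void pack_key(const uint32_t key[8], uint16_t out[16])
        {
                unsigned int i;

                for (i = 0; i < 8; i++) {
                        out[2 * i]     = key[i] & 0xFFFF;         /* low half */
                        out[2 * i + 1] = (key[i] >> 16) & 0xFFFF; /* high half */
                }
        }

        int main(void)
        {
                uint32_t key[8] = { 0x11223344 }; /* remaining words zero */
                uint16_t out[16];

                pack_key(key, out);
                printf("%04x %04x\n", out[0], out[1]); /* prints 3344 1122 */
                return 0;
        }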
linux/drivers/net/ethernet/intel/igc/igc_main.c:5564                      │ linux/drivers/net/ethernet/intel/igb/igb_main.c:1408
│
struct net_device *netdev = adapter->netdev; │ struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev; │ struct pci_dev *pdev = adapter->pdev;
int err = 0; │ int err = 0;
│
if (adapter->flags & IGC_FLAG_HAS_MSIX) { │ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
err = igc_request_msix(adapter); │ err = igb_request_msix(adapter);
if (!err) │ if (!err)
goto request_done; │ goto request_done;
/* fall back to MSI */ │ /* fall back to MSI */
igc_free_all_tx_resources(adapter); │ igb_free_all_tx_resources(adapter);
igc_free_all_rx_resources(adapter); │ igb_free_all_rx_resources(adapter);
│
igc_clear_interrupt_scheme(adapter); │ igb_clear_interrupt_scheme(adapter);
err = igc_init_interrupt_scheme(adapter, false); │ err = igb_init_interrupt_scheme(adapter, false);
if (err) │ if (err)
goto request_done; │ goto request_done;
igc_setup_all_tx_resources(adapter); │
igc_setup_all_rx_resources(adapter); │ igb_setup_all_tx_resources(adapter);
igc_configure(adapter); │ igb_setup_all_rx_resources(adapter);
│ igb_configure(adapter);
} │ }
│
igc_assign_vector(adapter->q_vector[0], 0); │ igb_assign_vector(adapter->q_vector[0], 0);
│
if (adapter->flags & IGC_FLAG_HAS_MSI) { │ if (adapter->flags & IGB_FLAG_HAS_MSI) {
err = request_irq(pdev->irq, &igc_intr_msi, 0, │ err = request_irq(pdev->irq, igb_intr_msi, 0,
netdev->name, adapter); │ netdev->name, adapter);
if (!err) │ if (!err)
goto request_done; │ goto request_done;
│
/* fall back to legacy interrupts */ │ /* fall back to legacy interrupts */
igc_reset_interrupt_capability(adapter); │ igb_reset_interrupt_capability(adapter);
adapter->flags &= ~IGC_FLAG_HAS_MSI; │ adapter->flags &= ~IGB_FLAG_HAS_MSI;
} │ }
│
err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, │ err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
netdev->name, adapter); │ netdev->name, adapter);
│
if (err) │ if (err)
netdev_err(netdev, "Error %d getting interrupt\n", err); │ dev_err(&pdev->dev, "Error %d getting interrupt\n",
│ err);
│
request_done: │ request_done:
return err; │ return err;
} │
linux/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c:151                 │ linux/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c:107
│
struct hinic_hwif *hwif = func_to_io->hwif; │ struct hinic_hwif *hwif = func_to_io->hwif;
struct hinic_rq_ctxt_block *rq_ctxt_block; │ struct hinic_sq_ctxt_block *sq_ctxt_block;
struct pci_dev *pdev = hwif->pdev; │ struct pci_dev *pdev = hwif->pdev;
struct hinic_cmdq_buf cmdq_buf; │ struct hinic_cmdq_buf cmdq_buf;
struct hinic_rq_ctxt *rq_ctxt; │ struct hinic_sq_ctxt *sq_ctxt;
struct hinic_qp *qp; │ struct hinic_qp *qp;
u64 out_param; │ u64 out_param;
int err, i; │ int err, i;
│
err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); │ err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
if (err) { │ if (err) {
dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); │ dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
return err; │ return err;
} │ }
│
rq_ctxt_block = cmdq_buf.buf; │ sq_ctxt_block = cmdq_buf.buf;
rq_ctxt = rq_ctxt_block->rq_ctxt; │ sq_ctxt = sq_ctxt_block->sq_ctxt;
│
hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ, │ hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
num_rqs, func_to_io->max_qps); │ num_sqs, func_to_io->max_qps);
for (i = 0; i < num_rqs; i++) { │ for (i = 0; i < num_sqs; i++) {
qp = &func_to_io->qps[i]; │ qp = &func_to_io->qps[i];
│
hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq, │ hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
base_qpn + qp->q_id); │ base_qpn + qp->q_id);
} │ }
│
cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs); │ cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);
│
err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, │ err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf, │ IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
&out_param); │ &out_param);
if (err || out_param != 0) { │ if (err || out_param != 0) {
dev_err(&pdev->dev, "Failed to set RQ ctxts\n"); │ dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
err = -EFAULT; │ err = -EFAULT;
} │ }
│
hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); │ hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
return err; │ return err;
} │
linux/drivers/net/ethernet/microchip/lan743x_main.c:240                   │ linux/drivers/net/ethernet/microchip/lan743x_main.c:199
│
struct lan743x_rx *rx = context; │ struct lan743x_tx *tx = context;
struct lan743x_adapter *adapter = rx->adapter; │ struct lan743x_adapter *adapter = tx->adapter;
bool enable_flag = true; │ bool enable_flag = true;
│
│ lan743x_csr_read(adapter, INT_EN_SET);
if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) { │ if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
lan743x_csr_write(adapter, INT_EN_CLR, │ lan743x_csr_write(adapter, INT_EN_CLR,
INT_BIT_DMA_RX_(rx->channel_number)); │ INT_BIT_DMA_TX_(tx->channel_number));
} │ }
│
if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) { │ if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number); │ u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
u32 dmac_int_sts; │ u32 dmac_int_sts;
u32 dmac_int_en; │ u32 dmac_int_en;
│
if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) │ if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS); │ dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
else │ else
dmac_int_sts = rx_frame_bit; │ dmac_int_sts = ioc_bit;
if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) │ if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
dmac_int_en = lan743x_csr_read(adapter, │ dmac_int_en = lan743x_csr_read(adapter,
DMAC_INT_EN_SET); │ DMAC_INT_EN_SET);
else │ else
dmac_int_en = rx_frame_bit; │ dmac_int_en = ioc_bit;
│
dmac_int_en &= rx_frame_bit; │ dmac_int_en &= ioc_bit;
dmac_int_sts &= dmac_int_en; │ dmac_int_sts &= dmac_int_en;
if (dmac_int_sts & rx_frame_bit) { │ if (dmac_int_sts & ioc_bit) {
napi_schedule(&rx->napi); │ napi_schedule(&tx->napi);
			enable_flag = false;/* poll func will enable later */ │ 			enable_flag = false;/* poll func will enable later */
} │ }
} │ }
│
if (enable_flag) { │ if (enable_flag)
/* enable isr */ │ /* enable isr */
lan743x_csr_write(adapter, INT_EN_SET, │ lan743x_csr_write(adapter, INT_EN_SET,
INT_BIT_DMA_RX_(rx->channel_number)); │ INT_BIT_DMA_TX_(tx->channel_number));
} │
} │
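
The RX and TX handlers share the same gate: NAPI is scheduled only when the channel's DMAC bit is set in both the latched status and the enable mask, with the vector flags deciding whether each word is actually read back or simply assumed. A minimal model of that status-and-enable check:

        #include <stdint.h>
        #include <stdio.h>

        /* Schedule polling only if the channel bit is set in both the latched
         * status and the enable mask, as the ISR does before napi_schedule().
         */
        static int should_schedule(uint32_t int_sts, uint32_t int_en,
                                   unsigned int channel)
        {
                uint32_t bit = 1u << channel;

                return (int_sts & int_en & bit) != 0;
        }

        int main(void)
        {
                printf("%d %d\n",
                       should_schedule(0x5, 0x4, 2),  /* set and enabled -> 1 */
                       should_schedule(0x5, 0x1, 2)); /* set, not enabled -> 0 */
                return 0;
        }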
linux/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:106              │ linux/drivers/net/ethernet/cavium/liquidio/lio_main.c:245
│
int i; │ int i;
│
/* Disable the input and output queues now. No more packets will │ /* Disable the input and output queues now. No more packets will
* arrive from Octeon, but we should wait for all packet processing │ * arrive from Octeon, but we should wait for all packet processing
* to finish. │ * to finish.
*/ │ */
│ force_io_queues_off(oct);
│
/* To allow for in-flight requests */ │ /* To allow for in-flight requests */
schedule_timeout_uninterruptible(100); │ schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
│
if (wait_for_pending_requests(oct)) │ if (wait_for_pending_requests(oct))
dev_err(&oct->pci_dev->dev, "There were pending requests\n"); │ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
│
/* Force all requests waiting to be fetched by OCTEON to complete. */ │ /* Force all requests waiting to be fetched by OCTEON to complete. */
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { │ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
struct octeon_instr_queue *iq; │ struct octeon_instr_queue *iq;
│
if (!(oct->io_qmask.iq & BIT_ULL(i))) │ if (!(oct->io_qmask.iq & BIT_ULL(i)))
continue; │ continue;
iq = oct->instr_queue[i]; │ iq = oct->instr_queue[i];
│
if (atomic_read(&iq->instr_pending)) { │ if (atomic_read(&iq->instr_pending)) {
spin_lock_bh(&iq->lock); │ spin_lock_bh(&iq->lock);
iq->fill_cnt = 0; │ iq->fill_cnt = 0;
iq->octeon_read_index = iq->host_write_index; │ iq->octeon_read_index = iq->host_write_index;
iq->stats.instr_processed += │ iq->stats.instr_processed +=
atomic_read(&iq->instr_pending); │ atomic_read(&iq->instr_pending);
lio_process_iq_request_list(oct, iq, 0); │ lio_process_iq_request_list(oct, iq, 0);
spin_unlock_bh(&iq->lock); │ spin_unlock_bh(&iq->lock);
} │ }
} │ }
│
/* Force all pending ordered list requests to time out. */ │ /* Force all pending ordered list requests to time out. */
lio_process_ordered_list(oct, 1); │ lio_process_ordered_list(oct, 1);
│
/* We do not need to wait for output queue packets to be processed. */ │ /* We do not need to wait for output queue packets to be processed. */
} │
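
The loop above force-completes whatever OCTEON never fetched: under the queue lock it zeroes fill_cnt, snaps octeon_read_index forward to host_write_index, and credits the skipped instructions to instr_processed before the request-list cleanup runs. A minimal sketch of that index snap, using mock ring fields rather than the real struct octeon_instr_queue:

#include <stdio.h>

struct mock_iq {
	unsigned int host_write_index;
	unsigned int octeon_read_index;
	unsigned long instr_processed;
	int instr_pending;
};

static void force_drain(struct mock_iq *iq)
{
	/* pretend the device consumed everything the host ever queued */
	iq->octeon_read_index = iq->host_write_index;
	iq->instr_processed += iq->instr_pending;
	iq->instr_pending = 0;
}

int main(void)
{
	struct mock_iq iq = {
		.host_write_index = 42,
		.octeon_read_index = 17,
		.instr_pending = 25,
	};

	force_drain(&iq);
	printf("read=%u processed=%lu\n",
	       iq.octeon_read_index, iq.instr_processed);
	return 0;
}
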
linux/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c:234 │ linux/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c:989
│
struct otx2_hw *hw = &vf->hw; │ struct otx2_hw *hw = &pf->hw;
struct msg_req *req; │ struct msg_req *req;
char *irq_name; │ char *irq_name;
int err; │ int err;
│
/* Register mailbox interrupt handler */ │ /* Register mailbox interrupt handler */
irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE]; │ irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox"); │ snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX), │ err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf); │ otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
if (err) { │ if (err) {
dev_err(vf->dev, │ dev_err(pf->dev,
"RVUPF: IRQ registration failed for VFAF mbox irq\n"); │ "RVUPF: IRQ registration failed for PFAF mbox irq\n");
return err; │ return err;
} │ }
│
/* Enable mailbox interrupt for msgs coming from PF. │ /* Enable mailbox interrupt for msgs coming from AF.
* First clear to avoid spurious interrupts, if any. │ * First clear to avoid spurious interrupts, if any.
*/ │ */
otx2_write64(vf, RVU_VF_INT, BIT_ULL(0)); │ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0)); │ otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
│
if (!probe_pf) │ if (!probe_af)
return 0; │ return 0;
│
/* Check mailbox communication with PF */ │ /* Check mailbox communication with AF */
req = otx2_mbox_alloc_msg_ready(&vf->mbox); │ req = otx2_mbox_alloc_msg_ready(&pf->mbox);
if (!req) { │ if (!req) {
otx2vf_disable_mbox_intr(vf); │ otx2_disable_mbox_intr(pf);
return -ENOMEM; │ return -ENOMEM;
} │ }
│ err = otx2_sync_mbox_msg(&pf->mbox);
err = otx2_sync_mbox_msg(&vf->mbox); │
if (err) { │ if (err) {
dev_warn(vf->dev, │ dev_warn(pf->dev,
"AF not responding to mailbox, deferring probe\n"); │ "AF not responding to mailbox, deferring probe\n");
otx2vf_disable_mbox_intr(vf); │ otx2_disable_mbox_intr(pf);
return -EPROBE_DEFER; │ return -EPROBE_DEFER;
} │ }
│
return 0; │ return 0;
} │
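
Both probes above follow the same mailbox bring-up order: register the IRQ handler, write the interrupt register to clear any latched event before setting the enable bit, then send a ready message and return -EPROBE_DEFER if the peer never answers. A compact sketch of that ordering, assuming a hypothetical write-one-to-clear interrupt register and mock helpers in place of otx2_write64 and the mbox calls:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MOCK_INT_BIT (1ull << 0)

static uint64_t mock_int_reg;	/* models a write-one-to-clear INT register */
static uint64_t mock_int_ena;	/* models the INT enable (W1S) register */

static void mock_write64(uint64_t *reg, uint64_t val) { *reg = val; }
static bool mock_peer_answers(void) { return true; }

static int mock_mbox_bringup(void)
{
	/* clear first, then enable, so a stale latched bit cannot fire */
	mock_write64(&mock_int_reg, MOCK_INT_BIT);
	mock_write64(&mock_int_ena, MOCK_INT_BIT);

	if (!mock_peer_answers())
		return -517;	/* models -EPROBE_DEFER: retry probe later */
	return 0;
}

int main(void)
{
	printf("bringup: %d\n", mock_mbox_bringup());
	return 0;
}
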
linux/drivers/net/ethernet/intel/igc/igc_main.c:1060 │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c:8085
│
struct sk_buff *skb = first->skb; │ struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0; │ u32 vlan_macip_lens = 0;
│ u32 fceof_saidx = 0;
u32 type_tucmd = 0; │ u32 type_tucmd = 0;
│
if (skb->ip_summed != CHECKSUM_PARTIAL) { │ if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed: │ csum_failed:
if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && │ if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
!tx_ring->launchtime_enable) │ IXGBE_TX_FLAGS_CC)))
return; │ return;
goto no_csum; │ goto no_csum;
} │ }
│
switch (skb->csum_offset) { │ switch (skb->csum_offset) {
case offsetof(struct tcphdr, check): │ case offsetof(struct tcphdr, check):
type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; │ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
fallthrough; │ fallthrough;
case offsetof(struct udphdr, check): │ case offsetof(struct udphdr, check):
break; │ break;
case offsetof(struct sctphdr, checksum): │ case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */ │ /* validate that this is actually an SCTP request */
if (skb_csum_is_sctp(skb)) { │ if (skb_csum_is_sctp(skb)) {
type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; │ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break; │ break;
} │ }
fallthrough; │ fallthrough;
default: │ default:
skb_checksum_help(skb); │ skb_checksum_help(skb);
goto csum_failed; │ goto csum_failed;
} │ }
│
/* update TX checksum flag */ │ /* update TX checksum flag */
first->tx_flags |= IGC_TX_FLAGS_CSUM; │ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
vlan_macip_lens = skb_checksum_start_offset(skb) - │ vlan_macip_lens = skb_checksum_start_offset(skb) -
skb_network_offset(skb); │ skb_network_offset(skb);
no_csum: │ no_csum:
vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; │ /* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; │ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
│ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
│
igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); │ fceof_saidx |= itd->sa_idx;
│ type_tucmd |= itd->flags | itd->trailer_len;
│
│ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
} │
linux/drivers/net/ethernet/intel/igc/igc_main.c:1060 │ linux/drivers/net/ethernet/intel/igb/igb_main.c:5980
│
struct sk_buff *skb = first->skb; │ struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0; │ u32 vlan_macip_lens = 0;
u32 type_tucmd = 0; │ u32 type_tucmd = 0;
│
if (skb->ip_summed != CHECKSUM_PARTIAL) { │ if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed: │ csum_failed:
if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && │ if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
!tx_ring->launchtime_enable) │ !tx_ring->launchtime_enable)
return; │ return;
goto no_csum; │ goto no_csum;
} │ }
│
switch (skb->csum_offset) { │ switch (skb->csum_offset) {
case offsetof(struct tcphdr, check): │ case offsetof(struct tcphdr, check):
type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; │ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
fallthrough; │ fallthrough;
case offsetof(struct udphdr, check): │ case offsetof(struct udphdr, check):
break; │ break;
case offsetof(struct sctphdr, checksum): │ case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */ │ /* validate that this is actually an SCTP request */
if (skb_csum_is_sctp(skb)) { │ if (skb_csum_is_sctp(skb)) {
type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; │ type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break; │ break;
} │ }
fallthrough; │ fallthrough;
default: │ default:
skb_checksum_help(skb); │ skb_checksum_help(skb);
goto csum_failed; │ goto csum_failed;
} │ }
│
/* update TX checksum flag */ │ /* update TX checksum flag */
first->tx_flags |= IGC_TX_FLAGS_CSUM; │ first->tx_flags |= IGB_TX_FLAGS_CSUM;
vlan_macip_lens = skb_checksum_start_offset(skb) - │ vlan_macip_lens = skb_checksum_start_offset(skb) -
skb_network_offset(skb); │ skb_network_offset(skb);
no_csum: │ no_csum:
vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; │ vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; │ vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
│
igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); │ igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
} │
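
Both checksum-offload pairs above dispatch on skb->csum_offset: the byte offset of the checksum field within the L4 header uniquely distinguishes TCP (offset 16), UDP (6), and SCTP (8), and the TCP case falls through to UDP's break after setting its L4T descriptor bit. A standalone sketch of that offsetof() dispatch, using simplified header layouts that reproduce the real offsets:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins that keep the real field offsets */
struct tcphdr  { uint16_t source, dest; uint32_t seq, ack_seq;
		 uint16_t flags, window, check, urg_ptr; };
struct udphdr  { uint16_t source, dest, len, check; };
struct sctphdr { uint16_t source, dest; uint32_t vtag, checksum; };

static const char *l4_from_csum_offset(size_t csum_offset)
{
	switch (csum_offset) {
	case offsetof(struct tcphdr, check):		/* 16 */
		return "TCP";
	case offsetof(struct udphdr, check):		/* 6 */
		return "UDP";
	case offsetof(struct sctphdr, checksum):	/* 8 */
		return "SCTP";
	default:	/* drivers fall back to skb_checksum_help() here */
		return "unknown";
	}
}

int main(void)
{
	printf("%s\n", l4_from_csum_offset(offsetof(struct tcphdr, check)));
	printf("%s\n", l4_from_csum_offset(offsetof(struct udphdr, check)));
	printf("%s\n", l4_from_csum_offset(offsetof(struct sctphdr, checksum)));
	return 0;
}
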
linux/drivers/net/ethernet/intel/e1000e/phy.c:1353 │ linux/drivers/net/ethernet/intel/e1000e/phy.c:1182
│
struct e1000_phy_info *phy = &hw->phy; │ struct e1000_phy_info *phy = &hw->phy;
s32 ret_val; │ s32 ret_val;
u16 data; │ u16 phy_data;
bool link; │ bool link;
│
ret_val = e1e_rphy(hw, MII_BMCR, &data); │ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val) │ if (ret_val)
return ret_val; │ return ret_val;
│
e1000e_phy_force_speed_duplex_setup(hw, &data); │ e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
│
ret_val = e1e_wphy(hw, MII_BMCR, data); │ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val) │ if (ret_val)
return ret_val; │ return ret_val;
│
/* Disable MDI-X support for 10/100 */ │ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); │ * forced whenever speed and duplex are forced.
│ */
│ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
if (ret_val) │ if (ret_val)
return ret_val; │ return ret_val;
│
data &= ~IFE_PMC_AUTO_MDIX; │ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
data &= ~IFE_PMC_FORCE_MDIX; │ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
│
ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); │ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
if (ret_val) │ if (ret_val)
return ret_val; │ return ret_val;
│
e_dbg("IFE PMC: %X\n", data); │ e_dbg("IGP PSCR: %X\n", phy_data);
│
udelay(1); │ udelay(1);
│
if (phy->autoneg_wait_to_complete) { │ if (phy->autoneg_wait_to_complete) {
e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); │ e_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
│
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, │ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
100000, &link); │ 100000, &link);
if (ret_val) │ if (ret_val)
return ret_val; │ return ret_val;
│
if (!link) │ if (!link)
e_dbg("Link taking longer than expected.\n"); │ e_dbg("Link taking longer than expected.\n");
│
/* Try once more */ │ /* Try once more */
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, │ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
100000, &link); │ 100000, &link);
if (ret_val) │
return ret_val; │
} │ }
│
return 0; │ return ret_val;
} │
linux/drivers/net/ethernet/renesas/sh_eth.c:977 │ linux/drivers/net/ethernet/renesas/sh_eth.c:588
│
.soft_reset = sh_eth_soft_reset_gether, │ .soft_reset = sh_eth_soft_reset_gether,
│
.chip_reset = sh_eth_chip_reset, │ .chip_reset = sh_eth_chip_reset_r8a7740,
.set_duplex = sh_eth_set_duplex, │ .set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether, │ .set_rate = sh_eth_set_rate_gether,
│
.register_type = SH_ETH_REG_GIGABIT, │ .register_type = SH_ETH_REG_GIGABIT,
│
.edtrr_trns = EDTRR_TRNS_GETHER, │ .edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD | ECSR_MPD, │ .ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, │ .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | │ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | │ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | │ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | │ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | │ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
│ EESIPR_CEEFIP | EESIPR_CELFIP |
EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | │ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP, │ EESIPR_PREIP | EESIPR_CERFIP,
│
.tx_check = EESR_TC1 | EESR_FTC, │ .tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | │ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | │ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE, │ EESR_TDE,
│ .fdr_value = 0x0000070f,
│
.apr = 1, │ .apr = 1,
.mpr = 1, │ .mpr = 1,
.tpauser = 1, │ .tpauser = 1,
.gecmr = 1, │ .gecmr = 1,
.bculr = 1, │ .bculr = 1,
.hw_swap = 1, │ .hw_swap = 1,
│ .rpadir = 1,
.no_trimd = 1, │ .no_trimd = 1,
.no_ade = 1, │ .no_ade = 1,
.xdfar_rw = 1, │ .xdfar_rw = 1,
.tsu = 1, │
.csmr = 1, │ .csmr = 1,
.rx_csum = 1, │ .rx_csum = 1,
│ .tsu = 1,
.select_mii = 1, │ .select_mii = 1,
.magic = 1, │ .magic = 1,
.cexcr = 1, │ .cexcr = 1,
} │
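
The sh_eth blocks above and below are per-SoC capability tables: each chip variant is described by one designated-initializer struct of callbacks, register values, and one-bit feature flags, and anything left unset defaults to zero, which is how variants silently drop features such as rx_csum or rpadir. A small sketch of the idiom with hypothetical fields, not the real struct sh_eth_cpu_data:

#include <stdio.h>

struct mock_cpu_data {
	void (*set_rate)(int rate);	/* per-chip callback */
	unsigned int tsu:1;		/* has a TSU block */
	unsigned int magic:1;		/* supports magic-packet wakeup */
	unsigned int rx_csum:1;		/* hardware RX checksum */
};

static void mock_set_rate_gether(int rate) { printf("rate=%d\n", rate); }

static const struct mock_cpu_data mock_chip = {
	.set_rate = mock_set_rate_gether,
	.tsu = 1,
	.magic = 1,
	/* .rx_csum left unset -> 0: this variant lacks RX checksum */
};

int main(void)
{
	mock_chip.set_rate(1000);
	printf("tsu=%u magic=%u rx_csum=%u\n",
	       mock_chip.tsu, mock_chip.magic, mock_chip.rx_csum);
	return 0;
}
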
linux/drivers/net/ethernet/renesas/sh_eth.c:977 │ linux/drivers/net/ethernet/renesas/sh_eth.c:933
│
.soft_reset = sh_eth_soft_reset_gether, │ .soft_reset = sh_eth_soft_reset_gether,
│
.chip_reset = sh_eth_chip_reset, │ .chip_reset = sh_eth_chip_reset_giga,
.set_duplex = sh_eth_set_duplex, │ .set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether, │ .set_rate = sh_eth_set_rate_giga,
│
.register_type = SH_ETH_REG_GIGABIT, │ .register_type = SH_ETH_REG_GIGABIT,
│
.edtrr_trns = EDTRR_TRNS_GETHER, │ .edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD | ECSR_MPD, │ .ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, │ .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | │ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | │ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | │ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | │ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | │ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
│ EESIPR_CEEFIP | EESIPR_CELFIP |
EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | │ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP, │ EESIPR_PREIP | EESIPR_CERFIP,
│
.tx_check = EESR_TC1 | EESR_FTC, │ .tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | │ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | │ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE, │ EESR_TDE,
│ .fdr_value = 0x0000072f,
│
│ .irq_flags = IRQF_SHARED,
.apr = 1, │ .apr = 1,
.mpr = 1, │ .mpr = 1,
.tpauser = 1, │ .tpauser = 1,
.gecmr = 1, │ .gecmr = 1,
.bculr = 1, │ .bculr = 1,
.hw_swap = 1, │ .hw_swap = 1,
│ .rpadir = 1,
.no_trimd = 1, │ .no_trimd = 1,
.no_ade = 1, │ .no_ade = 1,
.xdfar_rw = 1, │ .xdfar_rw = 1,
.tsu = 1, │ .tsu = 1,
.csmr = 1, │
.rx_csum = 1, │
.select_mii = 1, │
.magic = 1, │
.cexcr = 1, │ .cexcr = 1,
│ .dual_port = 1,
} │
linux/drivers/net/ethernet/renesas/sh_eth.c:977 │ linux/drivers/net/ethernet/renesas/sh_eth.c:716
│
.soft_reset = sh_eth_soft_reset_gether, │ .soft_reset = sh_eth_soft_reset_gether,
│
.chip_reset = sh_eth_chip_reset, │
.set_duplex = sh_eth_set_duplex, │ .set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether, │ .set_rate = sh_eth_set_rate_gether,
│
.register_type = SH_ETH_REG_GIGABIT, │ .register_type = SH_ETH_REG_GIGABIT,
│
.edtrr_trns = EDTRR_TRNS_GETHER, │ .edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD | ECSR_MPD, │ .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, │ .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
│ ECSIPR_MPDIP,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | │ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | │ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | │ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | │ EESIPR_RMAFIP | EESIPR_RRFIP |
EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | │ EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | │
EESIPR_PREIP | EESIPR_CERFIP, │ EESIPR_PREIP | EESIPR_CERFIP,
│
.tx_check = EESR_TC1 | EESR_FTC, │ .tx_check = EESR_FTC | EESR_CD | EESR_TRO,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | │ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | │ EESR_RFE | EESR_RDE | EESR_RFRMER |
EESR_TDE, │ EESR_TFE | EESR_TDE | EESR_ECI,
│ .fdr_value = 0x0000070f,
│
.apr = 1, │ .apr = 1,
.mpr = 1, │ .mpr = 1,
.tpauser = 1, │ .tpauser = 1,
.gecmr = 1, │ .gecmr = 1,
.bculr = 1, │ .bculr = 1,
.hw_swap = 1, │ .hw_swap = 1,
│ .nbst = 1,
│ .rpadir = 1,
.no_trimd = 1, │ .no_trimd = 1,
.no_ade = 1, │ .no_ade = 1,
.xdfar_rw = 1, │ .xdfar_rw = 1,
.tsu = 1, │
.csmr = 1, │ .csmr = 1,
.rx_csum = 1, │ .rx_csum = 1,
.select_mii = 1, │ .select_mii = 1,
.magic = 1, │ .magic = 1,
.cexcr = 1, │ .cexcr = 1,
} │
linux/drivers/net/ethernet/marvell/skge.h:1426 │ linux/drivers/net/ethernet/marvell/sky2.h:1407
│
PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */ │ PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */ │ PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
│
PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */ │ PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */ │ PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */
/* (88E1011 only) */ │ /* (88E1011 only) */
PHY_M_EC_S_DSC_MSK = 3<<8, /* Bit 9.. 8: Slave Downshift Counter */ │ PHY_M_EC_S_DSC_MSK = 3<<8,/* Bit 9.. 8: Slave Downshift Counter */
/* (88E1011 only) */ │ /* (88E1011 only) */
PHY_M_EC_M_DSC_MSK2 = 7<<9, /* Bit 11.. 9: Master Downshift Counter */ │ PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9: Master Downshift Counter */
/* (88E1111 only) */ │ /* (88E1111 only) */
PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */ │ PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
/* !!! Errata in spec. (1 = disable) */ │ /* !!! Errata in spec. (1 = disable) */
PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/ │ PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/
PHY_M_EC_MAC_S_MSK = 7<<4, /* Bit 6.. 4: Def. MAC interface speed */ │ PHY_M_EC_MAC_S_MSK = 7<<4,/* Bit 6.. 4: Def. MAC interface speed */
PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */ │ PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */ │ PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */
PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */ │ PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */
│
│ PHY_M_10B_TE_ENABLE = 1<<7, /* 10Base-Te Enable (88E8079 and above) */
│
linux/drivers/net/ethernet/renesas/sh_eth.c:1020 │ linux/drivers/net/ethernet/renesas/sh_eth.c:588
│
.soft_reset = sh_eth_soft_reset_gether, │ .soft_reset = sh_eth_soft_reset_gether,
│
.chip_reset = sh_eth_chip_reset, │ .chip_reset = sh_eth_chip_reset_r8a7740,
.set_duplex = sh_eth_set_duplex, │ .set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether, │ .set_rate = sh_eth_set_rate_gether,
│
.register_type = SH_ETH_REG_GIGABIT, │ .register_type = SH_ETH_REG_GIGABIT,
│
.edtrr_trns = EDTRR_TRNS_GETHER, │ .edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD | ECSR_MPD, │ .ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, │ .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | │ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | │ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | │ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | │ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | │ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
│ EESIPR_CEEFIP | EESIPR_CELFIP |
EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | │ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP, │ EESIPR_PREIP | EESIPR_CERFIP,
│
.tx_check = EESR_TC1 | EESR_FTC, │ .tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | │ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, │ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
│ EESR_TDE,
│ .fdr_value = 0x0000070f,
│
.apr = 1, │ .apr = 1,
.mpr = 1, │ .mpr = 1,
.tpauser = 1, │ .tpauser = 1,
.gecmr = 1, │ .gecmr = 1,
.bculr = 1, │ .bculr = 1,
.hw_swap = 1, │ .hw_swap = 1,
│ .rpadir = 1,
.no_trimd = 1, │ .no_trimd = 1,
.no_ade = 1, │ .no_ade = 1,
.xdfar_rw = 1, │ .xdfar_rw = 1,
│ .csmr = 1,
│ .rx_csum = 1,
.tsu = 1, │ .tsu = 1,
.irq_flags = IRQF_SHARED, │ .select_mii = 1,
.magic = 1, │ .magic = 1,
.cexcr = 1, │ .cexcr = 1,
.rx_csum = 1, │
.dual_port = 1, │
} │
linux/drivers/net/ethernet/renesas/sh_eth.c:1020 │ linux/drivers/net/ethernet/renesas/sh_eth.c:933
│
.soft_reset = sh_eth_soft_reset_gether, │ .soft_reset = sh_eth_soft_reset_gether,
│
.chip_reset = sh_eth_chip_reset, │ .chip_reset = sh_eth_chip_reset_giga,
.set_duplex = sh_eth_set_duplex, │ .set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether, │ .set_rate = sh_eth_set_rate_giga,
│
.register_type = SH_ETH_REG_GIGABIT, │ .register_type = SH_ETH_REG_GIGABIT,
│
.edtrr_trns = EDTRR_TRNS_GETHER, │ .edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD | ECSR_MPD, │ .ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, │ .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | │ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | │ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | │ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | │ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | │ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
│ EESIPR_CEEFIP | EESIPR_CELFIP |
EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | │ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP, │ EESIPR_PREIP | EESIPR_CERFIP,
│
.tx_check = EESR_TC1 | EESR_FTC, │ .tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | │ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, │ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
│ EESR_TDE,
│ .fdr_value = 0x0000072f,
│
│ .irq_flags = IRQF_SHARED,
.apr = 1, │ .apr = 1,
.mpr = 1, │ .mpr = 1,
.tpauser = 1, │ .tpauser = 1,
.gecmr = 1, │ .gecmr = 1,
.bculr = 1, │ .bculr = 1,
.hw_swap = 1, │ .hw_swap = 1,
│ .rpadir = 1,
.no_trimd = 1, │ .no_trimd = 1,
.no_ade = 1, │ .no_ade = 1,
.xdfar_rw = 1, │ .xdfar_rw = 1,
.tsu = 1, │ .tsu = 1,
.irq_flags = IRQF_SHARED, │
.magic = 1, │
.cexcr = 1, │ .cexcr = 1,
.rx_csum = 1, │
.dual_port = 1, │ .dual_port = 1,
} │
linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c:597 │ linux/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c:446
│
int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink); │ int fw_log_max_fdb_encap_uplink =
│ MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
int num_fwd_destinations = 0; │ int num_fwd_destinations = 0;
│ struct mlx5_flow_rule *dst;
int num_encap = 0; │ int num_encap = 0;
int i; │
│
*extended_dest = false; │ *extended_dest = false;
if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) │ if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return 0; │ return 0;
for (i = 0; i < fte->dests_size; i++) { │
if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) │ list_for_each_entry(dst, &fte->node.children, node.list) {
│ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue; │ continue;
if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT || │ if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) && │ dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) │ dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
num_encap++; │ num_encap++;
num_fwd_destinations++; │ num_fwd_destinations++;
} │ }
│
if (num_fwd_destinations > 1 && num_encap > 0) │ if (num_fwd_destinations > 1 && num_encap > 0)
*extended_dest = true; │ *extended_dest = true;
│
if (*extended_dest && !fw_log_max_fdb_encap_uplink) { │ if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
mlx5_core_warn(dev, "FW does not support extended destination"); │ mlx5_core_warn(dev, "FW does not support extended destination");
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
} │ }
if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) { │ if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
mlx5_core_warn(dev, "FW does not support more than %d encaps", │ mlx5_core_warn(dev, "FW does not support more than %d encaps",
1 << fw_log_max_fdb_encap_uplink); │ 1 << fw_log_max_fdb_encap_uplink);
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
} │ }
│
return 0; │ return 0;
} │
linux/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c:17 │ linux/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c:17
│
struct atl1c_adapter *adapter = netdev_priv(netdev); │ struct atl1e_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw; │ struct atl1e_hw *hw = &adapter->hw;
u32 supported, advertising; │ u32 supported, advertising;
│
supported = (SUPPORTED_10baseT_Half | │ supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full | │ SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | │ SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full | │ SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg | │ SUPPORTED_Autoneg |
SUPPORTED_TP); │ SUPPORTED_TP);
if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M) │ if (hw->nic_type == athr_l1e)
supported |= SUPPORTED_1000baseT_Full; │ supported |= SUPPORTED_1000baseT_Full;
│
advertising = ADVERTISED_TP; │ advertising = ADVERTISED_TP;
│
│ advertising |= ADVERTISED_Autoneg;
advertising |= hw->autoneg_advertised; │ advertising |= hw->autoneg_advertised;
│
cmd->base.port = PORT_TP; │ cmd->base.port = PORT_TP;
cmd->base.phy_address = 0; │ cmd->base.phy_address = 0;
│
if (adapter->link_speed != SPEED_0) { │ if (adapter->link_speed != SPEED_0) {
cmd->base.speed = adapter->link_speed; │ cmd->base.speed = adapter->link_speed;
if (adapter->link_duplex == FULL_DUPLEX) │ if (adapter->link_duplex == FULL_DUPLEX)
cmd->base.duplex = DUPLEX_FULL; │ cmd->base.duplex = DUPLEX_FULL;
else │ else
cmd->base.duplex = DUPLEX_HALF; │ cmd->base.duplex = DUPLEX_HALF;
} else { │ } else {
cmd->base.speed = SPEED_UNKNOWN; │ cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN; │ cmd->base.duplex = DUPLEX_UNKNOWN;
} │ }
│
cmd->base.autoneg = AUTONEG_ENABLE; │ cmd->base.autoneg = AUTONEG_ENABLE;
│
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, │ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
supported); │ supported);
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, │ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
advertising); │ advertising);
│
return 0; │ return 0;
} │
linux/drivers/net/ethernet/renesas/sh_eth.c:1020 │ linux/drivers/net/ethernet/renesas/sh_eth.c:977
│
.soft_reset = sh_eth_soft_reset_gether, │ .soft_reset = sh_eth_soft_reset_gether,
│
.chip_reset = sh_eth_chip_reset, │ .chip_reset = sh_eth_chip_reset,
.set_duplex = sh_eth_set_duplex, │ .set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether, │ .set_rate = sh_eth_set_rate_gether,
│
.register_type = SH_ETH_REG_GIGABIT, │ .register_type = SH_ETH_REG_GIGABIT,
│
.edtrr_trns = EDTRR_TRNS_GETHER, │ .edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD | ECSR_MPD, │ .ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, │ .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | │ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | │ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | │ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | │ EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | │ EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | │ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP, │ EESIPR_PREIP | EESIPR_CERFIP,
│
.tx_check = EESR_TC1 | EESR_FTC, │ .tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | │ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, │ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
│ EESR_TDE,
│
.apr = 1, │ .apr = 1,
.mpr = 1, │ .mpr = 1,
.tpauser = 1, │ .tpauser = 1,
.gecmr = 1, │ .gecmr = 1,
.bculr = 1, │ .bculr = 1,
.hw_swap = 1, │ .hw_swap = 1,
.no_trimd = 1, │ .no_trimd = 1,
.no_ade = 1, │ .no_ade = 1,
.xdfar_rw = 1, │ .xdfar_rw = 1,
.tsu = 1, │ .tsu = 1,
.irq_flags = IRQF_SHARED, │ .csmr = 1,
│ .rx_csum = 1,
│ .select_mii = 1,
.magic = 1, │ .magic = 1,
.cexcr = 1, │ .cexcr = 1,
.rx_csum = 1, │
.dual_port = 1, │
} │
linux/drivers/net/ethernet/intel/ixgbevf/ethtool.c:677 │ linux/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c:1612
│
for (i = 0; i < test->array_len; i++) { │ for (i = 0; i < test->array_len; i++) {
bool b = false; │ bool b = false;
│
switch (test->test_type) { │ switch (test->test_type) {
case PATTERN_TEST: │ case PATTERN_TEST:
b = reg_pattern_test(adapter, data, │ b = reg_pattern_test(adapter, data,
test->reg + (i * 0x40), │ test->reg + (i * 0x40),
test->mask, │ test->mask,
test->write); │ test->write);
break; │ break;
case SET_READ_TEST: │ case SET_READ_TEST:
b = reg_set_and_check(adapter, data, │ b = reg_set_and_check(adapter, data,
test->reg + (i * 0x40), │ test->reg + (i * 0x40),
test->mask, │ test->mask,
test->write); │ test->write);
break; │ break;
case WRITE_NO_TEST: │ case WRITE_NO_TEST:
ixgbe_write_reg(&adapter->hw, │ ixgbe_write_reg(&adapter->hw,
test->reg + (i * 0x40), │ test->reg + (i * 0x40),
test->write); │ test->write);
break; │ break;
case TABLE32_TEST: │ case TABLE32_TEST:
b = reg_pattern_test(adapter, data, │ b = reg_pattern_test(adapter, data,
test->reg + (i * 4), │ test->reg + (i * 4),
test->mask, │ test->mask,
test->write); │ test->write);
break; │ break;
case TABLE64_TEST_LO: │ case TABLE64_TEST_LO:
b = reg_pattern_test(adapter, data, │ b = reg_pattern_test(adapter, data,
test->reg + (i * 8), │ test->reg + (i * 8),
test->mask, │ test->mask,
test->write); │ test->write);
break; │ break;
case TABLE64_TEST_HI: │ case TABLE64_TEST_HI:
b = reg_pattern_test(adapter, data, │ b = reg_pattern_test(adapter, data,
test->reg + 4 + (i * 8), │ (test->reg + 4) + (i * 8),
test->mask, │ test->mask,
test->write); │ test->write);
break; │ break;
} │ }
if (b) │ if (b)
return 1; │ return 1;
} │ }
test++; │ test++;
} │
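
The self-test loop above is table-driven: each entry names a base register, a test type, and an implicit per-instance stride (0x40 for per-queue registers, 4 or 8 for table words), and the walk stops on the first failing instance. A small sketch of the same shape; the mock register check stands in for reg_pattern_test/reg_set_and_check and collapses the test types into one callback:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum test_type { PATTERN_TEST, SET_READ_TEST };

struct reg_test {
	uint32_t reg;
	unsigned int array_len;
	unsigned int stride;		/* 0x40, 8, or 4 in the drivers */
	enum test_type type;
	uint32_t mask, write;
};

static bool mock_reg_check(uint32_t reg, uint32_t mask, uint32_t write)
{
	(void)reg; (void)mask; (void)write;
	return false;	/* false == pass, matching the drivers' convention */
}

static int run_tests(const struct reg_test *test)
{
	unsigned int i;

	for (; test->array_len; test++)
		for (i = 0; i < test->array_len; i++)
			if (mock_reg_check(test->reg + i * test->stride,
					   test->mask, test->write))
				return 1;
	return 0;
}

int main(void)
{
	static const struct reg_test tests[] = {
		{ 0x01028, 4, 0x40, PATTERN_TEST, 0xffffffff, 0xffffffff },
		{ 0 },	/* zero array_len terminates the table */
	};

	printf("%d\n", run_tests(tests));
	return 0;
}
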
linux/drivers/net/ethernet/intel/e1000/e1000_ethtool.c:1860 │ linux/drivers/net/ethernet/intel/e1000e/ethtool.c:2362
│
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, │ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo, │ .get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len, │ .get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs, │ .get_regs = e1000_get_regs,
.get_wol = e1000_get_wol, │ .get_wol = e1000_get_wol,
.set_wol = e1000_set_wol, │ .set_wol = e1000_set_wol,
.get_msglevel = e1000_get_msglevel, │ .get_msglevel = e1000_get_msglevel,
.set_msglevel = e1000_set_msglevel, │ .set_msglevel = e1000_set_msglevel,
.nway_reset = e1000_nway_reset, │ .nway_reset = e1000_nway_reset,
.get_link = e1000_get_link, │ .get_link = ethtool_op_get_link,
.get_eeprom_len = e1000_get_eeprom_len, │ .get_eeprom_len = e1000_get_eeprom_len,
.get_eeprom = e1000_get_eeprom, │ .get_eeprom = e1000_get_eeprom,
.set_eeprom = e1000_set_eeprom, │ .set_eeprom = e1000_set_eeprom,
.get_ringparam = e1000_get_ringparam, │ .get_ringparam = e1000_get_ringparam,
.set_ringparam = e1000_set_ringparam, │ .set_ringparam = e1000_set_ringparam,
.get_pauseparam = e1000_get_pauseparam, │ .get_pauseparam = e1000_get_pauseparam,
.set_pauseparam = e1000_set_pauseparam, │ .set_pauseparam = e1000_set_pauseparam,
.self_test = e1000_diag_test, │ .self_test = e1000_diag_test,
.get_strings = e1000_get_strings, │ .get_strings = e1000_get_strings,
.set_phys_id = e1000_set_phys_id, │ .set_phys_id = e1000_set_phys_id,
.get_ethtool_stats = e1000_get_ethtool_stats, │ .get_ethtool_stats = e1000_get_ethtool_stats,
.get_sset_count = e1000_get_sset_count, │ .get_sset_count = e1000e_get_sset_count,
.get_coalesce = e1000_get_coalesce, │ .get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce, │ .set_coalesce = e1000_set_coalesce,
.get_ts_info = ethtool_op_get_ts_info, │ .get_rxnfc = e1000_get_rxnfc,
│ .get_ts_info = e1000e_get_ts_info,
│ .get_eee = e1000e_get_eee,
│ .set_eee = e1000e_set_eee,
.get_link_ksettings = e1000_get_link_ksettings, │ .get_link_ksettings = e1000_get_link_ksettings,
.set_link_ksettings = e1000_set_link_ksettings, │ .set_link_ksettings = e1000_set_link_ksettings,
│ .get_priv_flags = e1000e_get_priv_flags,
│ .set_priv_flags = e1000e_set_priv_flags,
} │
linux/drivers/net/ethernet/intel/igc/igc_phy.c:614 │ linux/drivers/net/ethernet/intel/igc/igc_phy.c:558
│
struct igc_phy_info *phy = &hw->phy; │ struct igc_phy_info *phy = &hw->phy;
u32 i, mdic = 0; │ u32 i, mdic = 0;
s32 ret_val = 0; │ s32 ret_val = 0;
│
if (offset > MAX_PHY_REG_ADDRESS) { │ if (offset > MAX_PHY_REG_ADDRESS) {
hw_dbg("PHY Address %d is out of range\n", offset); │ hw_dbg("PHY Address %d is out of range\n", offset);
ret_val = -IGC_ERR_PARAM; │ ret_val = -IGC_ERR_PARAM;
goto out; │ goto out;
} │ }
│
/* Set up Op-code, Phy Address, and register offset in the MDI │ /* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the │ * Control register. The MAC will take care of interfacing with the
* PHY to write the desired data. │ * PHY to retrieve the desired data.
*/ │ */
mdic = (((u32)data) | │ mdic = ((offset << IGC_MDIC_REG_SHIFT) |
(offset << IGC_MDIC_REG_SHIFT) | │
(phy->addr << IGC_MDIC_PHY_SHIFT) | │ (phy->addr << IGC_MDIC_PHY_SHIFT) |
(IGC_MDIC_OP_WRITE)); │ (IGC_MDIC_OP_READ));
│
wr32(IGC_MDIC, mdic); │ wr32(IGC_MDIC, mdic);
│
	/* Poll the ready bit to see if the MDI write completed             │	/* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with │ * Increasing the time out as testing showed failures with
* the lower time out │ * the lower time out
*/ │ */
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { │ for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
udelay(50); │ udelay(50);
mdic = rd32(IGC_MDIC); │ mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY) │ if (mdic & IGC_MDIC_READY)
break; │ break;
} │ }
if (!(mdic & IGC_MDIC_READY)) { │ if (!(mdic & IGC_MDIC_READY)) {
hw_dbg("MDI Write did not complete\n"); │ hw_dbg("MDI Read did not complete\n");
ret_val = -IGC_ERR_PHY; │ ret_val = -IGC_ERR_PHY;
goto out; │ goto out;
} │ }
if (mdic & IGC_MDIC_ERROR) { │ if (mdic & IGC_MDIC_ERROR) {
hw_dbg("MDI Error\n"); │ hw_dbg("MDI Error\n");
ret_val = -IGC_ERR_PHY; │ ret_val = -IGC_ERR_PHY;
goto out; │ goto out;
} │ }
│ *data = (u16)mdic;
│
out: │ out:
return ret_val; │ return ret_val;
} │
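
The MDIC write (left) and read (right) paths above share one shape: compose the command word, write it to IGC_MDIC, then poll the READY bit with a bounded retry count, failing on timeout or a set ERROR bit; on a read, the low 16 bits of the final register value carry the data. A userspace sketch of that poll loop with mocked register access and illustrative (not i225) bit positions:

#include <stdint.h>
#include <stdio.h>

#define MOCK_MDIC_READY	0x10000000u
#define MOCK_MDIC_ERROR	0x40000000u
#define MOCK_POLL_LIMIT	1920	/* bounded retries, value illustrative */

static uint32_t mock_rd32(void) { return MOCK_MDIC_READY | 0x1234; }

static int mdic_wait(uint16_t *data)
{
	uint32_t mdic = 0;
	int i;

	for (i = 0; i < MOCK_POLL_LIMIT; i++) {
		mdic = mock_rd32();
		if (mdic & MOCK_MDIC_READY)
			break;
	}
	if (!(mdic & MOCK_MDIC_READY))
		return -1;	/* timed out waiting for the PHY */
	if (mdic & MOCK_MDIC_ERROR)
		return -1;	/* PHY flagged an error */
	*data = (uint16_t)mdic;	/* reads: low 16 bits carry the value */
	return 0;
}

int main(void)
{
	uint16_t data = 0;

	if (mdic_wait(&data) == 0)
		printf("read 0x%04x\n", data);
	return 0;
}
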
linux/drivers/net/ethernet/intel/igb/igb_main.c:1320 │ linux/drivers/net/ethernet/intel/igc/igc_main.c:4439
│
int q_vectors = adapter->num_q_vectors; │
int rxr_remaining = adapter->num_rx_queues; │ int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues; │ int txr_remaining = adapter->num_tx_queues;
int rxr_idx = 0, txr_idx = 0, v_idx = 0; │ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
│ int q_vectors = adapter->num_q_vectors;
int err; │ int err;
│
if (q_vectors >= (rxr_remaining + txr_remaining)) { │ if (q_vectors >= (rxr_remaining + txr_remaining)) {
for (; rxr_remaining; v_idx++) { │ for (; rxr_remaining; v_idx++) {
err = igb_alloc_q_vector(adapter, q_vectors, v_idx, │ err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
0, 0, 1, rxr_idx); │ 0, 0, 1, rxr_idx);
│
if (err) │ if (err)
goto err_out; │ goto err_out;
│
/* update counts and index */ │ /* update counts and index */
rxr_remaining--; │ rxr_remaining--;
rxr_idx++; │ rxr_idx++;
} │ }
} │ }
│
for (; v_idx < q_vectors; v_idx++) { │ for (; v_idx < q_vectors; v_idx++) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); │ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); │ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
│
err = igb_alloc_q_vector(adapter, q_vectors, v_idx, │ err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
tqpv, txr_idx, rqpv, rxr_idx); │ tqpv, txr_idx, rqpv, rxr_idx);
│
if (err) │ if (err)
goto err_out; │ goto err_out;
│
/* update counts and index */ │ /* update counts and index */
rxr_remaining -= rqpv; │ rxr_remaining -= rqpv;
txr_remaining -= tqpv; │ txr_remaining -= tqpv;
rxr_idx++; │ rxr_idx++;
txr_idx++; │ txr_idx++;
} │ }
│
return 0; │ return 0;
│
err_out: │ err_out:
adapter->num_tx_queues = 0; │ adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0; │ adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0; │ adapter->num_q_vectors = 0;
│
while (v_idx--) │ while (v_idx--)
igb_free_q_vector(adapter, v_idx); │ igc_free_q_vector(adapter, v_idx);
│
return -ENOMEM; │ return -ENOMEM;
} │
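
The vector-assignment loop above rests on a small piece of arithmetic: at each step, DIV_ROUND_UP(remaining, vectors_left) hands the current vector its fair share, so queues spread evenly even when the counts do not divide. A sketch showing the resulting distribution for 5 RX and 3 TX queues over 4 vectors:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int rx_remaining = 5, tx_remaining = 3, q_vectors = 4;
	int v;

	for (v = 0; v < q_vectors; v++) {
		int rqpv = DIV_ROUND_UP(rx_remaining, q_vectors - v);
		int tqpv = DIV_ROUND_UP(tx_remaining, q_vectors - v);

		printf("vector %d: %d rx, %d tx\n", v, rqpv, tqpv);
		rx_remaining -= rqpv;
		tx_remaining -= tqpv;
	}
	return 0;	/* 5 rx queues land as 2,1,1,1; 3 tx as 1,1,1,0 */
}
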
linux/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c:1801 │ linux/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c:1680
│
j = lio->linfo.rxpciq[vj].s.q_no; │ if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
│ continue;
│
		/* packets sent to TCP/IP network stack */                  │		/*packets sent to TCP/IP network stack */
/* # of packets to network stack */ │ /*# of packets to network stack */
data[i++] = CVM_CAST64( │ data[i++] =
oct_dev->droq[j]->stats.rx_pkts_received); │ CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
/* # of bytes to network stack */ │ /*# of bytes to network stack */
data[i++] = CVM_CAST64( │ data[i++] =
oct_dev->droq[j]->stats.rx_bytes_received); │ CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
│ /*# of packets dropped */
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + │ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
oct_dev->droq[j]->stats.dropped_toomany + │ oct_dev->droq[j]->stats.dropped_toomany +
oct_dev->droq[j]->stats.rx_dropped); │ oct_dev->droq[j]->stats.rx_dropped);
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); │ data[i++] =
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); │ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); │ data[i++] =
│ CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
│ data[i++] =
│ CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
│
/* control and data path */ │ /*control and data path*/
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); │ data[i++] =
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); │ CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
│ data[i++] =
│ CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
data[i++] = │ data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); │ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
│
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan); │
data[i++] = │ data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); │ CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
│ data[i++] =
│ CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
} │
linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c:208 │ linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c:329
│
MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0), │ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0),
MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1), │ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1),
MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2), │ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2),
MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3), │ MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3),
MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4), │ MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
MLXSW_AFK_BLOCK(0x15, mlxsw_sp_afk_element_info_mac_5), │ MLXSW_AFK_BLOCK(0x1A, mlxsw_sp_afk_element_info_mac_5b),
MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0), │ MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1), │ MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2), │ MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
MLXSW_AFK_BLOCK(0x3C, mlxsw_sp_afk_element_info_ipv4_4), │ MLXSW_AFK_BLOCK(0x35, mlxsw_sp_afk_element_info_ipv4_4b),
MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0), │ MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1), │ MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2), │ MLXSW_AFK_BLOCK(0x47, mlxsw_sp_afk_element_info_ipv6_2b),
MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3), │ MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3),
MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4), │ MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4),
MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5), │ MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5),
MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0), │ MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0),
MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2), │ MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2),
} │
linux/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1693 │ linux/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c:1742
│
│ struct net_device *netdev = rq->netdev;
│ struct mlx5e_priv *priv = netdev_priv(netdev);
│ struct mlx5e_rep_priv *rpriv = priv->ppriv;
│ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_wq_cyc *wq = &rq->wqe.wq; │ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi; │ struct mlx5e_wqe_frag_info *wi;
struct sk_buff *skb; │ struct sk_buff *skb;
u32 cqe_bcnt; │ u32 cqe_bcnt;
u16 ci; │ u16 ci;
│
ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); │ ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
wi = get_frag(rq, ci); │ wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt); │ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
│
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { │ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
mlx5e_handle_rx_err_cqe(rq, cqe); │ mlx5e_handle_rx_err_cqe(rq, cqe);
goto free_wqe; │ goto free_wqe;
} │ }
│
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, │ skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear, │ mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear, │ mlx5e_skb_from_cqe_nonlinear,
rq, cqe, wi, cqe_bcnt); │ rq, cqe, wi, cqe_bcnt);
if (!skb) { │ if (!skb) {
/* probably for XDP */ │ /* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { │ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
/* do not return page to cache, │ /* do not return page to cache,
* it will be returned on XDP_TX completion. │ * it will be returned on XDP_TX completion.
*/ │ */
goto wq_cyc_pop; │ goto wq_cyc_pop;
} │ }
goto free_wqe; │ goto free_wqe;
} │ }
│
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); │ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
│
if (mlx5e_cqe_regb_chain(cqe)) │ if (rep->vlan && skb_vlan_tag_present(skb))
if (!mlx5e_tc_update_skb(cqe, skb)) { │ skb_vlan_pop(skb);
dev_kfree_skb_any(skb); │
goto free_wqe; │
} │
│
napi_gro_receive(rq->cq.napi, skb); │ mlx5e_rep_tc_receive(cqe, rq, skb);
│
free_wqe: │ free_wqe:
mlx5e_free_rx_wqe(rq, wi, true); │ mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop: │ wq_cyc_pop:
mlx5_wq_cyc_pop(wq); │ mlx5_wq_cyc_pop(wq);
} │
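
Both RX handlers above call skb_from_cqe through INDIRECT_CALL_2(), which compares the function pointer against its two expected targets and calls the match directly, turning the common case into a predictable direct call rather than a retpoline-penalized indirect one. A simplified model of the idea (not the kernel macro itself):

#include <stdio.h>

#define MOCK_INDIRECT_CALL_2(f, f1, f2, ...)		\
	((f) == (f1) ? (f1)(__VA_ARGS__) :		\
	 (f) == (f2) ? (f2)(__VA_ARGS__) : (f)(__VA_ARGS__))

static int build_linear(int len)    { return len; }	/* fast path */
static int build_nonlinear(int len) { return -len; }	/* fragmented path */

int main(void)
{
	int (*skb_from_cqe)(int) = build_linear;

	/* resolves to a direct call to build_linear at runtime */
	printf("%d\n", MOCK_INDIRECT_CALL_2(skb_from_cqe,
					    build_linear, build_nonlinear, 64));
	return 0;
}
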
linux/drivers/net/ethernet/amd/xgbe/xgbe-dev.c:2078 │ linux/drivers/net/ethernet/amd/xgbe/xgbe-dev.c:2119
│
switch (int_id) { │ switch (int_id) {
case XGMAC_INT_DMA_CH_SR_TI: │ case XGMAC_INT_DMA_CH_SR_TI:
XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); │ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
break; │ break;
case XGMAC_INT_DMA_CH_SR_TPS: │ case XGMAC_INT_DMA_CH_SR_TPS:
XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1); │ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0);
break; │ break;
case XGMAC_INT_DMA_CH_SR_TBU: │ case XGMAC_INT_DMA_CH_SR_TBU:
XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1); │ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0);
break; │ break;
case XGMAC_INT_DMA_CH_SR_RI: │ case XGMAC_INT_DMA_CH_SR_RI:
XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); │ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
break; │ break;
case XGMAC_INT_DMA_CH_SR_RBU: │ case XGMAC_INT_DMA_CH_SR_RBU:
XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); │ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0);
break; │ break;
case XGMAC_INT_DMA_CH_SR_RPS: │ case XGMAC_INT_DMA_CH_SR_RPS:
XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1); │ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0);
break; │ break;
case XGMAC_INT_DMA_CH_SR_TI_RI: │ case XGMAC_INT_DMA_CH_SR_TI_RI:
XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); │ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); │ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
break; │ break;
case XGMAC_INT_DMA_CH_SR_FBE: │ case XGMAC_INT_DMA_CH_SR_FBE:
XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); │ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0);
break; │ break;
case XGMAC_INT_DMA_ALL: │ case XGMAC_INT_DMA_ALL:
channel->curr_ier |= channel->saved_ier; │ channel->saved_ier = channel->curr_ier;
│ channel->curr_ier = 0;
break; │ break;
default: │ default:
return -1; │ return -1;
} │ }
│
XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); │ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
│
return 0; │ return 0;
} │
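
The two columns above are the enable (left) and disable (right) halves of the same per-source helper; their XGMAC_INT_DMA_ALL arms show the bulk pattern: disabling stashes the live enable mask in saved_ier and zeroes it, and re-enabling ORs the stash back in. A sketch of that save/restore with mock fields in place of the xgbe channel structure:

#include <stdint.h>
#include <stdio.h>

struct mock_channel {
	uint32_t curr_ier;	/* mask currently programmed to hardware */
	uint32_t saved_ier;	/* mask stashed across a disable-all */
};

static void disable_all(struct mock_channel *ch)
{
	ch->saved_ier = ch->curr_ier;
	ch->curr_ier = 0;
}

static void enable_all(struct mock_channel *ch)
{
	ch->curr_ier |= ch->saved_ier;
}

int main(void)
{
	struct mock_channel ch = { .curr_ier = 0x0000c0c7 };

	disable_all(&ch);
	printf("disabled: curr=%#x saved=%#x\n", ch.curr_ier, ch.saved_ier);
	enable_all(&ch);
	printf("restored: curr=%#x\n", ch.curr_ier);
	return 0;
}
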
linux/drivers/net/ethernet/netronome/nfp/flower/conntrack.c:696 │ linux/drivers/net/ethernet/netronome/nfp/flower/conntrack.c:748
│
offset = key_map[FLOW_PAY_GRE]; │ offset = key_map[FLOW_PAY_UDP_TUN];
key = kdata + offset; │ key = kdata + offset;
msk = mdata + offset; │ msk = mdata + offset;
if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { │ if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
struct nfp_flower_ipv6_gre_tun *gre_match; │ struct nfp_flower_ipv6_udp_tun *udp_match;
struct nfp_ipv6_addr_entry *entry; │ struct nfp_ipv6_addr_entry *entry;
struct in6_addr *dst; │ struct in6_addr *dst;
│
for (i = 0; i < _CT_TYPE_MAX; i++) { │ for (i = 0; i < _CT_TYPE_MAX; i++) {
nfp_flower_compile_ipv6_gre_tun((void *)key, │ nfp_flower_compile_ipv6_udp_tun((void *)key,
(void *)msk, rules[i]); │ (void *)msk, rules[i]);
} │ }
gre_match = (struct nfp_flower_ipv6_gre_tun *)key; │ udp_match = (struct nfp_flower_ipv6_udp_tun *)key;
dst = &gre_match->ipv6.dst; │ dst = &udp_match->ipv6.dst;
│
entry = nfp_tunnel_add_ipv6_off(priv->app, dst); │ entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
if (!entry) { │ if (!entry) {
err = -ENOMEM; │ err = -ENOMEM;
goto ct_offload_err; │ goto ct_offload_err;
} │ }
│
flow_pay->nfp_tun_ipv6 = entry; │ flow_pay->nfp_tun_ipv6 = entry;
} else { │ } else {
__be32 dst; │ __be32 dst;
│
for (i = 0; i < _CT_TYPE_MAX; i++) { │ for (i = 0; i < _CT_TYPE_MAX; i++) {
nfp_flower_compile_ipv4_gre_tun((void *)key, │ nfp_flower_compile_ipv4_udp_tun((void *)key,
(void *)msk, rules[i]); │ (void *)msk, rules[i]);
} │ }
dst = ((struct nfp_flower_ipv4_gre_tun *)key)->ipv4.dst; │ dst = ((struct nfp_flower_ipv4_udp_tun *)key)->ipv4.dst;
│
/* Store the tunnel destination in the rule data. │ /* Store the tunnel destination in the rule data.
* This must be present and be an exact match. │ * This must be present and be an exact match.
*/ │ */
flow_pay->nfp_tun_ipv4_addr = dst; │ flow_pay->nfp_tun_ipv4_addr = dst;
nfp_tunnel_add_ipv4_off(priv->app, dst); │ nfp_tunnel_add_ipv4_off(priv->app, dst);
│ }
│
│ if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
│ offset = key_map[FLOW_PAY_GENEVE_OPT];
│ key = kdata + offset;
│ msk = mdata + offset;
│ for (i = 0; i < _CT_TYPE_MAX; i++)
│ nfp_flower_compile_geneve_opt(key, msk, rules[i]);
} │ }
} │
linux/drivers/net/ethernet/netronome/nfp/nfp_net_common.c:1923 │ linux/drivers/net/ethernet/netronome/nfp/nfp_net_common.c:1895
│
.ndo_init = nfp_app_ndo_init, │ .ndo_init = nfp_app_ndo_init,
.ndo_uninit = nfp_app_ndo_uninit, │ .ndo_uninit = nfp_app_ndo_uninit,
.ndo_open = nfp_net_netdev_open, │ .ndo_open = nfp_net_netdev_open,
.ndo_stop = nfp_net_netdev_close, │ .ndo_stop = nfp_net_netdev_close,
.ndo_start_xmit = nfp_net_tx, │ .ndo_start_xmit = nfp_net_tx,
.ndo_get_stats64 = nfp_net_stat64, │ .ndo_get_stats64 = nfp_net_stat64,
.ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, │ .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, │ .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
.ndo_set_vf_mac = nfp_app_set_vf_mac, │ .ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan, │ .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, │ .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
.ndo_set_vf_trust = nfp_app_set_vf_trust, │ .ndo_set_vf_trust = nfp_app_set_vf_trust,
.ndo_get_vf_config = nfp_app_get_vf_config, │ .ndo_get_vf_config = nfp_app_get_vf_config,
.ndo_set_vf_link_state = nfp_app_set_vf_link_state, │ .ndo_set_vf_link_state = nfp_app_set_vf_link_state,
.ndo_setup_tc = nfp_port_setup_tc, │ .ndo_setup_tc = nfp_port_setup_tc,
.ndo_tx_timeout = nfp_net_tx_timeout, │ .ndo_tx_timeout = nfp_net_tx_timeout,
.ndo_set_rx_mode = nfp_net_set_rx_mode, │ .ndo_set_rx_mode = nfp_net_set_rx_mode,
.ndo_change_mtu = nfp_net_change_mtu, │ .ndo_change_mtu = nfp_net_change_mtu,
.ndo_set_mac_address = nfp_net_set_mac_address, │ .ndo_set_mac_address = nfp_net_set_mac_address,
.ndo_set_features = nfp_net_set_features, │ .ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check, │ .ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name, │ .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_bpf = nfp_net_xdp, │ .ndo_bpf = nfp_net_xdp,
│ .ndo_xsk_wakeup = nfp_net_xsk_wakeup,
.ndo_get_devlink_port = nfp_devlink_get_devlink_port, │ .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
} │
linux/drivers/net/ethernet/intel/igb/igb_main.c:5851 │ linux/drivers/net/ethernet/intel/igc/igc_main.c:1024
│
struct e1000_adv_tx_context_desc *context_desc; │ struct igc_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use; │ u16 i = tx_ring->next_to_use;
struct timespec64 ts; │
│
context_desc = IGB_TX_CTXTDESC(tx_ring, i); │ context_desc = IGC_TX_CTXTDESC(tx_ring, i);
│
i++; │ i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; │ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
│
/* set bits to identify this as an advanced context descriptor */ │ /* set bits to identify this as an advanced context descriptor */
type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT; │ type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
│
/* For 82575, context index must be unique per ring. */ │ /* For i225, context index must be unique per ring. */
if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) │ if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
mss_l4len_idx |= tx_ring->reg_idx << 4; │ mss_l4len_idx |= tx_ring->reg_idx << 4;
│
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); │ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); │ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); │ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
│
/* We assume there is always a valid tx time available. Invalid times │ /* We assume there is always a valid Tx time available. Invalid times
* should have been handled by the upper layers. │ * should have been handled by the upper layers.
*/ │ */
if (tx_ring->launchtime_enable) { │ if (tx_ring->launchtime_enable) {
ts = ktime_to_timespec64(first->skb->tstamp); │ struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
│ ktime_t txtime = first->skb->tstamp;
│
skb_txtime_consumed(first->skb); │ skb_txtime_consumed(first->skb);
context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); │ context_desc->launch_time = igc_tx_launchtime(adapter,
│ txtime);
} else { │ } else {
context_desc->seqnum_seed = 0; │ context_desc->launch_time = 0;
} │ }
} │
linux/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c:218 │ linux/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c:142
│
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); │ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg; │ struct mlx5_flow_group *fg;
u32 *in, *match; │ u32 *in, *match;
│
in = kvzalloc(inlen, GFP_KERNEL); │ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) │ if (!in)
return ERR_PTR(-ENOMEM); │ return ERR_PTR(-ENOMEM);
│
MLX5_SET(create_flow_group_in, in, match_criteria_enable, │ MLX5_SET(create_flow_group_in, in, match_criteria_enable,
MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2); │ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); │ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
│
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16); │ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0); │ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
│ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
│ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
│
MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0, │ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask()); │ mlx5_eswitch_get_vport_metadata_mask());
│
MLX5_SET(create_flow_group_in, in, start_flow_index, │ MLX5_SET(create_flow_group_in, in, start_flow_index,
MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM); │ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM);
MLX5_SET(create_flow_group_in, in, end_flow_index, │ MLX5_SET(create_flow_group_in, in, end_flow_index,
MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO); │ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO);
│
fg = mlx5_create_flow_group(ingress_ft, in); │ fg = mlx5_create_flow_group(ingress_ft, in);
│ kvfree(in);
if (IS_ERR(fg)) │ if (IS_ERR(fg))
esw_warn(esw->dev, │ esw_warn(esw->dev,
"Failed to create MAC flow group for bridge ingress table (err= │ "Failed to create VLAN flow group for bridge ingress table (err
PTR_ERR(fg)); │ PTR_ERR(fg));
│
kvfree(in); │
return fg; │ return fg;
} │
linux/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c:218 │ linux/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c:181
│
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); │ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg; │ struct mlx5_flow_group *fg;
u32 *in, *match; │ u32 *in, *match;
│
in = kvzalloc(inlen, GFP_KERNEL); │ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) │ if (!in)
return ERR_PTR(-ENOMEM); │ return ERR_PTR(-ENOMEM);
│
MLX5_SET(create_flow_group_in, in, match_criteria_enable, │ MLX5_SET(create_flow_group_in, in, match_criteria_enable,
MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2); │ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); │ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
│
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16); │ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0); │ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
│ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
│
MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0, │ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask()); │ mlx5_eswitch_get_vport_metadata_mask());
│
MLX5_SET(create_flow_group_in, in, start_flow_index, │ MLX5_SET(create_flow_group_in, in, start_flow_index,
MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM); │ MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM);
MLX5_SET(create_flow_group_in, in, end_flow_index, │ MLX5_SET(create_flow_group_in, in, end_flow_index,
MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO); │ MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO);
│
fg = mlx5_create_flow_group(ingress_ft, in); │ fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg)) │ if (IS_ERR(fg))
esw_warn(esw->dev, │ esw_warn(esw->dev,
"Failed to create MAC flow group for bridge ingress table (err= │ "Failed to create bridge ingress table VLAN filter flow group (
PTR_ERR(fg)); │ PTR_ERR(fg));
│
kvfree(in); │ kvfree(in);
return fg; │ return fg;
} │
next prev up linux/drivers/net/ethernet/dec/tulip/de4x5.c:4614 │ linux/drivers/net/ethernet/dec/tulip/de4x5.c:4694
│
struct de4x5_private *lp = netdev_priv(dev); │ struct de4x5_private *lp = netdev_priv(dev);
u_char len = (*p & BLOCK_LEN)+1; │ u_char len = (*p & BLOCK_LEN)+1;
│
/* Recursively figure out the info blocks */ │ /* Recursively figure out the info blocks */
if (--count > lp->tcount) { │ if (--count > lp->tcount) {
if (*(p+len) < 128) { │ if (*(p+len) < 128) {
return dc_infoblock[COMPACT](dev, count, p+len); │ return dc_infoblock[COMPACT](dev, count, p+len);
} else { │ } else {
return dc_infoblock[*(p+len+1)](dev, count, p+len); │ return dc_infoblock[*(p+len+1)](dev, count, p+len);
} │ }
} │ }
│
p += 2; │ p += 2;
if (lp->state == INITIALISED) { │ if (lp->state == INITIALISED) {
lp->ibn = 1; │ lp->ibn = 3;
lp->active = *p++; │ lp->active = *p++;
lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1); │ if (MOTO_SROM_BUG) lp->active = 0;
lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1); │ /* if (MOTO_SROM_BUG) statement indicates lp->active could
│ * be 8 (i.e. the size of array lp->phy) */
│ if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy)))
│ return -EINVAL;
│ lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
│ lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; │ lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2; │ lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2; │ lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
lp->phy[lp->active].ttm = get_unaligned_le16(p); │ lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
│ lp->phy[lp->active].mci = *p;
return 0; │ return 0;
} else if ((lp->media == INIT) && (lp->timeout < 0)) { │ } else if ((lp->media == INIT) && (lp->timeout < 0)) {
lp->ibn = 1; │ lp->ibn = 3;
lp->active = *p; │ lp->active = *p;
│ if (MOTO_SROM_BUG) lp->active = 0;
lp->infoblock_csr6 = OMR_MII_100; │ lp->infoblock_csr6 = OMR_MII_100;
lp->useMII = true; │ lp->useMII = true;
lp->infoblock_media = ANS; │ lp->infoblock_media = ANS;
│
de4x5_switch_mac_port(dev); │ de4x5_switch_mac_port(dev);
} │ }
│
return dc21140m_autoconf(dev); │ return dc2114x_autoconf(dev);
} │
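
The right-hand column adds a bounds check before touching `lp->phy[lp->active]` because `lp->active` is read straight from SROM contents and, per the MOTO_SROM_BUG comment, can reach 8, one past the end of the array. A stand-alone sketch of the same defensive pattern, validating a device-supplied index before use (the names phy_info and parse_active_phy are illustrative, not the driver's):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct phy_info { int mc, ana, fdx, ttm; };

static struct phy_info phy[8];

static int parse_active_phy(const unsigned char *srom)
{
	unsigned int active = srom[0];	/* untrusted: comes from the device */

	if (active >= ARRAY_SIZE(phy)) {
		fprintf(stderr, "bogus active PHY index %u\n", active);
		return -1;		/* mirrors the -EINVAL bail-out above */
	}
	phy[active].mc = srom[1] | (srom[2] << 8);
	return 0;
}

int main(void)
{
	unsigned char good[] = { 3, 0x34, 0x12 };
	unsigned char bad[]  = { 8, 0x00, 0x00 };	/* == ARRAY_SIZE(phy) */

	printf("good: %d\n", parse_active_phy(good));
	printf("bad:  %d\n", parse_active_phy(bad));
	return 0;
}
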
next prev up linux/drivers/net/ethernet/ti/cpsw.c:1207 │ linux/drivers/net/ethernet/ti/cpsw_new.c:1189
│
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, │ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = cpsw_get_drvinfo, │ .get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel, │ .get_msglevel = cpsw_get_msglevel,
.set_msglevel = cpsw_set_msglevel, │ .set_msglevel = cpsw_set_msglevel,
.get_link = ethtool_op_get_link, │ .get_link = ethtool_op_get_link,
.get_ts_info = cpsw_get_ts_info, │ .get_ts_info = cpsw_get_ts_info,
.get_coalesce = cpsw_get_coalesce, │ .get_coalesce = cpsw_get_coalesce,
.set_coalesce = cpsw_set_coalesce, │ .set_coalesce = cpsw_set_coalesce,
.get_sset_count = cpsw_get_sset_count, │ .get_sset_count = cpsw_get_sset_count,
.get_strings = cpsw_get_strings, │ .get_strings = cpsw_get_strings,
.get_ethtool_stats = cpsw_get_ethtool_stats, │ .get_ethtool_stats = cpsw_get_ethtool_stats,
.get_pauseparam = cpsw_get_pauseparam, │ .get_pauseparam = cpsw_get_pauseparam,
.set_pauseparam = cpsw_set_pauseparam, │ .set_pauseparam = cpsw_set_pauseparam,
.get_wol = cpsw_get_wol, │ .get_wol = cpsw_get_wol,
.set_wol = cpsw_set_wol, │ .set_wol = cpsw_set_wol,
.get_regs_len = cpsw_get_regs_len, │ .get_regs_len = cpsw_get_regs_len,
.get_regs = cpsw_get_regs, │ .get_regs = cpsw_get_regs,
.begin = cpsw_ethtool_op_begin, │ .begin = cpsw_ethtool_op_begin,
.complete = cpsw_ethtool_op_complete, │ .complete = cpsw_ethtool_op_complete,
.get_channels = cpsw_get_channels, │ .get_channels = cpsw_get_channels,
.set_channels = cpsw_set_channels, │ .set_channels = cpsw_set_channels,
.get_link_ksettings = cpsw_get_link_ksettings, │ .get_link_ksettings = cpsw_get_link_ksettings,
.set_link_ksettings = cpsw_set_link_ksettings, │ .set_link_ksettings = cpsw_set_link_ksettings,
.get_eee = cpsw_get_eee, │ .get_eee = cpsw_get_eee,
.set_eee = cpsw_set_eee, │ .set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset, │ .nway_reset = cpsw_nway_reset,
.get_ringparam = cpsw_get_ringparam, │ .get_ringparam = cpsw_get_ringparam,
.set_ringparam = cpsw_set_ringparam, │ .set_ringparam = cpsw_set_ringparam,
} │
next prev up linux/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:7188 │ linux/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c:7082
│
int remaining_ports_count; │ int remaining_ports_count;
unsigned long port_map; │ unsigned long port_map;
int size_remainder; │ int size_remainder;
int port, size; │ int port, size;
│
/* The loopback requires fixed 1kB of the FIFO space assignment. */ │ /* The loopback requires fixed 4kB of the FIFO space assignment. */
mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX, │ mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
MVPP22_TX_FIFO_DATA_SIZE_1KB); │ MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX); │ port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
│
/* Set TX FIFO size to 0 for inactive ports. */ │ /* Set RX FIFO size to 0 for inactive ports. */
for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) │ for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
mvpp22_tx_fifo_set_hw(priv, port, 0); │ mvpp22_rx_fifo_set_hw(priv, port, 0);
│
/* Assign remaining TX FIFO space among all active ports. */ │ /* Assign remaining RX FIFO space among all active ports. */
size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB; │ size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
remaining_ports_count = hweight_long(port_map); │ remaining_ports_count = hweight_long(port_map);
│
for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { │ for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
if (remaining_ports_count == 1) │ if (remaining_ports_count == 1)
size = min(size_remainder, │ size = size_remainder;
MVPP22_TX_FIFO_DATA_SIZE_10KB); │
else if (port == 0) │ else if (port == 0)
size = MVPP22_TX_FIFO_DATA_SIZE_10KB; │ size = max(size_remainder / remaining_ports_count,
│ MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
│ else if (port == 1)
│ size = max(size_remainder / remaining_ports_count,
│ MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
else │ else
size = size_remainder / remaining_ports_count; │ size = size_remainder / remaining_ports_count;
│
size_remainder -= size; │ size_remainder -= size;
remaining_ports_count--; │ remaining_ports_count--;
│
mvpp22_tx_fifo_set_hw(priv, port, size); │ mvpp22_rx_fifo_set_hw(priv, port, size);
} │ }
│
│ mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
│ MVPP2_RX_FIFO_PORT_MIN_PKT);
│ mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
} │
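
Worked through with the constants visible in the right-hand column, the RX FIFO split behaves like this: 4 KB is pinned to the loopback port, 44 KB is shared among the active ports, port 0 is guaranteed at least 32 KB, port 1 at least 8 KB, and the last remaining port absorbs whatever is left. A stand-alone sketch under those assumptions (the function name and KB-based constants are ours, mirroring the MVPP2_RX_FIFO_* macros):

#include <stdio.h>

#define DATA_TOTAL_KB	44
#define PORT0_MIN_KB	32
#define PORT1_MIN_KB	8
#define NPORTS		3

static int max_int(int a, int b) { return a > b ? a : b; }

static void split_rx_fifo(unsigned long port_map, int sizes[NPORTS])
{
	int remainder = DATA_TOTAL_KB;
	int remaining = __builtin_popcountl(port_map);

	for (int port = 0; port < NPORTS; port++) {
		int size;

		if (!(port_map & (1UL << port))) {
			sizes[port] = 0;	/* inactive port gets nothing */
			continue;
		}
		if (remaining == 1)
			size = remainder;	/* last port takes the rest */
		else if (port == 0)
			size = max_int(remainder / remaining, PORT0_MIN_KB);
		else if (port == 1)
			size = max_int(remainder / remaining, PORT1_MIN_KB);
		else
			size = remainder / remaining;

		sizes[port] = size;
		remainder -= size;
		remaining--;
	}
}

int main(void)
{
	int sizes[NPORTS];

	split_rx_fifo(0x7, sizes);	/* all three ports active */
	for (int p = 0; p < NPORTS; p++)
		printf("port %d: %d KB\n", p, sizes[p]);
	return 0;
}

With all three ports active this yields 32, 8 and 4 KB, which sums exactly to the 44 KB data budget.
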
next prev up linux/drivers/net/ethernet/netronome/nfp/nfd3/dp.c:468 │ linux/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c:273
│
struct nfp_net_r_vector *r_vec = tx_ring->r_vec; │ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp; │ u32 done_pkts = 0, done_bytes = 0, reused = 0;
u32 done_pkts = 0, done_bytes = 0; │
bool done_all; │ bool done_all;
int idx, todo; │ int idx, todo;
u32 qcp_rd_p; │ u32 qcp_rd_p;
│
/* Work out how many descriptors have been transmitted */ │ if (tx_ring->wr_p == tx_ring->rd_p)
qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp); │ return true;
│
│ /* Work out how many descriptors have been transmitted. */
│ qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
│
if (qcp_rd_p == tx_ring->qcp_rd_p) │ if (qcp_rd_p == tx_ring->qcp_rd_p)
return true; │ return true;
│
todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p); │ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
│
done_all = todo <= NFP_NET_XDP_MAX_COMPLETE; │ done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
todo = min(todo, NFP_NET_XDP_MAX_COMPLETE); │ todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
│
tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo); │ tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
│
done_pkts = todo; │ done_pkts = todo;
while (todo--) { │ while (todo--) {
│ struct nfp_nfd3_tx_buf *txbuf;
│
idx = D_IDX(tx_ring, tx_ring->rd_p); │ idx = D_IDX(tx_ring, tx_ring->rd_p);
tx_ring->rd_p++; │ tx_ring->rd_p++;
│
done_bytes += tx_ring->txbufs[idx].real_len; │ txbuf = &tx_ring->txbufs[idx];
│ if (unlikely(!txbuf->real_len))
│ continue;
│
│ done_bytes += txbuf->real_len;
│ txbuf->real_len = 0;
│
│ if (txbuf->is_xsk_tx) {
│ nfp_nfd3_xsk_tx_free(txbuf);
│ reused++;
│ }
} │ }
│
u64_stats_update_begin(&r_vec->tx_sync); │ u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_bytes += done_bytes; │ r_vec->tx_bytes += done_bytes;
r_vec->tx_pkts += done_pkts; │ r_vec->tx_pkts += done_pkts;
u64_stats_update_end(&r_vec->tx_sync); │ u64_stats_update_end(&r_vec->tx_sync);
│
│ xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused);
│
WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt, │ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
"XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n", │ "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt); │ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
│
return done_all; │ return done_all;
} │
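
Both columns compute the completion count as D_IDX(qcp_rd_p - tx_ring->qcp_rd_p): with a power-of-two ring, masking the difference of free-running counters yields the correct delta even after the hardware pointer wraps. A minimal stand-alone illustration of that arithmetic (the ring size and pointer values here are invented):

#include <stdio.h>
#include <stdint.h>

#define RING_CNT 256u			/* must be a power of two */
#define D_IDX(x) ((x) & (RING_CNT - 1))

int main(void)
{
	uint32_t cached_rd = 250;	/* our copy of the HW read pointer */
	uint32_t hw_rd = 4;		/* HW pointer has wrapped past 255 */

	/* 4 - 250 underflows in u32, but masking recovers the true
	 * number of newly completed descriptors: 10 */
	uint32_t todo = D_IDX(hw_rd - cached_rd);

	printf("completed descriptors: %u\n", todo);
	return 0;
}
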
next prev up linux/drivers/net/ethernet/huawei/hinic/hinic_main.c:240 │ linux/drivers/net/ethernet/huawei/hinic/hinic_main.c:144
│
int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); │ int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev; │ struct net_device *netdev = nic_dev->netdev;
│
if (nic_dev->rxqs) │ if (nic_dev->txqs)
return -EINVAL; │ return -EINVAL;
│
nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs, │ nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs,
sizeof(*nic_dev->rxqs), GFP_KERNEL); │ sizeof(*nic_dev->txqs), GFP_KERNEL);
if (!nic_dev->rxqs) │ if (!nic_dev->txqs)
return -ENOMEM; │ return -ENOMEM;
│
hinic_rq_dbgfs_init(nic_dev); │ hinic_sq_dbgfs_init(nic_dev);
│
for (i = 0; i < num_rxqs; i++) { │ for (i = 0; i < num_txqs; i++) {
struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i); │ struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i);
│
err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev); │ err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev);
if (err) { │ if (err) {
netif_err(nic_dev, drv, netdev, │ netif_err(nic_dev, drv, netdev,
"Failed to init rxq\n"); │ "Failed to init Txq\n");
goto err_init_rxq; │ goto err_init_txq;
} │ }
│
err = hinic_rq_debug_add(nic_dev, i); │ err = hinic_sq_debug_add(nic_dev, i);
if (err) { │ if (err) {
netif_err(nic_dev, drv, netdev, │ netif_err(nic_dev, drv, netdev,
"Failed to add RQ%d debug\n", i); │ "Failed to add SQ%d debug\n", i);
goto err_add_rq_dbg; │ goto err_add_sq_dbg;
} │ }
} │ }
│
return 0; │ return 0;
│
err_add_rq_dbg: │ err_add_sq_dbg:
hinic_clean_rxq(&nic_dev->rxqs[i]); │ hinic_clean_txq(&nic_dev->txqs[i]);
err_init_rxq: │ err_init_txq:
for (j = 0; j < i; j++) { │ for (j = 0; j < i; j++) {
hinic_rq_debug_rem(nic_dev->rxqs[j].rq); │ hinic_sq_debug_rem(nic_dev->txqs[j].sq);
hinic_clean_rxq(&nic_dev->rxqs[j]); │ hinic_clean_txq(&nic_dev->txqs[j]);
} │ }
│
hinic_rq_dbgfs_uninit(nic_dev); │ hinic_sq_dbgfs_uninit(nic_dev);
│
devm_kfree(&netdev->dev, nic_dev->rxqs); │ devm_kfree(&netdev->dev, nic_dev->txqs);
return err; │ return err;
} │
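
The error path in both columns follows the usual goto-unwind shape for a partially built array: a failure while setting up element i first undoes whatever part of element i already exists, then walks elements 0..i-1 in full, in reverse order of construction. A compact stand-alone sketch of the pattern (all helper names are invented):

#include <stdio.h>

#define NQUEUES 4

static int init_queue(int i)   { return i == 2 ? -1 : 0; }  /* fail at 2 */
static void clean_queue(int i) { printf("clean queue %d\n", i); }
static int add_debug(int i)    { (void)i; return 0; }
static void rem_debug(int i)   { printf("remove debug %d\n", i); }

static int init_all(void)
{
	int i, j, err;

	for (i = 0; i < NQUEUES; i++) {
		err = init_queue(i);
		if (err)
			goto err_init;		/* nothing of i to undo */

		err = add_debug(i);
		if (err)
			goto err_dbg;		/* queue i itself is built */
	}
	return 0;

err_dbg:
	clean_queue(i);				/* undo the half-built element */
err_init:
	for (j = 0; j < i; j++) {		/* undo the complete ones */
		rem_debug(j);
		clean_queue(j);
	}
	return err;
}

int main(void)
{
	printf("init_all: %d\n", init_all());
	return 0;
}
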
next prev up linux/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c:7811 │ linux/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c:7910
│
│ int ret = 0;
struct fw_vi_mac_cmd c; │ struct fw_vi_mac_cmd c;
struct fw_vi_mac_raw *p = &c.u.raw; │ struct fw_vi_mac_raw *p = &c.u.raw;
u32 val; │ u32 val;
│
memset(&c, 0, sizeof(c)); │ memset(&c, 0, sizeof(c));
c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | │ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F | │ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
FW_CMD_EXEC_V(0) | │
FW_VI_MAC_CMD_VIID_V(viid)); │ FW_VI_MAC_CMD_VIID_V(viid));
val = FW_CMD_LEN16_V(1) | │ val = FW_CMD_LEN16_V(1) |
FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW); │ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | │ c.freemacs_to_len16 = cpu_to_be32(val);
FW_CMD_LEN16_V(val)); │
│
p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) | │ /* Specify that this is an inner mac address */
FW_VI_MAC_ID_BASED_FREE); │ p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
│
/* Lookup Type. Outer header: 0, Inner header: 1 */ │ /* Lookup Type. Outer header: 0, Inner header: 1 */
p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) | │ p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
DATAPORTNUM_V(port_id)); │ DATAPORTNUM_V(port_id));
/* Lookup mask and port mask */ │ /* Lookup mask and port mask */
p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) | │ p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
DATAPORTNUM_V(DATAPORTNUM_M)); │ DATAPORTNUM_V(DATAPORTNUM_M));
│
/* Copy the address and the mask */ │ /* Copy the address and the mask */
memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN); │ memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN); │ memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
│
return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); │ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
│ if (ret == 0) {
│ ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
│ if (ret != idx)
│ ret = -ENOMEM;
│ }
│
│ return ret;
} │
next prev up linux/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c:360 │ linux/drivers/net/ethernet/qlogic/qed/qed_iscsi.c:453
│
struct nvmetcp_conn_update_ramrod_params *p_ramrod = NULL; │ struct iscsi_conn_update_ramrod_params *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL; │ struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data; │ struct qed_sp_init_data init_data;
int rc = -EINVAL; │ int rc;
u32 dval; │ u32 dval;
│
/* Get SPQ entry */ │ /* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data)); │ memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_conn->icid; │ init_data.cid = p_conn->icid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; │ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = comp_mode; │ init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_addr; │ init_data.p_comp_data = p_comp_addr;
│
rc = qed_sp_init_request(p_hwfn, &p_ent, │ rc = qed_sp_init_request(p_hwfn, &p_ent,
NVMETCP_RAMROD_CMD_ID_UPDATE_CONN, │ ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
PROTOCOLID_TCP_ULP, &init_data); │ PROTOCOLID_TCP_ULP, &init_data);
if (rc) │ if (rc)
return rc; │ return rc;
│
p_ramrod = &p_ent->ramrod.nvmetcp_conn_update; │ p_ramrod = &p_ent->ramrod.iscsi_conn_update;
│
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); │ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->flags = p_conn->update_flag; │ p_ramrod->flags = p_conn->update_flag;
p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size); │ p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
dval = p_conn->max_recv_pdu_length; │ dval = p_conn->max_recv_pdu_length;
p_ramrod->max_recv_pdu_length = cpu_to_le32(dval); │ p_ramrod->max_recv_pdu_length = cpu_to_le32(dval);
dval = p_conn->max_send_pdu_length; │ dval = p_conn->max_send_pdu_length;
p_ramrod->max_send_pdu_length = cpu_to_le32(dval); │ p_ramrod->max_send_pdu_length = cpu_to_le32(dval);
p_ramrod->first_seq_length = cpu_to_le32(p_conn->first_seq_length); │ dval = p_conn->first_seq_length;
│ p_ramrod->first_seq_length = cpu_to_le32(dval);
│ p_ramrod->exp_stat_sn = cpu_to_le32(p_conn->exp_stat_sn);
│
return qed_spq_post(p_hwfn, p_ent, NULL); │ return qed_spq_post(p_hwfn, p_ent, NULL);
} │
next prev up linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c:11931 │ linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c:11967
│
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, │ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
.addr = 0xff, │ .addr = 0xff,
.def_md_devad = 0, │ .def_md_devad = 0,
.flags = FLAGS_FAN_FAILURE_DET_REQ | │ .flags = (FLAGS_FAN_FAILURE_DET_REQ |
FLAGS_REARM_LATCH_SIGNAL, │ FLAGS_REARM_LATCH_SIGNAL |
│ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, │ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, │ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0, │ .mdio_ctrl = 0,
.supported = (SUPPORTED_10baseT_Half | │ .supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full | │ SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | │ SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full | │ SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full | │ SUPPORTED_1000baseT_Full |
SUPPORTED_10000baseT_Full | │ SUPPORTED_10000baseT_Full |
SUPPORTED_TP | │ SUPPORTED_TP |
SUPPORTED_Autoneg | │ SUPPORTED_Autoneg |
SUPPORTED_Pause | │ SUPPORTED_Pause |
SUPPORTED_Asym_Pause), │ SUPPORTED_Asym_Pause),
.media_type = ETH_PHY_BASE_T, │ .media_type = ETH_PHY_BASE_T,
.ver_addr = 0, │ .ver_addr = 0,
.req_flow_ctrl = 0, │ .req_flow_ctrl = 0,
.req_line_speed = 0, │ .req_line_speed = 0,
.speed_cap_mask = 0, │ .speed_cap_mask = 0,
.req_duplex = 0, │ .req_duplex = 0,
.rsrv = 0, │ .rsrv = 0,
.config_init = bnx2x_8481_config_init, │ .config_init = bnx2x_848x3_config_init,
.read_status = bnx2x_848xx_read_status, │ .read_status = bnx2x_848xx_read_status,
.link_reset = bnx2x_8481_link_reset, │ .link_reset = bnx2x_848x3_link_reset,
.config_loopback = NULL, │ .config_loopback = NULL,
.format_fw_ver = bnx2x_848xx_format_ver, │ .format_fw_ver = bnx2x_848xx_format_ver,
.hw_reset = bnx2x_8481_hw_reset, │ .hw_reset = NULL,
.set_link_led = bnx2x_848xx_set_link_led, │ .set_link_led = bnx2x_848xx_set_link_led,
.phy_specific_func = NULL │ .phy_specific_func = bnx2x_848xx_specific_func
} │
next prev up linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c:12004 │ linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c:11967
│
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, │ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
.addr = 0xff, │ .addr = 0xff,
.def_md_devad = 0, │ .def_md_devad = 0,
.flags = (FLAGS_FAN_FAILURE_DET_REQ | │ .flags = (FLAGS_FAN_FAILURE_DET_REQ |
FLAGS_REARM_LATCH_SIGNAL | │ FLAGS_REARM_LATCH_SIGNAL |
FLAGS_TX_ERROR_CHECK), │ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, │ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, │ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0, │ .mdio_ctrl = 0,
.supported = (SUPPORTED_100baseT_Half | │ .supported = (SUPPORTED_10baseT_Half |
│ SUPPORTED_10baseT_Full |
│ SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full | │ SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full | │ SUPPORTED_1000baseT_Full |
SUPPORTED_10000baseT_Full | │ SUPPORTED_10000baseT_Full |
SUPPORTED_TP | │ SUPPORTED_TP |
SUPPORTED_Autoneg | │ SUPPORTED_Autoneg |
SUPPORTED_Pause | │ SUPPORTED_Pause |
SUPPORTED_Asym_Pause), │ SUPPORTED_Asym_Pause),
.media_type = ETH_PHY_BASE_T, │ .media_type = ETH_PHY_BASE_T,
.ver_addr = 0, │ .ver_addr = 0,
.req_flow_ctrl = 0, │ .req_flow_ctrl = 0,
.req_line_speed = 0, │ .req_line_speed = 0,
.speed_cap_mask = 0, │ .speed_cap_mask = 0,
.req_duplex = 0, │ .req_duplex = 0,
.rsrv = 0, │ .rsrv = 0,
.config_init = bnx2x_848x3_config_init, │ .config_init = bnx2x_848x3_config_init,
.read_status = bnx2x_848xx_read_status, │ .read_status = bnx2x_848xx_read_status,
.link_reset = bnx2x_848x3_link_reset, │ .link_reset = bnx2x_848x3_link_reset,
.config_loopback = NULL, │ .config_loopback = NULL,
.format_fw_ver = bnx2x_848xx_format_ver, │ .format_fw_ver = bnx2x_848xx_format_ver,
.hw_reset = bnx2x_84833_hw_reset_phy, │ .hw_reset = NULL,
.set_link_led = bnx2x_848xx_set_link_led, │ .set_link_led = bnx2x_848xx_set_link_led,
.phy_specific_func = bnx2x_848xx_specific_func │ .phy_specific_func = bnx2x_848xx_specific_func
} │
next prev up linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c:12004 │ linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c:11931
│
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, │ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
.addr = 0xff, │ .addr = 0xff,
.def_md_devad = 0, │ .def_md_devad = 0,
.flags = (FLAGS_FAN_FAILURE_DET_REQ | │ .flags = FLAGS_FAN_FAILURE_DET_REQ |
FLAGS_REARM_LATCH_SIGNAL | │ FLAGS_REARM_LATCH_SIGNAL,
FLAGS_TX_ERROR_CHECK), │
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, │ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, │ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0, │ .mdio_ctrl = 0,
.supported = (SUPPORTED_100baseT_Half | │ .supported = (SUPPORTED_10baseT_Half |
│ SUPPORTED_10baseT_Full |
│ SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full | │ SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full | │ SUPPORTED_1000baseT_Full |
SUPPORTED_10000baseT_Full | │ SUPPORTED_10000baseT_Full |
SUPPORTED_TP | │ SUPPORTED_TP |
SUPPORTED_Autoneg | │ SUPPORTED_Autoneg |
SUPPORTED_Pause | │ SUPPORTED_Pause |
SUPPORTED_Asym_Pause), │ SUPPORTED_Asym_Pause),
.media_type = ETH_PHY_BASE_T, │ .media_type = ETH_PHY_BASE_T,
.ver_addr = 0, │ .ver_addr = 0,
.req_flow_ctrl = 0, │ .req_flow_ctrl = 0,
.req_line_speed = 0, │ .req_line_speed = 0,
.speed_cap_mask = 0, │ .speed_cap_mask = 0,
.req_duplex = 0, │ .req_duplex = 0,
.rsrv = 0, │ .rsrv = 0,
.config_init = bnx2x_848x3_config_init, │ .config_init = bnx2x_8481_config_init,
.read_status = bnx2x_848xx_read_status, │ .read_status = bnx2x_848xx_read_status,
.link_reset = bnx2x_848x3_link_reset, │ .link_reset = bnx2x_8481_link_reset,
.config_loopback = NULL, │ .config_loopback = NULL,
.format_fw_ver = bnx2x_848xx_format_ver, │ .format_fw_ver = bnx2x_848xx_format_ver,
.hw_reset = bnx2x_84833_hw_reset_phy, │ .hw_reset = bnx2x_8481_hw_reset,
.set_link_led = bnx2x_848xx_set_link_led, │ .set_link_led = bnx2x_848xx_set_link_led,
.phy_specific_func = bnx2x_848xx_specific_func │ .phy_specific_func = NULL
} │
next prev up linux/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c:254 │ linux/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c:181
│
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); │ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg; │ struct mlx5_flow_group *fg;
u32 *in, *match; │ u32 *in, *match;
│
in = kvzalloc(inlen, GFP_KERNEL); │ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) │ if (!in)
return ERR_PTR(-ENOMEM); │ return ERR_PTR(-ENOMEM);
│
	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); │	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
│ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); │ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
│
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16); │ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0); │ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag); │ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid); │
│ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
│ mlx5_eswitch_get_vport_metadata_mask());
│
MLX5_SET(create_flow_group_in, in, start_flow_index, │ MLX5_SET(create_flow_group_in, in, start_flow_index,
MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM); │ MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM);
MLX5_SET(create_flow_group_in, in, end_flow_index, │ MLX5_SET(create_flow_group_in, in, end_flow_index,
MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO); │ MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO);
│
fg = mlx5_create_flow_group(egress_ft, in); │ fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg)) │ if (IS_ERR(fg))
esw_warn(esw->dev, │ esw_warn(esw->dev,
"Failed to create VLAN flow group for bridge egress table (err= │ "Failed to create bridge ingress table VLAN filter flow group (
PTR_ERR(fg)); │ PTR_ERR(fg));
│
kvfree(in); │ kvfree(in);
return fg; │ return fg;
} │
next prev up linux/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c:254 │ linux/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c:218
│
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); │ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg; │ struct mlx5_flow_group *fg;
u32 *in, *match; │ u32 *in, *match;
│
in = kvzalloc(inlen, GFP_KERNEL); │ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) │ if (!in)
return ERR_PTR(-ENOMEM); │ return ERR_PTR(-ENOMEM);
│
	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); │	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
│ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); │ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
│
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16); │ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0); │ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag); │
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid); │ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
│ mlx5_eswitch_get_vport_metadata_mask());
│
MLX5_SET(create_flow_group_in, in, start_flow_index, │ MLX5_SET(create_flow_group_in, in, start_flow_index,
MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM); │ MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
MLX5_SET(create_flow_group_in, in, end_flow_index, │ MLX5_SET(create_flow_group_in, in, end_flow_index,
MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO); │ MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);
│
fg = mlx5_create_flow_group(egress_ft, in); │ fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg)) │ if (IS_ERR(fg))
esw_warn(esw->dev, │ esw_warn(esw->dev,
"Failed to create VLAN flow group for bridge egress table (err= │ "Failed to create MAC flow group for bridge ingress table (err=
PTR_ERR(fg)); │ PTR_ERR(fg));
│
kvfree(in); │ kvfree(in);
return fg; │ return fg;
} │
next prev up linux/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c:2578 │ linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:11101
│
struct pci_dev *pdev = hdev->pdev; │ struct pci_dev *pdev = hdev->pdev;
struct hclgevf_hw *hw; │ struct hclge_hw *hw;
int ret; │ int ret;
│
ret = pci_enable_device(pdev); │ ret = pci_enable_device(pdev);
if (ret) { │ if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n"); │ dev_err(&pdev->dev, "failed to enable PCI device\n");
return ret; │ return ret;
} │ }
│
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); │ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) { │ if (ret) {
dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); │ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
goto err_disable_device; │ if (ret) {
│ dev_err(&pdev->dev,
│ "can't set consistent PCI DMA");
│ goto err_disable_device;
│ }
│ dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
} │ }
│
ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); │ ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
if (ret) { │ if (ret) {
dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); │ dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
goto err_disable_device; │ goto err_disable_device;
} │ }
│
pci_set_master(pdev); │ pci_set_master(pdev);
hw = &hdev->hw; │ hw = &hdev->hw;
hw->hw.io_base = pci_iomap(pdev, 2, 0); │ hw->hw.io_base = pcim_iomap(pdev, 2, 0);
if (!hw->hw.io_base) { │ if (!hw->hw.io_base) {
dev_err(&pdev->dev, "can't map configuration register space\n"); │ dev_err(&pdev->dev, "Can't map configuration register space\n");
ret = -ENOMEM; │ ret = -ENOMEM;
goto err_clr_master; │ goto err_clr_master;
} │ }
│
ret = hclgevf_dev_mem_map(hdev); │ ret = hclge_dev_mem_map(hdev);
if (ret) │ if (ret)
goto err_unmap_io_base; │ goto err_unmap_io_base;
│
│ hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
│
return 0; │ return 0;
│
err_unmap_io_base: │ err_unmap_io_base:
pci_iounmap(pdev, hdev->hw.hw.io_base); │ pcim_iounmap(pdev, hdev->hw.hw.io_base);
err_clr_master: │ err_clr_master:
pci_clear_master(pdev); │ pci_clear_master(pdev);
pci_release_regions(pdev); │ pci_release_regions(pdev);
err_disable_device: │ err_disable_device:
pci_disable_device(pdev); │ pci_disable_device(pdev);
│
return ret; │ return ret;
} │
next prev up linux/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c:623 │ linux/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c:669
│
unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff}; │ unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff}; │ unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
struct stmmac_packet_attrs attr = { }; │ struct stmmac_packet_attrs attr = { };
int ret, tries = 256; │ int ret, tries = 256;
│
if (stmmac_filter_check(priv)) │ if (stmmac_filter_check(priv))
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries) │ if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins) │ if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
│
while (--tries) { │ while (--tries) {
/* We only need to check the mc_addr for collisions */ │ /* We only need to check the uc_addr for collisions */
mc_addr[ETH_ALEN - 1] = tries; │ uc_addr[ETH_ALEN - 1] = tries;
if (stmmac_hash_check(priv, mc_addr)) │ if (stmmac_perfect_check(priv, uc_addr))
break; │ break;
} │ }
│
if (!tries) │ if (!tries)
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
│
ret = dev_uc_add(priv->dev, uc_addr); │ ret = dev_mc_add(priv->dev, mc_addr);
if (ret) │ if (ret)
return ret; │ return ret;
│
attr.dst = uc_addr; │ attr.dst = mc_addr;
│
/* Shall receive packet */ │ /* Shall receive packet */
ret = __stmmac_test_loopback(priv, &attr); │ ret = __stmmac_test_loopback(priv, &attr);
if (ret) │ if (ret)
goto cleanup; │ goto cleanup;
│
attr.dst = mc_addr; │ attr.dst = uc_addr;
│
/* Shall NOT receive packet */ │ /* Shall NOT receive packet */
ret = __stmmac_test_loopback(priv, &attr); │ ret = __stmmac_test_loopback(priv, &attr);
ret = ret ? 0 : -EINVAL; │ ret = ret ? 0 : -EINVAL;
│
cleanup: │ cleanup:
dev_uc_del(priv->dev, uc_addr); │ dev_mc_del(priv->dev, mc_addr);
return ret; │ return ret;
} │
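
The probe loop at the top of this test varies only the last octet of the candidate address until the driver-level check (stmmac_hash_check() on the left, stmmac_perfect_check() on the right; the surrounding logic implies both return true for a candidate that does not clash with already-programmed entries) accepts it, so the later accept/drop assertions are unambiguous. A stand-alone sketch of that search, with addr_is_free() as an invented stand-in for those helpers:

#include <stdio.h>
#include <stdbool.h>

#define ETH_ALEN 6

static bool addr_is_free(const unsigned char *addr)
{
	return addr[ETH_ALEN - 1] < 250;	/* pretend 250..255 collide */
}

int main(void)
{
	unsigned char addr[ETH_ALEN] = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tries = 256;

	while (--tries) {
		addr[ETH_ALEN - 1] = tries;
		if (addr_is_free(addr))
			break;			/* first usable address wins */
	}
	if (!tries)
		printf("no usable address found\n");	/* -EOPNOTSUPP above */
	else
		printf("using last octet %d\n", addr[ETH_ALEN - 1]);
	return 0;
}
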
next prev up linux/drivers/net/ethernet/nvidia/forcedeth.c:3737 │ linux/drivers/net/ethernet/nvidia/forcedeth.c:3849
│
struct net_device *dev = (struct net_device *) data; │ struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev); │ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev); │ u8 __iomem *base = get_hwbase(dev);
u32 events; │ u32 events;
int i; │ int i;
unsigned long flags; │ unsigned long flags;
│
for (i = 0;; i++) { │ for (i = 0;; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; │ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
writel(events, base + NvRegMSIXIrqStatus); │ writel(events, base + NvRegMSIXIrqStatus);
netdev_dbg(dev, "tx irq events: %08x\n", events); │ netdev_dbg(dev, "rx irq events: %08x\n", events);
if (!(events & np->irqmask)) │ if (!(events & np->irqmask))
break; │ break;
│
spin_lock_irqsave(&np->lock, flags); │ if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); │ if (unlikely(nv_alloc_rx_optimized(dev))) {
spin_unlock_irqrestore(&np->lock, flags); │ spin_lock_irqsave(&np->lock, flags);
│ if (!np->in_shutdown)
│ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
│ spin_unlock_irqrestore(&np->lock, flags);
│ }
│ }
│
if (unlikely(i > max_interrupt_work)) { │ if (unlikely(i > max_interrupt_work)) {
spin_lock_irqsave(&np->lock, flags); │ spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */ │ /* disable interrupts on the nic */
writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); │ writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
pci_push(base); │ pci_push(base);
│
if (!np->in_shutdown) { │ if (!np->in_shutdown) {
np->nic_poll_irq |= NVREG_IRQ_TX_ALL; │ np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
mod_timer(&np->nic_poll, jiffies + POLL_WAIT); │ mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
} │ }
spin_unlock_irqrestore(&np->lock, flags); │ spin_unlock_irqrestore(&np->lock, flags);
netdev_dbg(dev, "%s: too many iterations (%d)\n", │ netdev_dbg(dev, "%s: too many iterations (%d)\n",
__func__, i); │ __func__, i);
break; │ break;
} │ }
│
} │ }
│
return IRQ_RETVAL(i); │ return IRQ_RETVAL(i);
} │
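
Both columns bound the interrupt handler with max_interrupt_work: events are serviced until none are pending, but past the budget the source is masked on the NIC and the remaining work is deferred to a timer. A stand-alone sketch of that bounded loop (the event source is simulated; the kernel defers to nic_poll, here we only report):

#include <stdio.h>

#define MAX_WORK 4

static int pending = 10;	/* simulated device event backlog */

static unsigned int read_events(void) { return pending ? 1 : 0; }

int main(void)
{
	int i;

	for (i = 0;; i++) {
		unsigned int events = read_events();

		if (!events)
			break;
		pending--;		/* handle one batch of work */

		if (i > MAX_WORK) {
			printf("budget exceeded after %d loops; mask IRQ, defer\n", i);
			break;
		}
	}
	printf("handled, %d events left for later\n", pending);
	return 0;
}
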
next prev up linux/drivers/net/ethernet/sfc/ethtool.c:123 │ linux/drivers/net/ethernet/sfc/falcon/ethtool.c:603
│
struct efx_nic *efx = netdev_priv(net_dev); │ struct ef4_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel; │ struct ef4_channel *channel;
unsigned int tx_usecs, rx_usecs; │ unsigned int tx_usecs, rx_usecs;
bool adaptive, rx_may_override_tx; │ bool adaptive, rx_may_override_tx;
int rc; │ int rc;
│
efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive); │ ef4_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
│
if (coalesce->rx_coalesce_usecs != rx_usecs) │ if (coalesce->rx_coalesce_usecs != rx_usecs)
rx_usecs = coalesce->rx_coalesce_usecs; │ rx_usecs = coalesce->rx_coalesce_usecs;
else │ else
rx_usecs = coalesce->rx_coalesce_usecs_irq; │ rx_usecs = coalesce->rx_coalesce_usecs_irq;
│
adaptive = coalesce->use_adaptive_rx_coalesce; │ adaptive = coalesce->use_adaptive_rx_coalesce;
│
/* If channels are shared, TX IRQ moderation can be quietly │ /* If channels are shared, TX IRQ moderation can be quietly
* overridden unless it is changed from its old value. │ * overridden unless it is changed from its old value.
*/ │ */
rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs && │ rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
coalesce->tx_coalesce_usecs_irq == tx_usecs); │ coalesce->tx_coalesce_usecs_irq == tx_usecs);
if (coalesce->tx_coalesce_usecs != tx_usecs) │ if (coalesce->tx_coalesce_usecs != tx_usecs)
tx_usecs = coalesce->tx_coalesce_usecs; │ tx_usecs = coalesce->tx_coalesce_usecs;
else │ else
tx_usecs = coalesce->tx_coalesce_usecs_irq; │ tx_usecs = coalesce->tx_coalesce_usecs_irq;
│
rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive, │ rc = ef4_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
rx_may_override_tx); │ rx_may_override_tx);
if (rc != 0) │ if (rc != 0)
return rc; │ return rc;
│
efx_for_each_channel(channel, efx) │ ef4_for_each_channel(channel, efx)
efx->type->push_irq_moderation(channel); │ efx->type->push_irq_moderation(channel);
│
return 0; │ return 0;
} │
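
The field selection in both columns encodes one subtle rule: ethtool exposes two legacy knobs for the same moderation value (tx_coalesce_usecs and tx_coalesce_usecs_irq), and whichever one the user changed away from the current setting wins. A tiny sketch of that rule (pick() is our name for it):

#include <stdio.h>

static unsigned int pick(unsigned int primary, unsigned int alt,
			 unsigned int cur)
{
	/* primary knob changed by the user: it wins; otherwise fall
	 * back to the _irq variant */
	return primary != cur ? primary : alt;
}

int main(void)
{
	unsigned int cur = 50;

	printf("%u\n", pick(80, 50, cur));	/* -> 80, primary changed */
	printf("%u\n", pick(50, 75, cur));	/* -> 75, primary untouched */
	return 0;
}
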
next prev up linux/drivers/net/ethernet/netronome/nfp/flower/match.c:605 │ linux/drivers/net/ethernet/netronome/nfp/flower/match.c:649
│
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { │ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
struct nfp_flower_ipv6_gre_tun *gre_match; │ struct nfp_flower_ipv6_udp_tun *udp_match;
struct nfp_ipv6_addr_entry *entry; │ struct nfp_ipv6_addr_entry *entry;
struct in6_addr *dst; │ struct in6_addr *dst;
│
nfp_flower_compile_ipv6_gre_tun((void *)ext, │ nfp_flower_compile_ipv6_udp_tun((void *)ext,
(void *)msk, rule); │ (void *)msk, rule);
gre_match = (struct nfp_flower_ipv6_gre_tun *)ext; │ udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
dst = &gre_match->ipv6.dst; │ dst = &udp_match->ipv6.dst;
ext += sizeof(struct nfp_flower_ipv6_gre_tun); │ ext += sizeof(struct nfp_flower_ipv6_udp_tun);
msk += sizeof(struct nfp_flower_ipv6_gre_tun); │ msk += sizeof(struct nfp_flower_ipv6_udp_tun);
│
entry = nfp_tunnel_add_ipv6_off(app, dst); │ entry = nfp_tunnel_add_ipv6_off(app, dst);
if (!entry) │ if (!entry)
return -EOPNOTSUPP; │ return -EOPNOTSUPP;
│
nfp_flow->nfp_tun_ipv6 = entry; │ nfp_flow->nfp_tun_ipv6 = entry;
} else { │ } else {
__be32 dst; │ __be32 dst;
│
nfp_flower_compile_ipv4_gre_tun((void *)ext, │ nfp_flower_compile_ipv4_udp_tun((void *)ext,
(void *)msk, rule); │ (void *)msk, rule);
dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst; │ dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
ext += sizeof(struct nfp_flower_ipv4_gre_tun); │ ext += sizeof(struct nfp_flower_ipv4_udp_tun);
msk += sizeof(struct nfp_flower_ipv4_gre_tun); │ msk += sizeof(struct nfp_flower_ipv4_udp_tun);
│
/* Store the tunnel destination in the rule data. │ /* Store the tunnel destination in the rule data.
* This must be present and be an exact match. │ * This must be present and be an exact match.
*/ │ */
nfp_flow->nfp_tun_ipv4_addr = dst; │ nfp_flow->nfp_tun_ipv4_addr = dst;
nfp_tunnel_add_ipv4_off(app, dst); │ nfp_tunnel_add_ipv4_off(app, dst);
│ }
│
│ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
│ nfp_flower_compile_geneve_opt(ext, msk, rule);
} │ }
} │
next prev up linux/drivers/net/ethernet/sfc/farch.c:1145 │ linux/drivers/net/ethernet/sfc/falcon/farch.c:1154
│
struct efx_nic *efx = channel->efx; │ struct ef4_nic *efx = channel->efx;
struct efx_rx_queue *rx_queue = │ struct ef4_rx_queue *rx_queue =
efx_channel_has_rx_queue(channel) ? │ ef4_channel_has_rx_queue(channel) ?
efx_channel_get_rx_queue(channel) : NULL; │ ef4_channel_get_rx_queue(channel) : NULL;
unsigned magic, code; │ unsigned magic, code;
│
magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); │ magic = EF4_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
code = _EFX_CHANNEL_MAGIC_CODE(magic); │ code = _EF4_CHANNEL_MAGIC_CODE(magic);
│
if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { │ if (magic == EF4_CHANNEL_MAGIC_TEST(channel)) {
channel->event_test_cpu = raw_smp_processor_id(); │ channel->event_test_cpu = raw_smp_processor_id();
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { │ } else if (rx_queue && magic == EF4_CHANNEL_MAGIC_FILL(rx_queue)) {
/* The queue must be empty, so we won't receive any rx │ /* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the │ * events, so ef4_process_channel() won't refill the
* queue. Refill it here */ │ * queue. Refill it here */
efx_fast_push_rx_descriptors(rx_queue, true); │ ef4_fast_push_rx_descriptors(rx_queue, true);
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { │ } else if (rx_queue && magic == EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
efx_farch_handle_drain_event(channel); │ ef4_farch_handle_drain_event(channel);
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { │ } else if (code == _EF4_CHANNEL_MAGIC_TX_DRAIN) {
efx_farch_handle_drain_event(channel); │ ef4_farch_handle_drain_event(channel);
} else { │ } else {
netif_dbg(efx, hw, efx->net_dev, "channel %d received " │ netif_dbg(efx, hw, efx->net_dev, "channel %d received "
"generated event "EFX_QWORD_FMT"\n", │ "generated event "EF4_QWORD_FMT"\n",
channel->channel, EFX_QWORD_VAL(*event)); │ channel->channel, EF4_QWORD_VAL(*event));
} │ }
} │