From: Cloud Ren <cjren@qca.qualcomm.com>
Subject: [PATCH] net: add new QCA alx ethernet driver
Date: Thu, 9 Aug 2012 18:14:30 +0800



This driver supports two new ethernet chipsets:

1969:1091 - AR8161 Gigabit Ethernet

1969:1090 - AR8162 Fast Ethernet



Qualcomm Atheros (QCA) is committing to fix all bugs found in this driver.

This driver is also permissively licensed thereby enabling developers of

other OSes to cherry pick this driver to port to their OS.



Testing including build/install/uninstall/dhcp/ping/iperf/wol/reboot/etc.

has passed on the supported chipsets.



This driver and patch also have addressed all sparse and checkpatch

warnings.



Signed-off-by: Cloud Ren <cjren@qca.qualcomm.com>

Signed-off-by: Xiong Huang <xiong@qca.qualcomm.com>

Signed-off-by: Hao-Ran Liu(Joseph Liu) <hao-ran.liu@canonical.com>

Signed-off-by: Joe Perches <joe@perches.com>

Tested-by: David Liu <dwliu@qca.qualcomm.com>

Signed-off-by: Luis R. Rodriguez <rodrigue@qca.qualcomm.com>

---

MAINTAINERS | 11 +

drivers/net/ethernet/atheros/Kconfig | 38 +-

drivers/net/ethernet/atheros/Makefile | 1 +

drivers/net/ethernet/atheros/alx/Makefile | 7 +

drivers/net/ethernet/atheros/alx/alx.h | 746 +++++

drivers/net/ethernet/atheros/alx/alx_abs.c | 1055 +++++++

drivers/net/ethernet/atheros/alx/alx_cifs.c | 307 ++

drivers/net/ethernet/atheros/alx/alx_cifs.h | 69 +

drivers/net/ethernet/atheros/alx/alx_dfs.c | 878 ++++++

drivers/net/ethernet/atheros/alx/alx_dfs.h | 182 ++

drivers/net/ethernet/atheros/alx/alx_ethtool.c | 337 +++

drivers/net/ethernet/atheros/alx/alx_hw.c | 1042 +++++++

drivers/net/ethernet/atheros/alx/alx_hw.h | 2132 +++++++++++++

drivers/net/ethernet/atheros/alx/alx_hwcom.h | 128 +

drivers/net/ethernet/atheros/alx/alx_main.c | 3830 ++++++++++++++++++++++++

drivers/net/ethernet/atheros/alx/alx_sw.h | 478 +++

16 files changed, 11238 insertions(+), 3 deletions(-)

create mode 100644 drivers/net/ethernet/atheros/alx/Makefile

create mode 100644 drivers/net/ethernet/atheros/alx/alx.h

create mode 100644 drivers/net/ethernet/atheros/alx/alx_abs.c

create mode 100644 drivers/net/ethernet/atheros/alx/alx_cifs.c

create mode 100644 drivers/net/ethernet/atheros/alx/alx_cifs.h

create mode 100644 drivers/net/ethernet/atheros/alx/alx_dfs.c

create mode 100644 drivers/net/ethernet/atheros/alx/alx_dfs.h

create mode 100644 drivers/net/ethernet/atheros/alx/alx_ethtool.c

create mode 100644 drivers/net/ethernet/atheros/alx/alx_hw.c

create mode 100644 drivers/net/ethernet/atheros/alx/alx_hw.h

create mode 100644 drivers/net/ethernet/atheros/alx/alx_hwcom.h

create mode 100644 drivers/net/ethernet/atheros/alx/alx_main.c

create mode 100644 drivers/net/ethernet/atheros/alx/alx_sw.h



diff --git a/MAINTAINERS b/MAINTAINERS

index 1b71f6c..eb12e1b 100644

--- a/MAINTAINERS

+++ b/MAINTAINERS

@@ -1290,6 +1290,17 @@ W: http://atl1.sourceforge.net

S: Maintained

F: drivers/net/ethernet/atheros/



+ALX ETHERNET DRIVERS

+M: Cloud Ren <cjren@qca.qualcomm.com>

+M: Stevent Li <steventl@qca.qualcomm.com>

+M: Wu Ken <kenw@qca.qualcomm.com>

+M: David Liu <dwliu@qca.qualcomm.com>

+L: netdev@vger.kernel.org

+L: nic-devel@qualcomm.com

+W: http://www.linuxfoundation.org/collaborate/workgroups/networking/alx

+S: Supported

+F: drivers/net/ethernet/atheros/alx/

+

ATM

M: Chas Williams <chas@cmf.nrl.navy.mil>

L: linux-atm-general@lists.sourceforge.net (moderated for non-subscribers)

diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig

index 1ed886d..8dfd634 100644

--- a/drivers/net/ethernet/atheros/Kconfig

+++ b/drivers/net/ethernet/atheros/Kconfig

@@ -56,15 +56,47 @@ config ATL1E

will be called atl1e.



config ATL1C

- tristate "Atheros L1C Gigabit Ethernet support (EXPERIMENTAL)"

- depends on PCI && EXPERIMENTAL

+ tristate "Atheros L1C/L1D Gigabit Ethernet support"

+ depends on PCI

select CRC32

select NET_CORE

select MII

---help---

- This driver supports the Atheros L1C gigabit ethernet adapter.

+ This driver supports the Atheros L1C/L1D ethernet adapter.

+ This driver supports following chipsets:

+

+ 1969:1063 - AR8131 Gigabit Ethernet

+ 1969:1062 - AR8132 Fast Ethernet (10/100 Mbit/s)

+ 1969:2062 - AR8152 v2.0 Fast Ethernet

+ 1969:2060 - AR8152 v1.1 Fast Ethernet

+ 1969:1073 - AR8151 v1.0 Gigabit Ethernet

+ 1969:1083 - AR8151 v2.0 Gigabit Ethernet



To compile this driver as a module, choose M here. The module

will be called atl1c.



+config ALX

+ tristate "Qualcomm Atheros L1F Gigabit Ethernet support"

+ depends on PCI

+ select CRC32

+ select NET_CORE

+ select MII

+ ---help---

+ This driver supports the Qualcomm Atheros L1F ethernet adapter.

+ This driver supports following chipsets:

+

+ 1969:1091 - AR8161 Gigabit Ethernet

+ 1969:1090 - AR8162 Fast Ethernet

+

+ To compile this driver as a module, choose M here. The module

+ will be called alx.

+

+config ALX_DEBUGFS

+ bool "Qualcomm Atheros debugging interface"

+ depends on ALX && DEBUG_FS

+ ---help---

+ This option adds ability to debug and test L1F. It can

+ support Qualcomm Atheros tools, including diagnostic, memcfg

+ and SWOI.

+

endif # NET_VENDOR_ATHEROS

diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile

index e7e76fb..5cf1c65 100644

--- a/drivers/net/ethernet/atheros/Makefile

+++ b/drivers/net/ethernet/atheros/Makefile

@@ -6,3 +6,4 @@ obj-$(CONFIG_ATL1) += atlx/

obj-$(CONFIG_ATL2) += atlx/

obj-$(CONFIG_ATL1E) += atl1e/

obj-$(CONFIG_ATL1C) += atl1c/

+obj-$(CONFIG_ALX) += alx/

diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile

new file mode 100644

index 0000000..66acb3f

--- /dev/null

+++ b/drivers/net/ethernet/atheros/alx/Makefile

@@ -0,0 +1,7 @@

+obj-$(CONFIG_ALX) += alx.o

+alx-objs := alx_main.o alx_ethtool.o alx_abs.o alx_hw.o

+ifdef CONFIG_ALX_DEBUGFS

+alx-objs += alx_dfs.o

+alx-objs += alx_cifs.o

+endif

+ccflags-y += -D__CHECK_ENDIAN__

diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h

new file mode 100644

index 0000000..69709e7

--- /dev/null

+++ b/drivers/net/ethernet/atheros/alx/alx.h

@@ -0,0 +1,746 @@

+/*

+ * Copyright (c) 2012 Qualcomm Atheros, Inc.

+ *

+ * Permission to use, copy, modify, and/or distribute this software for any

+ * purpose with or without fee is hereby granted, provided that the above

+ * copyright notice and this permission notice appear in all copies.

+ *

+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES

+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF

+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR

+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES

+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN

+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF

+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ */

+

+#ifndef _ALX_H_

+#define _ALX_H_

+

+#include <linux/netdevice.h>

+

+#include "alx_sw.h"

+#include "alx_dfs.h"

+

+/*

+ * Definition to enable some features

+ */

+#undef CONFIG_ALX_MSIX

+#undef CONFIG_ALX_MSI

+#undef CONFIG_ALX_MTQ

+#undef CONFIG_ALX_MRQ

+#undef CONFIG_ALX_RSS

+/* #define CONFIG_ALX_MSIX */

+#define CONFIG_ALX_MSI

+#define CONFIG_ALX_MTQ

+#define CONFIG_ALX_MRQ

+#ifdef CONFIG_ALX_MRQ

+#define CONFIG_ALX_RSS

+#endif

+

+#define ALX_MSG_DEFAULT 0

+

+/* Logging functions and macros */

+#define alx_err(adpt, fmt, ...) \

+ netdev_err(adpt->netdev, fmt, ##__VA_ARGS__)

+

+#define ALX_VLAN_TO_TAG(_vlan, _tag) \

+ do { \

+ _tag = ((((_vlan) >> 8) & 0xFF) | (((_vlan) & 0xFF) << 8)); \

+ } while (0)

+

+#define ALX_TAG_TO_VLAN(_tag, _vlan) \

+ do { \

+ _vlan = ((((_tag) >> 8) & 0xFF) | (((_tag) & 0xFF) << 8)); \

+ } while (0)

+

+

+#define BAR_0 0

+#define BAR_1 1

+#define BAR_5 5

+

+

+#define ALX_DEF_RX_BUF_SIZE 1536

+#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)

+#define ALX_MAX_TSO_PKT_SIZE (7*1024)

+

+#define ALX_MAX_ETH_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE

+#define ALX_MIN_ETH_FRAME_SIZE 68

+

+

+#define ALX_MAX_RX_QUEUES 8

+#define ALX_MAX_TX_QUEUES 4

+#define ALX_MAX_HANDLED_INTRS 5

+

+#define ALX_WATCHDOG_TIME (5 * HZ)

+

+/*

+ * RRD : definition

+ */

+

+/* general parameter format of rrd */

+struct alx_sw_rrdes_general {

+#if defined(__LITTLE_ENDIAN_BITFIELD)

+ /* dword 0 */

+ u32 xsum:16;

+ u32 nor:4; /* number of RFD */

+ u32 si:12; /* start index of rfd-ring */

+ /* dword 1 */

+ u32 hash;

+ /* dword 2 */

+ u32 vlan_tag:16; /* vlan-tag */

+ u32 pid:8; /* Header Length of Header-Data Split. WORD unit */

+ u32 reserve0:1;

+ u32 rss_cpu:3; /* CPU number used by RSS */

+ u32 rss_flag:4; /* rss_flag 0, TCP(IPv6) flag for RSS hash algorithm

+ * rss_flag 1, IPv6 flag for RSS hash algorithm

+ * rss_flag 2, TCP(IPv4) flag for RSS hash algorithm

+ * rss_flag 3, IPv4 flag for RSS hash algorithm */

+ /* dword 3 */

+ u32 pkt_len:14; /* length of the packet */

+ u32 l4f:1; /* L4(TCP/UDP) checksum failed */

+ u32 ipf:1; /* IP checksum failed */

+ u32 vlan_flag:1; /* vlan tag */

+ u32 reserve:3;

+ u32 res:1; /* received error summary */

+ u32 crc:1; /* crc error */

+ u32 fae:1; /* frame alignment error */

+ u32 trunc:1; /* truncated packet, larger than MTU */

+ u32 runt:1; /* runt packet */

+ u32 icmp:1; /* incomplete packet due to insufficient rx-desc*/

+ u32 bar:1; /* broadcast address received */

+ u32 mar:1; /* multicast address received */

+ u32 type:1; /* ethernet type */

+ u32 fov:1; /* fifo overflow*/

+ u32 lene:1; /* length error */

+ u32 update:1; /* update*/

+#elif defined(__BIG_ENDIAN_BITFIELD)

+ /* dword 0 */

+ u32 si:12;

+ u32 nor:4;

+ u32 xsum:16;

+ /* dword 1 */

+ u32 hash;

+ /* dword 2 */

+ u32 rss_flag:4;

+ u32 rss_cpu:3;

+ u32 reserve0:1;

+ u32 pid:8;

+ u32 vlan_tag:16;

+ /* dword 3 */

+ u32 update:1;

+ u32 lene:1;

+ u32 fov:1;

+ u32 type:1;

+ u32 mar:1;

+ u32 bar:1;

+ u32 icmp:1;

+ u32 runt:1;

+ u32 trunc:1;

+ u32 fae:1;

+ u32 crc:1;

+ u32 res:1;

+ u32 reserve1:3;

+ u32 vlan_flag:1;

+ u32 ipf:1;

+ u32 l4f:1;

+ u32 pkt_len:14;

+#else

+#error "Please fix <asm/byteorder.h>"

+#endif

+};

+

+union alx_hw_rrdesc {

+ /* dword flat format */

+ struct {

+ __le32 dw0;

+ __le32 dw1;

+ __le32 dw2;

+ __le32 dw3;

+ } dfmt;

+

+ /* qword flat format */

+ struct {

+ __le64 qw0;

+ __le64 qw1;

+ } qfmt;

+};

+

+/*

+ * XXX: we should not use this guy, best to just

+ * do all le32_to_cpu() conversions on the spot.

+ */

+union alx_sw_rrdesc {

+ struct alx_sw_rrdes_general genr;

+

+ /* dword flat format */

+ struct {

+ u32 dw0;

+ u32 dw1;

+ u32 dw2;

+ u32 dw3;

+ } dfmt;

+

+ /* qword flat format */

+ struct {

+ u64 qw0;

+ u64 qw1;

+ } qfmt;

+};

+

+/*

+ * RFD : definition

+ */

+

+/* general parameter format of rfd */

+struct alx_sw_rfdes_general {

+ u64 addr;

+};

+

+union alx_hw_rfdesc {

+ /* dword flat format */

+ struct {

+ __le32 dw0;

+ __le32 dw1;

+ } dfmt;

+

+ /* qword flat format */

+ struct {

+ __le64 qw0;

+ } qfmt;

+};

+

+/*

+ * XXX: we should not use this guy, best to just

+ * do all le32_to_cpu() conversions on the spot.

+ */

+union alx_sw_rfdesc {

+ struct alx_sw_rfdes_general genr;

+

+ /* dword flat format */

+ struct {

+ u32 dw0;

+ u32 dw1;

+ } dfmt;

+

+ /* qword flat format */

+ struct {

+ u64 qw0;

+ } qfmt;

+};

+

+/*

+ * TPD : definition

+ */

+

+/* general parameter format of tpd */

+struct alx_sw_tpdes_general {

+#if defined(__LITTLE_ENDIAN_BITFIELD)

+ /* dword 0 */

+ u32 buffer_len:16; /* include 4-byte CRC */

+ u32 vlan_tag:16;

+ /* dword 1 */

+ u32 l4hdr_offset:8; /* l4 header offset to the 1st byte of packet */

+ u32 c_csum:1;

+ u32 ip_csum:1;

+ u32 tcp_csum:1;

+ u32 udp_csum:1;

+ u32 lso:1;

+ u32 lso_v2:1;

+ u32 vtagged:1; /* vlan-id tagged already */

+ u32 instag:1; /* insert vlan tag */

+

+ u32 ipv4:1; /* ipv4 packet */

+ u32 type:1; /* type of packet (ethernet_ii(0) or snap(1)) */

+ u32 reserve:12;

+ u32 epad:1; /* even byte padding when this packet */

+ u32 last_frag:1; /* last fragment(buffer) of the packet */

+

+ u64 addr;

+#elif defined(__BIG_ENDIAN_BITFIELD)

+ /* dword 0 */

+ u32 vlan_tag:16;

+ u32 buffer_len:16;

+ /* dword 1 */

+ u32 last_frag:1;

+ u32 epad:1;

+ u32 reserve:12;

+ u32 type:1;

+ u32 ipv4:1;

+ u32 instag:1;

+ u32 vtagged:1;

+ u32 lso_v2:1;

+ u32 lso:1;

+ u32 udp_csum:1;

+ u32 tcp_csum:1;

+ u32 ip_csum:1;

+ u32 c_csum:1;

+ u32 l4hdr_offset:8;

+

+ u64 addr;

+#else

+#error "Please fix <asm/byteorder.h>"

+#endif

+};

+

+/* custom checksum parameter format of tpd */

+struct alx_sw_tpdes_checksum {

+#if defined(__LITTLE_ENDIAN_BITFIELD)

+ /* dword 0 */

+ u32 buffer_len:16;

+ u32 vlan_tag:16;

+ /* dword 1 */

+ u32 payld_offset:8; /* payload offset to the 1st byte of packet */

+ u32 c_csum:1; /* do custom checksum offload */

+ u32 ip_csum:1; /* do ip(v4) header checksum offload */

+ u32 tcp_csum:1; /* do tcp checksum offload, both ipv4 and ipv6 */

+ u32 udp_csum:1; /* do udp checksum offload, both ipv4 and ipv6 */

+ u32 lso:1;

+ u32 lso_v2:1;

+ u32 vtagged:1; /* vlan-id tagged already */

+ u32 instag:1; /* insert vlan tag */

+ u32 ipv4:1; /* ipv4 packet */

+ u32 type:1; /* type of packet (ethernet_ii(0) or snap(1)) */

+ u32 cxsum_offset:8; /* checksum offset to the 1st byte of packet */

+ u32 reserve:4;

+ u32 epad:1; /* even byte padding when this packet */

+ u32 last_frag:1; /* last fragment(buffer) of the packet */

+

+ u64 addr;

+#elif defined(__BIG_ENDIAN_BITFIELD)

+ /* dword 0 */

+ u32 vlan_tag:16;

+ u32 buffer_len:16;

+ /* dword 1 */

+ u32 last_frag:1;

+ u32 epad:1;

+ u32 reserve:4;

+ u32 cxsum_offset:8;

+ u32 type:1;

+ u32 ipv4:1;

+ u32 instag:1;

+ u32 vtagged:1;

+ u32 lso_v2:1;

+ u32 lso:1;

+ u32 udp_csum:1;

+ u32 tcp_csum:1;

+ u32 ip_csum:1;

+ u32 c_csum:1;

+ u32 payld_offset:8;

+

+ u64 addr;

+#else

+#error "Please fix <asm/byteorder.h>"

+#endif

+};

+

+

+/* tcp large send format (v1/v2) of tpd */

+struct alx_sw_tpdes_tso {

+#if defined(__LITTLE_ENDIAN_BITFIELD)

+ /* dword 0 */

+ u32 buffer_len:16; /* include 4-byte CRC */

+ u32 vlan_tag:16;

+ /* dword 1 */

+ u32 tcphdr_offset:8; /* tcp hdr offset to the 1st byte of packet */

+ u32 c_csum:1;

+ u32 ip_csum:1;

+ u32 tcp_csum:1;

+ u32 udp_csum:1;

+ u32 lso:1; /* do tcp large send (ipv4 only) */

+ u32 lso_v2:1; /* must be 0 in this format */

+ u32 vtagged:1; /* vlan-id tagged already */

+ u32 instag:1; /* insert vlan tag */

+ u32 ipv4:1; /* ipv4 packet */

+ u32 type:1; /* type of packet (ethernet_ii(1) or snap(0)) */

+ u32 mss:13; /* mss if do tcp large send */

+ u32 last_frag:1; /* last fragment(buffer) of the packet */

+

+ u32 pkt_len; /* packet length in ext tpd */

+ u32 reserve;

+#elif defined(__BIG_ENDIAN_BITFIELD)

+ /* dword 0 */

+ u32 vlan_tag:16;

+ u32 buffer_len:16;

+ /* dword 1 */

+ u32 last_frag:1;

+ u32 mss:13;

+ u32 type:1;

+ u32 ipv4:1;

+ u32 instag:1;

+ u32 vtagged:1;

+ u32 lso_v2:1;

+ u32 lso:1;

+ u32 udp_csum:1;

+ u32 tcp_csum:1;

+ u32 ip_csum:1;

+ u32 c_csum:1;

+ u32 tcphdr_offset:8;

+

+ u32 pkt_len;

+ u32 reserve;

+#else

+#error "Please fix <asm/byteorder.h>"

+#endif

+};

+

+union alx_hw_tpdesc {

+ /* dword flat format */

+ struct {

+ __le32 dw0;

+ __le32 dw1;

+ __le32 dw2;

+ __le32 dw3;

+ } dfmt;

+

+ /* qword flat format */

+ struct {

+ __le64 qw0;

+ __le64 qw1;

+ } qfmt;

+};

+

+/*

+ * XXX: we should not use this guy, best to just

+ * do all le32_to_cpu() conversions on the spot.

+ */

+union alx_sw_tpdesc {

+ struct alx_sw_tpdes_general genr;

+ struct alx_sw_tpdes_checksum csum;

+ struct alx_sw_tpdes_tso tso;

+

+ /* dword flat format */

+ struct {

+ u32 dw0;

+ u32 dw1;

+ u32 dw2;

+ u32 dw3;

+ } dfmt;

+

+ /* qword flat format */

+ struct {

+ u64 qw0;

+ u64 qw1;

+ } qfmt;

+};

+

+#define ALX_RRD(_que, _i) \

+ (&(((union alx_hw_rrdesc *)(_que)->rrq.rrdesc)[(_i)]))

+#define ALX_RFD(_que, _i) \

+ (&(((union alx_hw_rfdesc *)(_que)->rfq.rfdesc)[(_i)]))

+#define ALX_TPD(_que, _i) \

+ (&(((union alx_hw_tpdesc *)(_que)->tpq.tpdesc)[(_i)]))

+

+

+/*

+ * alx_ring_header represents a single, contiguous block of DMA space

+ * mapped for the three descriptor rings (tpd, rfd, rrd) and the two

+ * message blocks (cmb, smb) described below

+ */

+struct alx_ring_header {

+ void *desc; /* virtual address */

+ dma_addr_t dma; /* physical address*/

+ unsigned int size; /* length in bytes */

+ unsigned int used;

+};

+

+

+/*

+ * alx_buffer is wrapper around a pointer to a socket buffer

+ * so a DMA handle can be stored along with the skb

+ */

+struct alx_buffer {

+ struct sk_buff *skb; /* socket buffer */

+ u16 length; /* rx buffer length */

+ dma_addr_t dma;

+};

+

+struct alx_sw_buffer {

+ struct sk_buff *skb; /* socket buffer */

+ u32 vlan_tag:16;

+ u32 vlan_flag:1;

+ u32 reserved:15;

+};

+

+/* receive free descriptor (rfd) queue */

+struct alx_rfd_queue {

+ struct alx_buffer *rfbuff;

+ union alx_hw_rfdesc *rfdesc; /* virtual address */

+ dma_addr_t rfdma; /* physical address */

+ u16 size; /* length in bytes */

+ u16 count; /* number of descriptors in the ring */

+ u16 produce_idx; /* it's written to rxque->produce_reg */

+ u16 consume_idx; /* unused*/

+};

+

+/* receive return descriptor (rrd) queue */

+struct alx_rrd_queue {

+ union alx_hw_rrdesc *rrdesc; /* virtual address */

+ dma_addr_t rrdma; /* physical address */

+ u16 size; /* length in bytes */

+ u16 count; /* number of descriptors in the ring */

+ u16 produce_idx; /* unused */

+ u16 consume_idx; /* rxque->consume_reg */

+};

+

+/* software descriptor (swd) queue */

+struct alx_swd_queue {

+ struct alx_sw_buffer *swbuff;

+ u16 count; /* number of descriptors in the ring */

+ u16 produce_idx;

+ u16 consume_idx;

+};

+

+/* rx queue */

+struct alx_rx_queue {

+ struct device *dev; /* device for dma mapping */

+ struct net_device *netdev; /* netdev ring belongs to */

+ struct alx_msix_param *msix;

+ struct alx_rrd_queue rrq;

+ struct alx_rfd_queue rfq;

+ struct alx_swd_queue swq;

+

+ u16 que_idx; /* index in multi rx queues*/

+ u16 max_packets; /* max work per interrupt */

+ u16 produce_reg;

+ u16 consume_reg;

+ u32 flags;

+};

+#define ALX_RX_FLAG_SW_QUE 0x00000001

+#define ALX_RX_FLAG_HW_QUE 0x00000002

+#define CHK_RX_FLAG(_flag) CHK_FLAG(rxque, RX, _flag)

+#define SET_RX_FLAG(_flag) SET_FLAG(rxque, RX, _flag)

+#define CLI_RX_FLAG(_flag) CLI_FLAG(rxque, RX, _flag)

+

+#define GET_RF_BUFFER(_rque, _i) (&((_rque)->rfq.rfbuff[(_i)]))

+#define GET_SW_BUFFER(_rque, _i) (&((_rque)->swq.swbuff[(_i)]))

+

+

+/* transmit packet descriptor (tpd) ring */

+struct alx_tpd_queue {

+ struct alx_buffer *tpbuff;

+ union alx_hw_tpdesc *tpdesc; /* virtual address */

+ dma_addr_t tpdma; /* physical address */

+

+ u16 size; /* length in bytes */

+ u16 count; /* number of descriptors in the ring */

+ u16 produce_idx;

+ u16 consume_idx;

+ u16 last_produce_idx;

+};

+

+/* tx queue */

+struct alx_tx_queue {

+ struct device *dev; /* device for dma mapping */

+ struct net_device *netdev; /* netdev ring belongs to */

+ struct alx_tpd_queue tpq;

+ struct alx_msix_param *msix;

+

+ u16 que_idx; /* needed for multiqueue queue management */

+ u16 max_packets; /* max packets per interrupt */

+ u16 produce_reg;

+ u16 consume_reg;

+};

+#define GET_TP_BUFFER(_tque, _i) (&((_tque)->tpq.tpbuff[(_i)]))

+

+

+/*

+ * definition for array allocations.

+ */

+#define ALX_MAX_MSIX_INTRS 16

+#define ALX_MAX_RX_QUEUES 8

+#define ALX_MAX_TX_QUEUES 4

+

+enum alx_msix_type {

+ alx_msix_type_rx,

+ alx_msix_type_tx,

+ alx_msix_type_other,

+};

+#define ALX_MSIX_TYPE_OTH_TIMER 0

+#define ALX_MSIX_TYPE_OTH_ALERT 1

+#define ALX_MSIX_TYPE_OTH_SMB 2

+#define ALX_MSIX_TYPE_OTH_PHY 3

+

+/* ALX_MAX_MSIX_INTRS of these are allocated,

+ * but we only use one per queue-specific vector.

+ */

+struct alx_msix_param {

+ struct alx_adapter *adpt;

+ unsigned int vec_idx; /* index in HW interrupt vector */

+ char name[IFNAMSIZ + 9];

+

+ /* msix interrupts for queue */

+ u8 rx_map[ALX_MAX_RX_QUEUES];

+ u8 tx_map[ALX_MAX_TX_QUEUES];

+ u8 rx_count; /* Rx ring count assigned to this vector */

+ u8 tx_count; /* Tx ring count assigned to this vector */

+

+ struct napi_struct napi;

+ cpumask_var_t affinity_mask;

+ u32 flags;

+};

+

+#define ALX_MSIX_FLAG_RX0 0x00000001

+#define ALX_MSIX_FLAG_RX1 0x00000002

+#define ALX_MSIX_FLAG_RX2 0x00000004

+#define ALX_MSIX_FLAG_RX3 0x00000008

+#define ALX_MSIX_FLAG_RX4 0x00000010

+#define ALX_MSIX_FLAG_RX5 0x00000020

+#define ALX_MSIX_FLAG_RX6 0x00000040

+#define ALX_MSIX_FLAG_RX7 0x00000080

+#define ALX_MSIX_FLAG_TX0 0x00000100

+#define ALX_MSIX_FLAG_TX1 0x00000200

+#define ALX_MSIX_FLAG_TX2 0x00000400

+#define ALX_MSIX_FLAG_TX3 0x00000800

+#define ALX_MSIX_FLAG_TIMER 0x00001000

+#define ALX_MSIX_FLAG_ALERT 0x00002000

+#define ALX_MSIX_FLAG_SMB 0x00004000

+#define ALX_MSIX_FLAG_PHY 0x00008000

+

+#define ALX_MSIX_FLAG_RXS (\

+ ALX_MSIX_FLAG_RX0 |\

+ ALX_MSIX_FLAG_RX1 |\

+ ALX_MSIX_FLAG_RX2 |\

+ ALX_MSIX_FLAG_RX3 |\

+ ALX_MSIX_FLAG_RX4 |\

+ ALX_MSIX_FLAG_RX5 |\

+ ALX_MSIX_FLAG_RX6 |\

+ ALX_MSIX_FLAG_RX7)

+#define ALX_MSIX_FLAG_TXS (\

+ ALX_MSIX_FLAG_TX0 |\

+ ALX_MSIX_FLAG_TX1 |\

+ ALX_MSIX_FLAG_TX2 |\

+ ALX_MSIX_FLAG_TX3)

+#define ALX_MSIX_FLAG_ALL (\

+ ALX_MSIX_FLAG_RXS |\

+ ALX_MSIX_FLAG_TXS |\

+ ALX_MSIX_FLAG_TIMER |\

+ ALX_MSIX_FLAG_ALERT |\

+ ALX_MSIX_FLAG_SMB |\

+ ALX_MSIX_FLAG_PHY)

+

+#define CHK_MSIX_FLAG(_flag) CHK_FLAG(msix, MSIX, _flag)

+#define SET_MSIX_FLAG(_flag) SET_FLAG(msix, MSIX, _flag)

+#define CLI_MSIX_FLAG(_flag) CLI_FLAG(msix, MSIX, _flag)

+

+/*

+ *board specific private data structure

+ */

+struct alx_adapter {

+ struct net_device *netdev;

+ struct pci_dev *pdev;

+ struct net_device_stats net_stats;

+ bool netdev_registered;

+ u16 bd_number; /* board number;*/

+

+ struct alx_msix_param *msix[ALX_MAX_MSIX_INTRS];

+ struct msix_entry *msix_entries;

+ int num_msix_rxques;

+ int num_msix_txques;

+ int num_msix_noques; /* true count of msix_noques for device */

+ int num_msix_intrs;

+

+ int min_msix_intrs;

+ int max_msix_intrs;

+

+ /* All Descriptor memory */

+ struct alx_ring_header ring_header;

+

+ /* TX */

+ struct alx_tx_queue *tx_queue[ALX_MAX_TX_QUEUES];

+ /* RX */

+ struct alx_rx_queue *rx_queue[ALX_MAX_RX_QUEUES];

+

+ u16 num_txques;

+ u16 num_rxques; /* equal max(num_hw_rxques, num_sw_rxques) */

+ u16 num_hw_rxques;

+ u16 num_sw_rxques;

+ u16 max_rxques;

+ u16 max_txques;

+

+ u16 num_txdescs;

+ u16 num_rxdescs;

+

+ u32 rxbuf_size;

+

+ /* structs defined in alx_hw.h */

+ struct alx_hw hw;

+ struct alx_hw_stats hw_stats;

+

+ u32 *config_space;

+

+ struct work_struct alx_task;

+ struct timer_list alx_timer;

+

+ unsigned long link_jiffies;

+

+ u32 wol;

+ bool cifs;

+ int bars;

+ bool ioport;

+ spinlock_t tx_lock;

+ spinlock_t rx_lock;

+ atomic_t irq_sem;

+

+#ifdef CONFIG_ALX_DEBUGFS

+ struct alx_debugfs_param dfs;

+#endif

+ u16 msg_enable;

+ unsigned long flags;

+};

+

+#define ALX_ADPT_FLAG_MSI_CAP 0x00000001

+#define ALX_ADPT_FLAG_MSI_EN 0x00000002

+#define ALX_ADPT_FLAG_MSIX_CAP 0x00000004

+#define ALX_ADPT_FLAG_MSIX_EN 0x00000008

+#define ALX_ADPT_FLAG_MRQ_CAP 0x00000010

+#define ALX_ADPT_FLAG_MRQ_EN 0x00000020

+#define ALX_ADPT_FLAG_MTQ_CAP 0x00000040

+#define ALX_ADPT_FLAG_MTQ_EN 0x00000080

+#define ALX_ADPT_FLAG_SRSS_CAP 0x00000100

+#define ALX_ADPT_FLAG_SRSS_EN 0x00000200

+#define ALX_ADPT_FLAG_FIXED_MSIX 0x00000400

+

+#define ALX_ADPT_FLAG_TASK_REINIT_REQ 0x00010000 /* reinit */

+#define ALX_ADPT_FLAG_TASK_LSC_REQ 0x00020000

+

+#define ALX_ADPT_FLAG_STATE_TESTING 0x00100000

+#define ALX_ADPT_FLAG_STATE_RESETTING 0x00200000

+#define ALX_ADPT_FLAG_STATE_DOWN 0x00400000

+#define ALX_ADPT_FLAG_STATE_WATCH_DOG 0x00800000

+#define ALX_ADPT_FLAG_STATE_DIAG_RUNNING 0x01000000

+#define ALX_ADPT_FLAG_STATE_INACTIVE 0x02000000

+

+

+#define CHK_ADPT_FLAG(_flag) CHK_FLAG(adpt, ADPT, _flag)

+#define SET_ADPT_FLAG(_flag) SET_FLAG(adpt, ADPT, _flag)

+#define CLI_ADPT_FLAG(_flag) CLI_FLAG(adpt, ADPT, _flag)

+

+/* default to trying for four seconds */

+#define ALX_TRY_LINK_TIMEOUT (4 * HZ)

+

+

+#define ALX_OPEN_CTRL_IRQ_EN 0x00000001

+#define ALX_OPEN_CTRL_RESET_MAC 0x00000002

+#define ALX_OPEN_CTRL_RESET_PHY 0x00000004

+#define ALX_OPEN_CTRL_RESET_ALL (\

+ ALX_OPEN_CTRL_RESET_MAC |\

+ ALX_OPEN_CTRL_RESET_PHY)

+

+/* needed by alx_ethtool.c */

+extern char alx_drv_name[];

+extern void alx_reinit_locked(struct alx_adapter *adpt);

+extern void alx_set_ethtool_ops(struct net_device *netdev);

+#ifdef ETHTOOL_OPS_COMPAT

+extern int ethtool_ioctl(struct ifreq *ifr);

+#endif

+

+/* needed by alx_dfs.c */

+int alx_validate_mac_addr(u8 *mac_addr);

+bool alx_set_tpdesc(struct alx_tx_queue *txque, union alx_sw_tpdesc *stpd);

+void alx_set_tpdesc_lastfrag(struct alx_tx_queue *txque);

+int alx_open_internal(struct alx_adapter *adpt, u32 ctrl);

+void alx_stop_internal(struct alx_adapter *adpt, u32 ctrl);

+void alx_disable_intr(struct alx_adapter *adpt);

+

+#endif /* _ALX_H_ */

diff --git a/drivers/net/ethernet/atheros/alx/alx_abs.c b/drivers/net/ethernet/atheros/alx/alx_abs.c

new file mode 100644

index 0000000..b039795

--- /dev/null

+++ b/drivers/net/ethernet/atheros/alx/alx_abs.c

@@ -0,0 +1,1055 @@

+/*

+ * Copyright (c) 2012 Qualcomm Atheros, Inc.

+ *

+ * Permission to use, copy, modify, and/or distribute this software for any

+ * purpose with or without fee is hereby granted, provided that the above

+ * copyright notice and this permission notice appear in all copies.

+ *

+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES

+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF

+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR

+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES

+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN

+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF

+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ */

+

+#include <linux/pci_regs.h>

+#include <linux/mii.h>

+#include <linux/netdevice.h>

+#include <linux/crc32.h>

+

+#include "alx_hw.h"

+

+#define ALF_REV_ID_AR8161_B0 0x10

+

+/* definition for MSIX */

+#define ALF_MSIX_ENTRY_BASE 0x2000

+#define ALF_MSIX_ENTRY_SIZE 16

+#define ALF_MSIX_MSG_LOADDR_OFF 0

+#define ALF_MSIX_MSG_HIADDR_OFF 4

+#define ALF_MSIX_MSG_DATA_OFF 8

+#define ALF_MSIX_MSG_CTRL_OFF 12

+

+#define ALF_MSIX_INDEX_RXQ0 0

+#define ALF_MSIX_INDEX_RXQ1 1

+#define ALF_MSIX_INDEX_RXQ2 2

+#define ALF_MSIX_INDEX_RXQ3 3

+#define ALF_MSIX_INDEX_RXQ4 4

+#define ALF_MSIX_INDEX_RXQ5 5

+#define ALF_MSIX_INDEX_RXQ6 6

+#define ALF_MSIX_INDEX_RXQ7 7

+#define ALF_MSIX_INDEX_TXQ0 8

+#define ALF_MSIX_INDEX_TXQ1 9

+#define ALF_MSIX_INDEX_TXQ2 10

+#define ALF_MSIX_INDEX_TXQ3 11

+#define ALF_MSIX_INDEX_TIMER 12

+#define ALF_MSIX_INDEX_ALERT 13

+#define ALF_MSIX_INDEX_SMB 14

+#define ALF_MSIX_INDEX_PHY 15

+

+

+#define ALF_SRAM_BASE L1F_SRAM0

+#define ALF_SRAM(_i, _type) \

+ (ALF_SRAM_BASE + ((_i) * sizeof(_type)))

+

+#define ALF_MIB_BASE L1F_MIB_BASE

+#define ALF_MIB(_i, _type) \

+ (ALF_MIB_BASE + ((_i) * sizeof(_type)))

+

+/* definition for RSS */

+#define ALF_RSS_KEY_BASE L1F_RSS_KEY0

+#define ALF_RSS_IDT_BASE L1F_RSS_IDT_TBL0

+#define ALF_RSS_KEY(_i, _type) \

+ (ALF_RSS_KEY_BASE + ((_i) * sizeof(_type)))

+#define ALF_RSS_TBL(_i, _type) \

+ (L1F_RSS_IDT_TBL0 + ((_i) * sizeof(_type)))

+

+

+/* NIC */

+int alf_identify_nic(struct alx_hw *hw)

+{

+ u32 drv;

+

+ if (hw->pci_revid < ALX_REV_ID_AR8161_V2_0)

+ return 0;

+

+ /* check from V2_0(b0) to ... */

+ switch (hw->pci_revid) {

+ default:

+ alx_mem_r32(hw, L1F_DRV, &drv);

+ if (drv & LX_DRV_DISABLE)

+ return -EINVAL;

+ break;

+ }

+ return 0;

+}

+

+

+/* PHY */

+int alf_read_phy_reg(struct alx_hw *hw, u16 reg_addr, u16 *phy_data)

+{

+ unsigned long flags;

+ int retval = 0;

+

+ spin_lock_irqsave(&hw->mdio_lock, flags);

+

+ retval = l1f_read_phy(hw, false, ALX_MDIO_DEV_TYPE_NORM, false,

+ reg_addr, phy_data);

+ if (retval)

+ alx_hw_err(hw, "error:%u when read phy reg\n", retval);

+

+ spin_unlock_irqrestore(&hw->mdio_lock, flags);

+ return retval;

+}

+

+

+int alf_write_phy_reg(struct alx_hw *hw, u16 reg_addr, u16 phy_data)

+{

+ unsigned long flags;

+ int retval = 0;

+

+ spin_lock_irqsave(&hw->mdio_lock, flags);

+

+ retval = l1f_write_phy(hw, false, ALX_MDIO_DEV_TYPE_NORM, false,

+ reg_addr, phy_data);

+ if (retval)

+ alx_hw_err(hw, "error:%u, when write phy reg\n", retval);

+

+ spin_unlock_irqrestore(&hw->mdio_lock, flags);

+ return retval;

+}

+

+#ifdef CONFIG_ALX_DEBUGFS

+

+int alf_read_ext_phy_reg(struct alx_hw *hw, u8 type, u16 reg_addr,

+ u16 *phy_data)

+{

+ unsigned long flags;

+ int retval = 0;

+

+ spin_lock_irqsave(&hw->mdio_lock, flags);

+

+ retval = l1f_read_phy(hw, true, type, false, reg_addr, phy_data);

+ if (retval)

+ alx_hw_err(hw, "error:%u, when read ext phy reg\n", retval);

+

+ spin_unlock_irqrestore(&hw->mdio_lock, flags);

+ return retval;

+}

+

+

+int alf_write_ext_phy_reg(struct alx_hw *hw, u8 type, u16 reg_addr,

+ u16 phy_data)

+{

+ unsigned long flags;

+ int retval = 0;

+

+ spin_lock_irqsave(&hw->mdio_lock, flags);

+

+ retval = l1f_write_phy(hw, true, type, false, reg_addr, phy_data);

+ if (retval)

+ alx_hw_err(hw, "error:%u, when write ext phy reg\n", retval);

+

+

+ spin_unlock_irqrestore(&hw->mdio_lock, flags);

+ return retval;

+}

+

+#endif

+

+int alf_init_phy(struct alx_hw *hw)

+{

+ u16 phy_id[2];

+ int retval;

+

+ spin_lock_init(&hw->mdio_lock);

+

+ retval = alf_read_phy_reg(hw, MII_PHYSID1, &phy_id[0]);

+ if (retval)

+ return retval;

+ retval = alf_read_phy_reg(hw, MII_PHYSID2, &phy_id[1]);

+ if (retval)

+ return retval;

+ memcpy(&hw->phy_id, phy_id, sizeof(hw->phy_id));

+

+ hw->autoneg_advertised = LX_LC_ALL;

+

+ return retval;

+}

+

+

+int alf_reset_phy(struct alx_hw *hw)

+{

+ int retval = 0;

+

+ CLI_HW_FLAG(PWSAVE_EN);

+ CLI_HW_FLAG(AZ_EN);

+ CLI_HW_FLAG(PTP_EN);

+

+ if (CHK_HW_FLAG(PWSAVE_CAP))

+ SET_HW_FLAG(PWSAVE_EN);

+

+ if (CHK_HW_FLAG(AZ_CAP))

+ SET_HW_FLAG(AZ_EN);

+

+ if (CHK_HW_FLAG(PTP_CAP))

+ SET_HW_FLAG(PTP_EN);

+

+ alx_hw_info(hw, "reset PHY, pws = %d, az = %d, ptp = %d\n",

+ CHK_HW_FLAG(PWSAVE_EN), CHK_HW_FLAG(AZ_EN),

+ CHK_HW_FLAG(PTP_EN));

+ retval = l1f_reset_phy(hw, CHK_HW_FLAG(PWSAVE_EN), CHK_HW_FLAG(AZ_EN),

+ CHK_HW_FLAG(PTP_EN));

+ if (retval)

+ alx_hw_err(hw, "error when reset phy\n");

+

+ return retval;

+}

+

+

+/* LINK */

+int alf_setup_phy_link(struct alx_hw *hw, u8 speed, bool autoneg,

+ bool fc)

+{

+ int retval = 0;

+

+ if (!CHK_HW_FLAG(GIGA_CAP))

+ speed &= ~(LX_LC_1000F | LX_LC_1000H);

+

+ alx_hw_info(hw, "speed = 0x%x, autoneg = %d\n", speed, autoneg);

+ if (l1f_init_phy_spdfc(hw, autoneg, speed, fc)) {

+ alx_hw_err(hw, "error when init phy speed and fc\n");

+ retval = -EINVAL;

+ }

+

+ return retval;

+}

+

+

+int alf_check_phy_link(struct alx_hw *hw, u8 *speed, bool *link_up)

+{

+ u16 bmsr, giga;

+ int retval;

+

+ alf_read_phy_reg(hw, MII_BMSR, &bmsr);

+ retval = alf_read_phy_reg(hw, MII_BMSR, &bmsr);

+ if (retval)

+ return retval;

+

+ if (!(bmsr & BMSR_LSTATUS)) {

+ *link_up = false;

+ *speed = 0;

+ return retval;

+ }

+ *link_up = true;

+

+ /* Read PHY Specific Status Register (17) */

+ retval = alf_read_phy_reg(hw, L1F_MII_GIGA_PSSR, &giga);

+ if (retval)

+ return retval;

+

+

+ if (!(giga & L1F_GIGA_PSSR_SPD_DPLX_RESOLVED)) {

+ alx_hw_err(hw, "error for speed duplex resolved

");

+ return -EINVAL;

+ }

+

+ switch (giga & L1F_GIGA_PSSR_SPEED) {

+ case L1F_GIGA_PSSR_1000MBS:

+ if (giga & L1F_GIGA_PSSR_DPLX)

+ *speed = LX_LC_1000F;

+ else

+ alx_hw_err(hw, "1000M half is invalid

");

+ break;

+ case L1F_GIGA_PSSR_100MBS:

+ if (giga & L1F_GIGA_PSSR_DPLX)

+ *speed = LX_LC_100F;

+ else

+ *speed = LX_LC_100H;

+ break;

+ case L1F_GIGA_PSSR_10MBS:

+ if (giga & L1F_GIGA_PSSR_DPLX)

+ *speed = LX_LC_10F;

+ else

+ *speed = LX_LC_10H;

+ break;

+ default:

+ *speed = 0;

+ retval = -EINVAL;

+ break;

+ }

+ return retval;

+}

+

+int alf_post_phy_link(struct alx_hw *hw, bool az_en, bool link_up, u8 speed)

+{

+ return l1f_post_phy_link(hw, az_en, link_up, speed);

+}

+

+/*

+ * 1. stop_mac

+ * 2. reset mac & dma by reg1400(MASTER)

+ * 3. control speed/duplex, hash-alg

+ * 4. clock switch setting

+ */

/*
 * alf_reset_mac - reset MAC and DMA engines via l1f_reset_mac(),
 * logging the error code on failure.
 */
int alf_reset_mac(struct alx_hw *hw)
{
	int err = l1f_reset_mac(hw);

	if (err)
		alx_hw_err(hw, "error(%d) when reset mac\n", err);

	return err;
}

+

+

+int alf_start_mac(struct alx_hw *hw)

+{

+ u16 en_ctrl = 0;

+ int retval = 0;

+

+ /* set link speed param */

+ switch (hw->link_speed) {

+ case LX_LC_1000F:

+ en_ctrl |= LX_MACSPEED_1000;

+ /* fall through */

+ case LX_LC_100F:

+ case LX_LC_10F:

+ en_ctrl |= LX_MACDUPLEX_FULL;

+ break;

+ }

+

+ /* set fc param*/

+ switch (hw->cur_fc_mode) {

+ case alx_fc_full:

+ en_ctrl |= LX_FC_RXEN; /* Flow Control RX Enable */

+ en_ctrl |= LX_FC_TXEN; /* Flow Control TX Enable */

+ break;

+ case alx_fc_rx_pause:

+ en_ctrl |= LX_FC_RXEN; /* Flow Control RX Enable */

+ break;

+ case alx_fc_tx_pause:

+ en_ctrl |= LX_FC_TXEN; /* Flow Control TX Enable */

+ break;

+ default:

+ break;

+ }

+

+ if (hw->fc_single_pause)

+ en_ctrl |= LX_SINGLE_PAUSE;

+

+ en_ctrl |= LX_FLT_DIRECT; /* RX Enable; and TX Always Enable */

+ en_ctrl |= LX_ADD_FCS;

+

+ en_ctrl |= hw->flags & ALX_HW_FLAG_LX_MASK;

+

+ if (l1f_enable_mac(hw, true, en_ctrl)) {

+ alx_hw_err(hw, "error when start mac

");

+ retval = -EINVAL;

+ }

+ return retval;

+}

+

+

+/*

+ * 1. stop RXQ (reg15A0) and TXQ (reg1590)

+ * 2. stop MAC (reg1480)

+ */

+int alf_stop_mac(struct alx_hw *hw)

+{

+ if (l1f_enable_mac(hw, false, 0)) {

+ alx_hw_err(hw, "error when stop mac

");

+ return -EINVAL;

+ }

+ return 0;

+}

+

+

+int alf_init_mac(struct alx_hw *hw, u16 rxbuf_sz, u16 rx_qnum,

+ u16 rxring_sz, u16 tx_qnum, u16 txring_sz)

+{

+ int retval = 0;

+

+ l1f_init_mac_misc(hw, hw->mac_addr, hw->smb_timer, hw->imt_mod, true);

+

+ retval = l1f_init_mac_rtx_ring_desc(hw, hw->dma.rfdmem_hi[0],

+ hw->dma.rfdmem_lo[0], hw->dma.rrdmem_lo[0],

+ rxring_sz, rxbuf_sz,

+ hw->dma.tpdmem_hi[0], hw->dma.tpdmem_lo,

+ tx_qnum, txring_sz);

+ if (retval) {

+ alx_hw_err(hw, "error(%d) when init mac

", retval);

+ return retval;

+ }

+

+ /* the mtu equals hw->mtu + length of ether header */

+ l1f_init_mac_rtx_queue(hw, hw->mtu + ALX_ETH_LENGTH_OF_HEADER);

+

+ l1f_init_mac_dma(hw);

+

+ return retval;

+}

+

+

+int alf_reset_pcie(struct alx_hw *hw, bool l0s_en, bool l1_en)

+{

+ int retval = 0;

+

+ if (!CHK_HW_FLAG(L0S_CAP))

+ l0s_en = false;

+

+ if (l0s_en)

+ SET_HW_FLAG(L0S_EN);

+ else

+ CLI_HW_FLAG(L0S_EN);

+

+

+ if (!CHK_HW_FLAG(L1_CAP))

+ l1_en = false;

+

+ if (l1_en)

+ SET_HW_FLAG(L1_EN);

+ else

+ CLI_HW_FLAG(L1_EN);

+

+ if (l1f_reset_pcie(hw, l0s_en, l1_en)) {

+ alx_hw_err(hw, "error when reset pcie

");

+ retval = -EINVAL;

+ }

+ return retval;

+}

+

+

+int alf_config_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)

+{

+ int retval = 0;

+

+ if (!CHK_HW_FLAG(L0S_CAP))

+ l0s_en = false;

+

+ if (l0s_en)

+ SET_HW_FLAG(L0S_EN);

+ else

+ CLI_HW_FLAG(L0S_EN);

+

+ if (!CHK_HW_FLAG(L1_CAP))

+ l1_en = false;

+

+ if (l1_en)

+ SET_HW_FLAG(L1_EN);

+ else

+ CLI_HW_FLAG(L1_EN);

+

+ if (l1f_enable_aspm(hw, l0s_en, l1_en, 0)) {

+ alx_hw_err(hw, "error when enable aspm

");

+ retval = -EINVAL;

+ }

+ return retval;

+}

+

+

+int alf_config_wol(struct alx_hw *hw, u32 wufc)

+{

+ u32 wol;

+ int retval = 0;

+

+ wol = 0;

+ /* turn on magic packet event */

+ if (wufc & ALX_WOL_MAGIC) {

+ wol |= L1F_WOL0_MAGIC_EN | L1F_WOL0_PME_MAGIC_EN;

+ /* magic packet maybe Broadcast&multicast&Unicast frame */

+ /* mac |= MAC_CTRL_BC_EN; */

+ }

+

+ /* turn on link up event */

+ if (wufc & ALX_WOL_PHY) {

+ wol |= L1F_WOL0_LINK_EN | L1F_WOL0_PME_LINK;

+ /* only link up can wake up */

+ retval = alf_write_phy_reg(hw, L1F_MII_IER, L1F_IER_LINK_UP);

+ }

+ alx_mem_w32(hw, L1F_WOL0, wol);

+ return retval;

+}

+

+void alf_update_mac_filter(struct alx_hw *hw)

+{

+ u32 mac;

+ u32 flg_hw_map[] = {

+ ALX_HW_FLAG_BROADCAST_EN, L1F_MAC_CTRL_BRD_EN,

+ ALX_HW_FLAG_VLANSTRIP_EN, L1F_MAC_CTRL_VLANSTRIP,

+ ALX_HW_FLAG_PROMISC_EN, L1F_MAC_CTRL_PROMISC_EN,

+ ALX_HW_FLAG_MULTIALL_EN, L1F_MAC_CTRL_MULTIALL_EN,

+ ALX_HW_FLAG_LOOPBACK_EN, L1F_MAC_CTRL_LPBACK_EN

+ };

+ int i;

+

+

+ alx_mem_r32(hw, L1F_MAC_CTRL, &mac);

+

+ for (i = 0; i < ARRAY_SIZE(flg_hw_map); i += 2) {

+ if (hw->flags & flg_hw_map[i])

+ mac |= flg_hw_map[i + 1];

+ else

+ mac &= ~flg_hw_map[i + 1];

+ }

+

+ alx_mem_w32(hw, L1F_MAC_CTRL, mac);

+}

+

+

+int alf_config_pow_save(struct alx_hw *hw, u8 speed, bool wol_en,

+ bool tx_en, bool rx_en, bool pws_en)

+{

+ int retval = 0;

+

+ if (l1f_powersaving(hw, speed, wol_en, tx_en, rx_en, pws_en)) {

+ alx_hw_err(hw, "error when set power saving

");

+ retval = -EINVAL;

+ }

+ return retval;

+}

+

+

+/* RAR, Multicast, VLAN */

+void alf_set_mac_addr(struct alx_hw *hw, u8 *addr)

+{

+ u32 sta;

+

+ /*

+ * for example: 00-0B-6A-F6-00-DC

+ * 0<-->6AF600DC, 1<-->000B.

+ */

+

+ /* low dword */

+ sta = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];

+ alx_mem_w32(hw, L1F_STAD0, sta);

+

+ /* hight dword */

+ sta = addr[0] << 8 | addr[1];

+ alx_mem_w32(hw, L1F_STAD1, sta);

+}

+

+

+int alf_get_mac_addr(struct alx_hw *hw, u8 *addr)

+{

+ int retval = 0;

+

+ if (l1f_get_perm_macaddr(hw, addr)) {

+ alx_hw_err(hw, "error when get permanent mac address

");

+ retval = -EINVAL;

+ }

+ return retval;

+}

+

+

+int alf_set_mc_addr(struct alx_hw *hw, u8 *addr)

+{

+ u32 crc32, bit, reg, mta;

+

+ /*

+ * set hash value for a multicast address hash calcu processing.

+ * 1. calcu 32bit CRC for multicast address

+ * 2. reverse crc with MSB to LSB

+ */

+ crc32 = ALX_ETH_CRC(addr, ALX_ETH_LENGTH_OF_ADDRESS);

+

+ /*

+ * The HASH Table is a register array of 2 32-bit registers.

+ * It is treated like an array of 64 bits. We want to set

+ * bit BitArray[hash_value]. So we figure out what register

+ * the bit is in, read it, OR in the new bit, then write

+ * back the new value. The register is determined by the

+ * upper 7 bits of the hash value and the bit within that

+ * register are determined by the lower 5 bits of the value.

+ */

+ reg = (crc32 >> 31) & 0x1;

+ bit = (crc32 >> 26) & 0x1F;

+

+ alx_mem_r32(hw, L1F_HASH_TBL0 + (reg<<2), &mta);

+ mta |= (0x1 << bit);

+ alx_mem_w32(hw, L1F_HASH_TBL0 + (reg<<2), mta);

+ return 0;

+}

+

+

+int alf_clear_mc_addr(struct alx_hw *hw)

+{

+ alx_mem_w32(hw, L1F_HASH_TBL0, 0);

+ alx_mem_w32(hw, L1F_HASH_TBL1, 0);

+ return 0;

+}

+

+

+/* RTX, IRQ */

+void alf_config_tx(struct alx_hw *hw)

+{

+ u32 wrr;

+

+ alx_mem_r32(hw, L1F_WRR, &wrr);

+ switch (hw->wrr_mode) {

+ case alx_wrr_mode_none:

+ FIELD_SETL(wrr, L1F_WRR_PRI, L1F_WRR_PRI_RESTRICT_NONE);

+ break;

+ case alx_wrr_mode_high:

+ FIELD_SETL(wrr, L1F_WRR_PRI, L1F_WRR_PRI_RESTRICT_HI);

+ break;

+ case alx_wrr_mode_high2:

+ FIELD_SETL(wrr, L1F_WRR_PRI, L1F_WRR_PRI_RESTRICT_HI2);

+ break;

+ case alx_wrr_mode_all:

+ FIELD_SETL(wrr, L1F_WRR_PRI, L1F_WRR_PRI_RESTRICT_ALL);

+ break;

+ }

+ FIELD_SETL(wrr, L1F_WRR_PRI0, hw->wrr_prio0);

+ FIELD_SETL(wrr, L1F_WRR_PRI1, hw->wrr_prio1);

+ FIELD_SETL(wrr, L1F_WRR_PRI2, hw->wrr_prio2);

+ FIELD_SETL(wrr, L1F_WRR_PRI3, hw->wrr_prio3);

+ alx_mem_w32(hw, L1F_WRR, wrr);

+}

+

+

+int alf_config_msix(struct alx_hw *hw, u16 num_intrs, bool msix_en, bool msi_en)

+{

+ u32 map[2];

+ u32 type;

+ int msix_idx;

+

+ if (!msix_en)

+ goto configure_legacy;

+

+ memset(map, 0, sizeof(map));

+ for (msix_idx = 0; msix_idx < num_intrs; msix_idx++) {

+ switch (msix_idx) {

+ case ALF_MSIX_INDEX_RXQ0:

+ FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_RXQ0,

+ ALF_MSIX_INDEX_RXQ0);

+ break;

+ case ALF_MSIX_INDEX_RXQ1:

+ FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_RXQ1,

+ ALF_MSIX_INDEX_RXQ1);

+ break;

+ case ALF_MSIX_INDEX_RXQ2:

+ FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_RXQ2,

+ ALF_MSIX_INDEX_RXQ2);

+ break;

+ case ALF_MSIX_INDEX_RXQ3:

+ FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_RXQ3,

+ ALF_MSIX_INDEX_RXQ3);

+ break;

+ case ALF_MSIX_INDEX_RXQ4:

+ FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_RXQ4,

+ ALF_MSIX_INDEX_RXQ4);

+ break;

+ case ALF_MSIX_INDEX_RXQ5:

+ FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_RXQ5,

+ ALF_MSIX_INDEX_RXQ5);

+ break;

+ case ALF_MSIX_INDEX_RXQ6:

+ FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_RXQ6,

+ ALF_MSIX_INDEX_RXQ6);

+ break;

+ case ALF_MSIX_INDEX_RXQ7:

+ FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_RXQ7,

+ ALF_MSIX_INDEX_RXQ7);

+ break;

+ case ALF_MSIX_INDEX_TXQ0:

+ FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_TXQ0,

+ ALF_MSIX_INDEX_TXQ0);

+ break;

+ case ALF_MSIX_INDEX_TXQ1:

+ FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_TXQ1,

+ ALF_MSIX_INDEX_TXQ1);

+ break;

+ case ALF_MSIX_INDEX_TXQ2:

+ FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_TXQ2,

+ ALF_MSIX_INDEX_TXQ2);

+ break;

+ case ALF_MSIX_INDEX_TXQ3:

+ FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_TXQ3,

+ ALF_MSIX_INDEX_TXQ3);

+ break;

+ case ALF_MSIX_INDEX_TIMER:

+ FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_TIMER,

+ ALF_MSIX_INDEX_TIMER);

+ break;

+ case ALF_MSIX_INDEX_ALERT:

+ FIELD_SETL(map[0], L1F_MSI_MAP_TBL1_ALERT,

+ ALF_MSIX_INDEX_ALERT);

+ break;

+ case ALF_MSIX_INDEX_SMB:

+ FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_SMB,

+ ALF_MSIX_INDEX_SMB);

+ break;

+ case ALF_MSIX_INDEX_PHY:

+ FIELD_SETL(map[1], L1F_MSI_MAP_TBL2_PHY,

+ ALF_MSIX_INDEX_PHY);

+ break;

+ default:

+ break;

+

+ }

+

+ }

+

+ alx_mem_w32(hw, L1F_MSI_MAP_TBL1, map[0]);

+ alx_mem_w32(hw, L1F_MSI_MAP_TBL2, map[1]);

+

+ /* 0 to alert, 1 to timer */

+ type = (L1F_MSI_ID_MAP_DMAW |

+ L1F_MSI_ID_MAP_DMAR |

+ L1F_MSI_ID_MAP_PCIELNKDW |

+ L1F_MSI_ID_MAP_PCIECERR |

+ L1F_MSI_ID_MAP_PCIENFERR |

+ L1F_MSI_ID_MAP_PCIEFERR |

+ L1F_MSI_ID_MAP_PCIEUR);

+

+ alx_mem_w32(hw, L1F_MSI_ID_MAP, type);

+ return 0;

+

+configure_legacy:

+ alx_mem_w32(hw, L1F_MSI_MAP_TBL1, 0x0);

+ alx_mem_w32(hw, L1F_MSI_MAP_TBL2, 0x0);

+ alx_mem_w32(hw, L1F_MSI_ID_MAP, 0x0);

+ if (msi_en) {

+ u32 msi;

+ alx_mem_r32(hw, 0x1920, &msi);

+ msi |= 0x10000;

+ alx_mem_w32(hw, 0x1920, msi);

+ }

+ return 0;

+}

+

+

+/*

+ * Interrupt

+ */

+int alf_ack_phy_intr(struct alx_hw *hw)

+{

+ u16 isr;

+ return alf_read_phy_reg(hw, L1F_MII_ISR, &isr);

+}

+

+

+int alf_enable_legacy_intr(struct alx_hw *hw)

+{

+ u16 cmd;

+

+ alx_cfg_r16(hw, PCI_COMMAND, &cmd);

+ cmd &= ~PCI_COMMAND_INTX_DISABLE;

+ alx_cfg_w16(hw, PCI_COMMAND, cmd);

+

+ alx_mem_w32(hw, L1F_ISR, ~((u32) L1F_ISR_DIS));

+ alx_mem_w32(hw, L1F_IMR, hw->intr_mask);

+ return 0;

+}

+

+

+int alf_disable_legacy_intr(struct alx_hw *hw)

+{

+ alx_mem_w32(hw, L1F_ISR, L1F_ISR_DIS);

+ alx_mem_w32(hw, L1F_IMR, 0);

+ alx_mem_flush(hw);

+ return 0;

+}

+

+

+int alf_enable_msix_intr(struct alx_hw *hw, u8 entry_idx)

+{

+ u32 ctrl_reg;

+

+ ctrl_reg = ALF_MSIX_ENTRY_BASE + (entry_idx * ALF_MSIX_ENTRY_SIZE) +

+ ALF_MSIX_MSG_CTRL_OFF;

+

+ alx_mem_w32(hw, ctrl_reg, 0x0);

+ alx_mem_flush(hw);

+ return 0;

+}

+

+

+int alf_disable_msix_intr(struct alx_hw *hw, u8 entry_idx)

+{

+ u32 ctrl_reg;

+

+ ctrl_reg = ALF_MSIX_ENTRY_BASE + (entry_idx * ALF_MSIX_ENTRY_SIZE) +

+ ALF_MSIX_MSG_CTRL_OFF;

+

+ alx_mem_w32(hw, ctrl_reg, 0x1);

+ alx_mem_flush(hw);

+ return 0;

+}

+

+

+/* RSS */

+int alf_config_rss(struct alx_hw *hw, bool rss_en)

+{

+ int key_len_by_u8 = sizeof(hw->rss_key);

+ int idt_len_by_u32 = sizeof(hw->rss_idt) / sizeof(u32);

+ u32 rxq0;

+ int i;

+

+ /* Fill out hash function keys */

+ for (i = 0; i < key_len_by_u8; i++) {

+ alx_mem_w8(hw, ALF_RSS_KEY(i, u8),

+ hw->rss_key[key_len_by_u8 - i - 1]);

+ }

+

+ /* Fill out redirection table */

+ for (i = 0; i < idt_len_by_u32; i++)

+ alx_mem_w32(hw, ALF_RSS_TBL(i, u32), hw->rss_idt[i]);

+

+ alx_mem_w32(hw, L1F_RSS_BASE_CPU_NUM, hw->rss_base_cpu);

+

+ alx_mem_r32(hw, L1F_RXQ0, &rxq0);

+ if (hw->rss_hstype & ALX_RSS_HSTYP_IPV4_EN)

+ rxq0 |= L1F_RXQ0_RSS_HSTYP_IPV4_EN;

+ else

+ rxq0 &= ~L1F_RXQ0_RSS_HSTYP_IPV4_EN;

+

+ if (hw->rss_hstype & ALX_RSS_HSTYP_TCP4_EN)

+ rxq0 |= L1F_RXQ0_RSS_HSTYP_IPV4_TCP_EN;

+ else

+ rxq0 &= ~L1F_RXQ0_RSS_HSTYP_IPV4_TCP_EN;

+

+ if (hw->rss_hstype & ALX_RSS_HSTYP_IPV6_EN)

+ rxq0 |= L1F_RXQ0_RSS_HSTYP_IPV6_EN;

+ else

+ rxq0 &= ~L1F_RXQ0_RSS_HSTYP_IPV6_EN;

+

+ if (hw->rss_hstype & ALX_RSS_HSTYP_TCP6_EN)

+ rxq0 |= L1F_RXQ0_RSS_HSTYP_IPV6_TCP_EN;

+ else

+ rxq0 &= ~L1F_RXQ0_RSS_HSTYP_IPV6_TCP_EN;

+

+ FIELD_SETL(rxq0, L1F_RXQ0_RSS_MODE, hw->rss_mode);

+ FIELD_SETL(rxq0, L1F_RXQ0_IDT_TBL_SIZE, hw->rss_idt_size);

+

+ if (rss_en)

+ rxq0 |= L1F_RXQ0_RSS_HASH_EN;

+ else

+ rxq0 &= ~L1F_RXQ0_RSS_HASH_EN;

+

+ alx_mem_w32(hw, L1F_RXQ0, rxq0);

+ return 0;

+}

+

+

+/* fc */

+static int alf_get_fc_mode(struct alx_hw *hw, enum alx_fc_mode *mode)

+{

+ u16 bmsr, giga;

+ int i;

+ int retval = 0;

+

+ for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {

+ alf_read_phy_reg(hw, MII_BMSR, &bmsr);

+ alf_read_phy_reg(hw, MII_BMSR, &bmsr);

+ if (bmsr & BMSR_LSTATUS) {

+ /* Read phy Specific Status Register (17) */

+ retval = alf_read_phy_reg(hw, L1F_MII_GIGA_PSSR, &giga);

+ if (retval)

+ return retval;

+

+ if (!(giga & L1F_GIGA_PSSR_SPD_DPLX_RESOLVED)) {

+ alx_hw_err(hw,

+ "error for speed duplex resolved

");

+ return -EINVAL;

+ }

+

+ if ((giga & L1F_GIGA_PSSR_FC_TXEN) &&

+ (giga & L1F_GIGA_PSSR_FC_RXEN)) {

+ *mode = alx_fc_full;

+ } else if (giga & L1F_GIGA_PSSR_FC_TXEN) {

+ *mode = alx_fc_tx_pause;

+ } else if (giga & L1F_GIGA_PSSR_FC_RXEN) {

+ *mode = alx_fc_rx_pause;

+ } else {

+ *mode = alx_fc_none;

+ }

+ break;

+ }

+ mdelay(100);

+ }

+

+ if (i == ALX_MAX_SETUP_LNK_CYCLE) {

+ alx_hw_err(hw, "error when get flow control mode

");

+ retval = -EINVAL;

+ }

+ return retval;

+}

+

+

+int alf_config_fc(struct alx_hw *hw)

+{

+ u32 mac;

+ int retval = 0;

+

+ if (hw->disable_fc_autoneg) {

+ hw->fc_was_autonegged = false;

+ hw->cur_fc_mode = hw->req_fc_mode;

+ } else {

+ hw->fc_was_autonegged = true;

+ retval = alf_get_fc_mode(hw, &hw->cur_fc_mode);

+ if (retval)

+ return retval;

+ }

+

+ alx_mem_r32(hw, L1F_MAC_CTRL, &mac);

+

+ switch (hw->cur_fc_mode) {

+ case alx_fc_none: /* 0 */

+ mac &= ~(L1F_MAC_CTRL_RXFC_EN | L1F_MAC_CTRL_TXFC_EN);

+ break;

+ case alx_fc_rx_pause: /* 1 */

+ mac &= ~L1F_MAC_CTRL_TXFC_EN;

+ mac |= L1F_MAC_CTRL_RXFC_EN;

+ break;

+ case alx_fc_tx_pause: /* 2 */

+ mac |= L1F_MAC_CTRL_TXFC_EN;

+ mac &= ~L1F_MAC_CTRL_RXFC_EN;

+ break;

+ case alx_fc_full: /* 3 */

+ case alx_fc_default: /* 4 */

+ mac |= (L1F_MAC_CTRL_TXFC_EN | L1F_MAC_CTRL_RXFC_EN);

+ break;

+ default:

+ alx_hw_err(hw, "flow control param set incorrectly

");

+ return -EINVAL;

+ break;

+ }

+

+ alx_mem_w32(hw, L1F_MAC_CTRL, mac);

+

+ return retval;

+}

+

+

+/* ethtool */

+void alf_get_ethtool_regs(struct alx_hw *hw, void *buff)

+{

+ int i;

+ u32 *val = buff;

+ static const u32 reg[] = {

+ /* 0 */

+ L1F_DEV_CAP, L1F_DEV_CTRL, L1F_LNK_CAP, L1F_LNK_CTRL,

+ L1F_UE_SVRT, L1F_EFLD, L1F_SLD, L1F_PPHY_MISC1,

+ L1F_PPHY_MISC2, L1F_PDLL_TRNS1,

+

+ /* 10 */

+ L1F_TLEXTN_STATS, L1F_EFUSE_CTRL, L1F_EFUSE_DATA, L1F_SPI_OP1,

+ L1F_SPI_OP2, L1F_SPI_OP3, L1F_EF_CTRL, L1F_EF_ADDR,

+ L1F_EF_DATA, L1F_SPI_ID,

+

+ /* 20 */

+ L1F_SPI_CFG_START, L1F_PMCTRL, L1F_LTSSM_CTRL, L1F_MASTER,

+ L1F_MANU_TIMER, L1F_IRQ_MODU_TIMER, L1F_PHY_CTRL, L1F_MAC_STS,

+ L1F_MDIO, L1F_MDIO_EXTN,

+

+ /* 30 */

+ L1F_PHY_STS, L1F_BIST0, L1F_BIST1, L1F_SERDES,

+ L1F_LED_CTRL, L1F_LED_PATN, L1F_LED_PATN2, L1F_SYSALV,

+ L1F_PCIERR_INST, L1F_LPI_DECISN_TIMER,

+

+ /* 40 */

+ L1F_LPI_CTRL, L1F_LPI_WAIT, L1F_HRTBT_VLAN, L1F_HRTBT_CTRL,

+ L1F_RXPARSE, L1F_MAC_CTRL, L1F_GAP, L1F_STAD1,

+ L1F_LED_CTRL, L1F_HASH_TBL0,

+

+ /* 50 */

+ L1F_HASH_TBL1, L1F_HALFD, L1F_DMA, L1F_WOL0,

+ L1F_WOL1, L1F_WOL2, L1F_WRR, L1F_HQTPD,

+ L1F_CPUMAP1, L1F_CPUMAP2,

+

+ /* 60 */

+ L1F_MISC, L1F_RX_BASE_ADDR_HI, L1F_RFD_ADDR_LO, L1F_RFD_RING_SZ,

+ L1F_RFD_BUF_SZ, L1F_RRD_ADDR_LO, L1F_RRD_RING_SZ,

+ L1F_RFD_PIDX, L1F_RFD_CIDX, L1F_RXQ0,

+

+ /* 70 */

+ L1F_RXQ1, L1F_RXQ2, L1F_RXQ3, L1F_TX_BASE_ADDR_HI,

+ L1F_TPD_PRI0_ADDR_LO, L1F_TPD_PRI1_ADDR_LO,

+ L1F_TPD_PRI2_ADDR_LO, L1F_TPD_PRI3_ADDR_LO,

+ L1F_TPD_PRI0_PIDX, L1F_TPD_PRI1_PIDX,

+

+ /* 80 */

+ L1F_TPD_PRI2_PIDX, L1F_TPD_PRI3_PIDX, L1F_TPD_PRI0_CIDX,

+ L1F_TPD_PRI1_CIDX, L1F_TPD_PRI2_CIDX, L1F_TPD_PRI3_CIDX,

+ L1F_TPD_RING_SZ, L1F_TXQ0, L1F_TXQ1, L1F_TXQ2,

+

+ /* 90 */

+ L1F_MSI_MAP_TBL1, L1F_MSI_MAP_TBL2, L1F_MSI_ID_MAP,

+ L1F_MSIX_MASK, L1F_MSIX_PENDING,

+ };

+

+ for (i = 0; i < ARRAY_SIZE(reg); i++)

+ alx_mem_r32(hw, reg[i], &val[i]);

+

+ /* SRAM */

+ for (i = 0; i < 16; i++)

+ alx_mem_r32(hw, ALF_SRAM(i, u32), &val[100 + i]);

+

+ /* RSS */

+ for (i = 0; i < 10; i++)

+ alx_mem_r32(hw, ALF_RSS_KEY(i, u32), &val[120 + i]);

+ for (i = 0; i < 32; i++)

+ alx_mem_r32(hw, ALF_RSS_TBL(i, u32), &val[130 + i]);

+ alx_mem_r32(hw, L1F_RSS_HASH_VAL, &val[162]);

+ alx_mem_r32(hw, L1F_RSS_HASH_FLAG, &val[163]);

+ alx_mem_r32(hw, L1F_RSS_BASE_CPU_NUM, &val[164]);

+

+ /* MIB */

+ for (i = 0; i < 48; i++)

+ alx_mem_r32(hw, ALF_MIB(i, u32), &val[170 + i]);

+}

+

+

+/******************************************************************************/

+static void alf_set_hw_capabilities(struct alx_hw *hw)

+{

+ SET_HW_FLAG(L0S_CAP);

+ SET_HW_FLAG(L1_CAP);

+

+ if (hw->mac_type == alx_mac_l1f)

+ SET_HW_FLAG(GIGA_CAP);

+

+ /* set flags of alx_phy_info */

+ SET_HW_FLAG(PWSAVE_CAP);

+}

+

+

+/* alc_set_hw_info */

+static void alf_set_hw_infos(struct alx_hw *hw)

+{

+ hw->rxstat_reg = L1F_MIB_RX_OK;

+ hw->rxstat_sz = 0x60;

+ hw->txstat_reg = L1F_MIB_TX_OK;

+ hw->txstat_sz = 0x68;

+

+ hw->rx_prod_reg[0] = L1F_RFD_PIDX;

+ hw->rx_cons_reg[0] = L1F_RFD_CIDX;

+

+ hw->tx_prod_reg[0] = L1F_TPD_PRI0_PIDX;

+ hw->tx_cons_reg[0] = L1F_TPD_PRI0_CIDX;

+ hw->tx_prod_reg[1] = L1F_TPD_PRI1_PIDX;

+ hw->tx_cons_reg[1] = L1F_TPD_PRI1_CIDX;

+ hw->tx_prod_reg[2] = L1F_TPD_PRI2_PIDX;

+ hw->tx_cons_reg[2] = L1F_TPD_PRI2_CIDX;

+ hw->tx_prod_reg[3] = L1F_TPD_PRI3_PIDX;

+ hw->tx_cons_reg[3] = L1F_TPD_PRI3_CIDX;

+

+ hw->hwreg_sz = 0x200;

+ hw->eeprom_sz = 0;

+}

+

+

+/*

+ * alf_init_hw

+ */

+void alf_init_hw(struct alx_hw *hw)

+{

+ alf_set_hw_capabilities(hw);

+ alf_set_hw_infos(hw);

+

+ alx_hw_info(hw, "HW Flags = 0x%x

", hw->flags);

+}

diff --git a/drivers/net/ethernet/atheros/alx/alx_cifs.c b/drivers/net/ethernet/atheros/alx/alx_cifs.c

new file mode 100644

index 0000000..864747d

--- /dev/null

+++ b/drivers/net/ethernet/atheros/alx/alx_cifs.c

@@ -0,0 +1,307 @@

+/*

+ * Copyright (c) 2012 Qualcomm Atheros, Inc.

+ *

+ * Permission to use, copy, modify, and/or distribute this software for any

+ * purpose with or without fee is hereby granted, provided that the above

+ * copyright notice and this permission notice appear in all copies.

+ *

+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES

+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF

+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR

+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES

+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN

+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF

+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ */

+

+#include "alx.h"

+#include "alx_hw.h"

+

+/*

+ * Desc: CIFS offload, config the ANNOUNCEMENT message to our adapter.

+ */

+static void alf_setup_cifs(struct alx_hw *hw)

+{

+ u32 ctrl;

+ u32 i;

+ struct alx_swoi *swoi = &hw->swoi_offload;

+

+ if (!swoi->len) {

+ alx_hw_err(hw, "CIFS offload disabled.

");

+ return;

+ }

+

+ /* Write ANNOUNCEMENT message to SRAM. */

+ for (i = 0; i < swoi->len; i += 4) {

+ alx_mem_w32(hw, hw->annce_addr_off + i,

+ *(u32 *) &swoi->msg[i]);

+ }

+

+ ctrl = 0;

+ FIELD_SETL(ctrl, L1F_HRTBT_CTRL_PKTLEN, swoi->len);

+ FIELD_SETL(ctrl, L1F_HRTBT_CTRL_HDRADDR,

+ ((hw->annce_addr_off - PACKET_MAP_ADDR) >> 3));

+ if (swoi->is_vlan)

+ ctrl |= L1F_HRTBT_CTRL_HASVLAN;

+

+ /* The registration packet send period.

+ * 1bit = 2s. nAnncePeriod = (pkt_send_period+1) *2

+ */

+ FIELD_SETL(ctrl, L1F_HRTBT_CTRL_PERIOD, swoi->period / 2 - 1);

+ ctrl |= L1F_HRTBT_CTRL_EN;

+ alx_mem_w32(hw, L1F_HRTBT_CTRL, ctrl);

+ alx_hw_info(hw,

+ "ANNOUNCEMENT message annce_base_addr[0x%08x], len%d, "

+ "%s vlan, Reg%04x-%08x

",

+ (hw->annce_addr_off - PACKET_MAP_ADDR) >> 3, swoi->len,

+ swoi->is_vlan ? "has" : "no", L1F_HRTBT_CTRL, ctrl);

+

+ hw->annce_addr_off =

+ (hw->annce_addr_off + swoi->len + 0x0F) & 0xFFFFFFF0;

+

+ /* Enable magic because cifs needs it. */

+ alx_mem_r32(hw, L1F_WOL0, &ctrl);

+ ctrl |= (L1F_WOL0_MAGIC_EN | L1F_WOL0_PME_MAGIC_EN);

+

+ alx_mem_w32(hw, L1F_WOL0, ctrl);

+}

+

+

+/*

+ * Desc: CIFS offload, config the ANNOUNCEMENT message to our adapter.

+ */

+static void alf_setup_swoi(struct alx_hw *hw)

+{

+ u32 ctrl;

+ u32 i;

+ struct alx_swoi *swoi = &hw->swoi_offload;

+

+ if (!swoi->len) {

+ alx_hw_err(hw, "SWOI offload disabled.

");

+ return;

+ }

+

+ /* Write ANNOUNCEMENT message to SRAM. */

+ for (i = 0; i < swoi->total_len; i += 4) {

+ alx_mem_w32(hw, hw->annce_addr_off + i,

+ *(u32 *) &swoi->msg[i]);

+ }

+

+ ctrl = 0;

+ FIELD_SETL(ctrl, L1F_HRTBT_CTRL_PKTLEN, swoi->len);

+ FIELD_SETL(ctrl, L1F_HRTBT_CTRL_HDRADDRB0,

+ ((hw->annce_addr_off - PACKET_MAP_ADDR) >> 4));

+ if (swoi->fraged)

+ ctrl |= L1F_HRTBT_CTRL_PKT_FRAG;

+

+ if (swoi->is_vlan)

+ ctrl |= L1F_HRTBT_CTRL_HASVLAN;

+

+ /* The registration packet send period.

+ * 1bit = 2s. nAnncePeriod = (pkt_send_period+1) *2

+ */

+ FIELD_SETL(ctrl, L1F_HRTBT_CTRL_PERIOD, swoi->period / 2 - 1);

+ ctrl |= L1F_HRTBT_CTRL_EN;

+ alx_mem_w32(hw, L1F_HRTBT_CTRL, ctrl);

+ alx_hw_info(hw,

+ "ANNOUNCEMENT message annce_base_addr[0x%08x], len%d, "

+ "%s frag, %s vlan, Reg%04x-%08x

",

+ (hw->annce_addr_off - PACKET_MAP_ADDR) >> 3, swoi->len,

+ swoi->fraged ? "has" : "no", swoi->is_vlan ? "has" : "no",

+ L1F_HRTBT_CTRL, ctrl);

+

+ ctrl = 0;

+ FIELD_SETL(ctrl, L1F_HRTBT_EXT_CTRL_FRAG_LEN, swoi->frag_len);

+ if (swoi->pkt_is_8023)

+ ctrl |= L1F_HRTBT_EXT_CTRL_IS_8023;

+ if (swoi->pkt_is_ipv6)

+ ctrl |= L1F_HRTBT_EXT_CTRL_IS_IPV6;

+

+ ctrl |= L1F_HRTBT_EXT_CTRL_WAKEUP_EN | (swoi->pkt_is_ipv6 ?

+ L1F_HRTBT_EXT_CTRL_NS_EN :

+ L1F_HRTBT_EXT_CTRL_ARP_EN);

+ alx_mem_w32(hw, L1F_HRTBT_EXT_CTRL, ctrl);

+ alx_mem_r32(hw, L1F_PMOFLD, &ctrl);

+ /* Config host mac address. */

+ alx_mem_r32(hw, L1F_STAD0, &i);

+ alx_mem_w32(hw, L1F_ARP_MAC0, i);

+ /* alx_mem_w32(hw, L1F_1ST_NS_MAC0, i); */

+ alx_mem_w32(hw, L1F_2ND_NS_MAC0, i);

+ alx_mem_r32(hw, L1F_STAD1, &i);

+ alx_mem_w32(hw, L1F_ARP_MAC1, i);

+ /* alx_mem_w32(hw, L1F_1ST_NS_MAC1, i); */

+ alx_mem_w32(hw, L1F_2ND_NS_MAC1, i);

+

+ if (swoi->pkt_is_ipv6) {

+ alx_mem_w32(hw, L1F_HRTBT_REM_IPV6_ADDR3,

+ swoi->svr_ipv6_addr[0]);

+ alx_mem_w32(hw, L1F_HRTBT_REM_IPV6_ADDR2,

+ swoi->svr_ipv6_addr[1]);

+ alx_mem_w32(hw, L1F_HRTBT_REM_IPV6_ADDR1,

+ swoi->svr_ipv6_addr[2]);

+ alx_mem_w32(hw, L1F_HRTBT_REM_IPV6_ADDR0,

+ swoi->svr_ipv6_addr[3]);

+ alx_mem_w32(hw, L1F_2ND_TAR_IPV6_1_3,

+ swoi->host_ipv6_addr[0]);

+ alx_mem_w32(hw, L1F_2ND_TAR_IPV6_1_2,

+ swoi->host_ipv6_addr[1]);

+ alx_mem_w32(hw, L1F_2ND_TAR_IPV6_1_1,

+ swoi->host_ipv6_addr[2]);

+ alx_mem_w32(hw, L1F_2ND_TAR_IPV6_1_0,

+ swoi->host_ipv6_addr[3]);

+ alx_mem_w32(hw, L1F_2ND_SN_IPV6_3, swoi->host_ipv6_addr[0]);

+ alx_mem_w32(hw, L1F_2ND_SN_IPV6_2, swoi->host_ipv6_addr[1]);

+ alx_mem_w32(hw, L1F_2ND_SN_IPV6_1, swoi->host_ipv6_addr[2]);

+ alx_mem_w32(hw, L1F_2ND_SN_IPV6_0, swoi->host_ipv6_addr[3]);

+ alx_mem_w32(hw, L1F_2ND_TAR_IPV6_2_0, 0);

+ alx_mem_w32(hw, L1F_2ND_TAR_IPV6_2_1, 0);

+ alx_mem_w32(hw, L1F_2ND_TAR_IPV6_2_2, 0);

+ alx_mem_w32(hw, L1F_2ND_TAR_IPV6_2_3, 0);

+ alx_mem_w32(hw, L1F_1ST_TAR_IPV6_1_0, 0);

+ alx_mem_w32(hw, L1F_1ST_TAR_IPV6_1_1, 0);

+ alx_mem_w32(hw, L1F_1ST_TAR_IPV6_1_2, 0);

+ alx_mem_w32(hw, L1F_1ST_TAR_IPV6_1_3, 0);

+ alx_mem_w32(hw, L1F_1ST_TAR_IPV6_2_0, 0);

+ alx_mem_w32(hw, L1F_1ST_TAR_IPV6_2_1, 0);

+ alx_mem_w32(hw, L1F_1ST_TAR_IPV6_2_2, 0);

+ alx_mem_w32(hw, L1F_1ST_TAR_IPV6_2_3, 0);

+ if (!(L1F_PMOFLD_BY_HW & ctrl)) {

+ /* no pm offload, we need config. */

+ alx_mem_r32(hw, L1F_MAC_CTRL, &ctrl);

+ alx_mem_w32(hw, L1F_MAC_CTRL,

+ ctrl | L1F_MAC_CTRL_MULTIALL_EN);

+ } else {

+ if (!(L1F_PMOFLD_MULTI_SOLD & ctrl)) {

+ /* MS PM offload */

+ alx_mem_w32(hw, L1F_ARP_REMOTE_IPV4, 0);

+ alx_mem_w32(hw, L1F_1ST_REMOTE_IPV6_0, 0);

+ alx_mem_w32(hw, L1F_1ST_REMOTE_IPV6_1, 0);

+ alx_mem_w32(hw, L1F_1ST_REMOTE_IPV6_2, 0);

+ alx_mem_w32(hw, L1F_1ST_REMOTE_IPV6_3, 0);

+ alx_mem_w32(hw, L1F_2ND_REMOTE_IPV6_0, 0);

+ alx_mem_w32(hw, L1F_2ND_REMOTE_IPV6_1, 0);

+ alx_mem_w32(hw, L1F_2ND_REMOTE_IPV6_2, 0);

+ alx_mem_w32(hw, L1F_2ND_REMOTE_IPV6_3, 0);

+ }

+ }

+ } else {

+ alx_mem_w32(hw, L1F_HRTBT_REM_IPV4_ADDR, swoi->svr_ipv4_addr);

+ alx_mem_w32(hw, L1F_HRTBT_HOST_IPV4_ADDR,

+ swoi->host_ipv4_addr);

+ alx_mem_w32(hw, L1F_ARP_HOST_IPV4, swoi->host_ipv4_addr);

+ }

+

+ ctrl = 0;

+ FIELD_SETL(ctrl, L1F_HRTBT_WAKEUP_PORT_SRC, swoi->svr_port);

+ FIELD_SETL(ctrl, L1F_HRTBT_WAKEUP_PORT_DEST, swoi->host_port);

+ alx_mem_w32(hw, L1F_HRTBT_WAKEUP_PORT, ctrl);

+ /* NIC will compare the received encrypted nonce

+ * with the following data:

+ */

+ alx_mem_w32(hw, L1F_HRTBT_WAKEUP_DATA7, swoi->wakeup_data[0]);

+ alx_mem_w32(hw, L1F_HRTBT_WAKEUP_DATA6, swoi->wakeup_data[1]);

+ alx_mem_w32(hw, L1F_HRTBT_WAKEUP_DATA5, swoi->wakeup_data[2]);

+ alx_mem_w32(hw, L1F_HRTBT_WAKEUP_DATA4, swoi->wakeup_data[3]);

+ alx_mem_w32(hw, L1F_HRTBT_WAKEUP_DATA3, swoi->wakeup_data[4]);

+ alx_mem_w32(hw, L1F_HRTBT_WAKEUP_DATA2, swoi->wakeup_data[5]);

+ alx_mem_w32(hw, L1F_HRTBT_WAKEUP_DATA1, swoi->wakeup_data[6]);

+ alx_mem_w32(hw, L1F_HRTBT_WAKEUP_DATA0, swoi->wakeup_data[7]);

+ hw->annce_addr_off =

+ (hw->annce_addr_off + swoi->total_len + 0x0F) & 0xFFFFFFF0;

+}

+

+

+/*

+ * Desc: CIFS offload, config the ANNOUNCEMENT message to our adapter.

+ */

+static void alf_setup_teredo(struct alx_hw *hw)

+{

+ u32 ctrl;

+ u32 i;

+ struct alx_teredo *trd = &hw->teredo_offload;

+

+ if (!trd->len) {

+ alx_hw_err(hw, "TEREDO offload disabled.

");

+ return;

+ }

+

+ /* Write TEREDO message to SRAM. */

+ for (i = 0; i < trd->len; i += 4) {

+ alx_mem_w32(hw, hw->annce_addr_off + i,

+ *(u32 *) &trd->msg[i]);

+ }

+

+ ctrl = 0;

+ ctrl |= (L1F_TRD_CTRL_EN | L1F_TRD_CTRL_BUBBLE_WAKE_EN);

+ FIELD_SETL(ctrl, L1F_TRD_CTRL_RSHDR_ADDR,

+ ((hw->annce_addr_off - PACKET_MAP_ADDR) >> 3));

+ FIELD_SETL(ctrl, L1F_TRD_CTRL_SINTV_MAX, trd->intv_max);

+ FIELD_SETL(ctrl, L1F_TRD_CTRL_SINTV_MIN, trd->intv_min);

+ alx_mem_w32(hw, L1F_TRD_CTRL, ctrl);

+ alx_hw_info(hw, "TEREDO message annce_base_addr[0x%08x], len%d

",

+ (hw->annce_addr_off - PACKET_MAP_ADDR) >> 3,

+ trd->len);

+

+ ctrl = 0;

+ FIELD_SETL(ctrl, L1F_TRD_RS_SZ, trd->len);

+ /* FIELD_SETL(ctrl, L1F_TRD_RS_NONCE_OFS, trd->nonce_off); */

+ FIELD_SETL(ctrl, L1F_TRD_RS_SEQ_OFS, trd->ipv4_id_off);

+ alx_mem_w32(hw, L1F_TRD_RS, ctrl);

+

+ alx_mem_w32(hw, L1F_TRD_SRV_IP4, trd->svr_ip4);

+ alx_mem_w32(hw, L1F_TRD_CLNT_EXTNL_IP4, trd->clt_external_ip4);

+ ctrl = 0;

+ FIELD_SETL(ctrl, L1F_TRD_PORT_CLNT_EXTNL, trd->clt_external_port);

+ FIELD_SETL(ctrl, L1F_TRD_PORT_SRV, trd->svr_port);

+ alx_mem_w32(hw, L1F_TRD_PORT, ctrl);

+ alx_mem_w32(hw, L1F_TRD_PREFIX, trd->prefix);

+ alx_mem_w32(hw, L1F_TRD_BUBBLE_DA_IP4, trd->clt_ip4);

+ alx_mem_w32(hw, L1F_TRD_BUBBLE_DA_PORT, trd->clt_port);

+

+ hw->annce_addr_off =

+ (hw->annce_addr_off + trd->len + 0x0F) & 0xFFFFFFF0;

+}

+

+

+int alx_setup_annce(struct alx_adapter *adpt, u32 wire_speed)

+{

+ struct alx_hw *hw = &adpt->hw;

+ u32 ctrl;

+ struct alx_swoi *swoi = &hw->swoi_offload;

+ struct alx_teredo *trd = &hw->teredo_offload;

+

+ hw->annce_addr_off = PACKET_MAP_ADDR + (CIFS_ANNCE_ADDR << 3);

+ if (!swoi->len && !trd->len) {

+ alx_hw_err(hw, "No cifs or teredo offload.

");

+ return -EINVAL;

+ }

+

+ if (swoi->ver == ALX_SWOI_VER_CIFS)

+ alf_setup_cifs(hw);

+ else if (swoi->ver == ALX_SWOI_VER_SWOI)

+ alf_setup_swoi(hw);

+ alf_setup_teredo(hw);

+

+ alx_mem_r32(hw, L1F_GAP, &ctrl);

+ FIELD_SETL(ctrl, L1F_GAP_IPGT, 0x60);

+ alx_mem_w32(hw, L1F_GAP, ctrl);

+

+ alx_mem_r32(hw, L1F_MAC_CTRL, &ctrl);

+ ctrl |= (L1F_MAC_CTRL_TX_EN | L1F_MAC_CTRL_PCRCE | L1F_MAC_CTRL_CRCE);

+ alx_mem_w32(hw, L1F_MAC_CTRL, ctrl);

+

+

+ /* The HW won't switch clock even if we slow it down

+ * when the wire speedis 1G. When the wire speed is 100M,

+ * we shouldn't slow it down or the HW can't send frames correctly.

+ * If the wire speed is 10M, we should slow it

+ * down to save power, and the HW does work, too.

+ */

+ if (wire_speed == SPEED_100) {

+ alx_mem_r32(hw, L1F_SERDES, &ctrl);

+ ctrl &= ~L1F_SERDES_MACCLK_SLWDWN;

+ alx_mem_w32(hw, L1F_SERDES, ctrl);

+ }

+

+ return 0;

+}

diff --git a/drivers/net/ethernet/atheros/alx/alx_cifs.h b/drivers/net/ethernet/atheros/alx/alx_cifs.h

new file mode 100644

index 0000000..c6eb7eb

--- /dev/null

+++ b/drivers/net/ethernet/atheros/alx/alx_cifs.h

@@ -0,0 +1,69 @@

+/*

+ * Copyright (c) 2012 Qualcomm Atheros, Inc.

+ *

+ * Permission to use, copy, modify, and/or distribute this software for any

+ * purpose with or without fee is hereby granted, provided that the above

+ * copyright notice and this permission notice appear in all copies.

+ *

+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES

+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF

+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR

+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES

+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN

+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF

+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ */

+

+#ifndef _ALX_CIFS_H_

+#define _ALX_CIFS_H_

+

+#define ALX_SWOI_VER_CIFS 0x01

+#define ALX_SWOI_VER_SWOI 0x02

+#define ALX_MAX_ANNCE_LEN 1522

+

/* Teredo offload configuration handed to alf_setup_teredo().
 * Multi-byte fields are stored little-endian to match the NIC's
 * register layout.
 */
struct alx_teredo {
	u32 ver:4;		/* offload format version */
	u32 prefix_hw_compare:1; /* let HW compare the Teredo prefix */
	u32 intv_max:8;		/* max solicitation interval */
	u32 intv_min:8;		/* min solicitation interval */
	u32 ipv4_id_off:8;	/* offset of the IPv4 ID field in msg */
	u32 s5_wakeup:1;	/* allow wakeup from S5 */
	u32 svr_ip4;		/* little endian order,
				 * compatible with our NIC. */
	u16 svr_port;		/* little endian order */
	u32 clt_ip4;		/* little endian order */
	u16 clt_port;		/* little endian order */
	u32 clt_external_ip4;	/* little endian order */
	u16 clt_external_port;	/* little endian order */
	u32 prefix;		/* little endian order */
	u32 len;		/* length in bytes of msg[] in use */
	char msg[ALX_MAX_ANNCE_LEN]; /* router-solicitation packet image */
};

+

/* CIFS/SWOI offload configuration handed to alf_setup_cifs() /
 * alf_setup_swoi(). Multi-byte fields are little-endian to match the
 * NIC's register layout.
 */
struct alx_swoi {
	u32 ver:4;		/* ALX_SWOI_VER_CIFS or ALX_SWOI_VER_SWOI */
	u32 period:6;		/* announcement send period, seconds */
	u32 is_vlan:1;		/* announcement carries a VLAN tag */
	u32 fraged:1;		/* announcement packet is fragmented */
	u32 len:12;		/* announcement packet length, bytes */
	u32 pkt_is_8023:1;	/* packet uses 802.3 framing */
	u32 pkt_is_ipv6:1;	/* packet is IPv6 (else IPv4) */
	u32 pkt_wakeup_en:1;	/* enable wakeup on matching packet */
	u32 frag_len:16;	/* fragment length when fraged is set */
	u32 svr_ipv4_addr;	/* The source ipv4 address
				 * for SWOI wakeup msg. */
	u32 host_ipv4_addr;	/* little endian order */
	u32 svr_ipv6_addr[4];	/* little endian order */
	u32 host_ipv6_addr[4];	/* little endian order */
	u16 svr_port;		/* SWOI wakeup packet UDP source port. */
	u16 host_port;		/* little endian order */
	u32 wakeup_data[8];	/* expected wakeup nonce, little endian */

	u32 total_len;		/* annce_len + padding, for SW use only */
	char msg[ALX_MAX_ANNCE_LEN]; /* announcement packet image */
};

+

+struct alx_adapter;

+int alx_setup_annce(struct alx_adapter *adpt, u32 wire_speed);

+

+#endif /*_ALX_CIFS_H_*/

diff --git a/drivers/net/ethernet/atheros/alx/alx_dfs.c b/drivers/net/ethernet/atheros/alx/alx_dfs.c

new file mode 100644

index 0000000..0fdeeac

--- /dev/null

+++ b/drivers/net/ethernet/atheros/alx/alx_dfs.c

@@ -0,0 +1,878 @@

+/*

+ * Copyright (c) 2012 Qualcomm Atheros, Inc.

+ *

+ * Permission to use, copy, modify, and/or distribute this software for any

+ * purpose with or without fee is hereby granted, provided that the above

+ * copyright notice and this permission notice appear in all copies.

+ *

+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES

+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF

+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR

+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES

+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN

+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF

+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ */

+

+#include <linux/ip.h>

+#include <net/ipv6.h>

+

+#include "alx.h"

+#include "alx_hw.h"

+

+

+static void alx_mem_r8(const struct alx_hw *hw, int reg, u8 *val)

+{

+ if (unlikely(!hw->link_up))

+ readl(hw->hw_addr + reg);

+ *val = readb(hw->hw_addr + reg);

+}

+

+

+static void alx_io_r32(const struct alx_hw *hw, int reg, u32 *pval)

+{

+ *pval = inl(hw->io_addr + reg);

+}

+

+

+static void alx_io_w32(const struct alx_hw *hw, int reg, u32 val)

+{

+ outl(val, hw->io_addr + reg);

+}

+

+

/*
 * Re-apply the software MAC address after a debugfs "new mac" request.
 *
 * Reads the address currently programmed into the hardware; if it
 * differs from the driver's mac_addr, the hardware value is preserved
 * as the permanent address and the driver's address is written back to
 * the chip.  No-op unless alx_dfs_device_inactive() armed cfg_new_mac.
 *
 * Returns 0 on success or the error from the hardware accessors.
 */
int alx_flush_mac_address(struct alx_adapter *adpt)
{
	struct alx_hw *hw = &adpt->hw;
	u8 sta_addr[ALX_ETH_LENGTH_OF_ADDRESS];
	u8 *mac_addr = hw->mac_addr;
	int retval = 0;

	/* one-shot: only act on a request armed via debugfs */
	if (!adpt->dfs.cfg_new_mac)
		return 0;
	adpt->dfs.cfg_new_mac = false;

	retval = alf_get_mac_addr(hw, sta_addr);
	if (retval)
		return retval;

	retval = alx_validate_mac_addr(sta_addr);
	if (retval)
		return retval;

	if (memcmp(sta_addr, mac_addr, ALX_ETH_LENGTH_OF_ADDRESS)) {
		/* keep what the chip had as the permanent address, then
		 * restore the driver's chosen address to the chip */
		memcpy(hw->mac_perm_addr, sta_addr, ALX_ETH_LENGTH_OF_ADDRESS);
		alf_set_mac_addr(hw, mac_addr);
	}

	return retval;
}

+

+/*

+ * device sub-command

+ */

/*
 * device sub-command
 */
/*
 * Quiesce the device for diag/memcfg use: stop all TX queues, drop the
 * carrier, mask interrupts and halt the MAC.  @new_mac arms
 * alx_flush_mac_address() to reprogram the MAC address on resume.
 * Returns -EINVAL if the device is already inactive.
 */
static int alx_dfs_device_inactive(struct alx_adapter *adpt, bool new_mac)
{
	struct net_device *netdev = adpt->netdev;
	struct alx_hw *hw = &adpt->hw;

	adpt->dfs.cfg_new_mac = new_mac;

	if (CHK_ADPT_FLAG(STATE_INACTIVE)) {
		alx_err(adpt, "can't be inactive again\n");
		return -EINVAL;
	}

	SET_ADPT_FLAG(STATE_INACTIVE);

	netif_tx_stop_all_queues(netdev);
	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	alx_disable_intr(adpt);

	alf_stop_mac(hw);
	return 0;
}

+

+

/*
 * Full stop/start cycle of the device, triggered from debugfs.
 * Does nothing while the interface is down or a reset is in flight.
 */
static void alx_dfs_device_reinit(struct alx_adapter *adpt)
{
	if (CHK_ADPT_FLAG(STATE_DOWN) ||
	    CHK_ADPT_FLAG(STATE_RESETTING)) {
		return;
	}

	/* NOTE(review): unreachable as written -- a set STATE_RESETTING
	 * already returned above, so this wait loop never spins.  Either
	 * the early return or this loop is redundant, and the
	 * check-then-set below is not atomic.  Confirm intended
	 * serialization. */
	while (CHK_ADPT_FLAG(STATE_RESETTING))
		msleep(20);
	SET_ADPT_FLAG(STATE_RESETTING);

	alx_stop_internal(adpt, ALX_OPEN_CTRL_RESET_ALL);
	alx_open_internal(adpt, ALX_OPEN_CTRL_RESET_ALL);

	CLI_ADPT_FLAG(STATE_INACTIVE);
	CLI_ADPT_FLAG(STATE_RESETTING);
}

+

+

+/*

+ * Diagnostic Support

+ */

+static void alx_dfs_diag_config_packet(struct alx_adapter *adpt,

+ struct alx_diag_packet *pkt,

+ union alx_sw_rrdesc *srrd)

+{

+ u32 pid = srrd->genr.pid;

+

+ pkt->type = 0;

+ switch (pid) {

+ case 0: /* non-ip */

+ break;

+ case 1: /* ipv4(only) */

+ pkt->type |= ALX_DIAG_PKTYPE_IPV4;

+ break;

+ case 2: /* tcp/ipv6 */

+ pkt->type |= ALX_DIAG_PKTYPE_IPV6;

+ pkt->type |= ALX_DIAG_PKTYPE_TCP;

+ break;

+ case 3: /* tcp/ipv4 */

+ pkt->type |= ALX_DIAG_PKTYPE_IPV4;

+ pkt->type |= ALX_DIAG_PKTYPE_TCP;

+ break;

+ case 4: /* udp/ipv6 */

+ pkt->type |= ALX_DIAG_PKTYPE_IPV6;

+ pkt->type |= ALX_DIAG_PKTYPE_UDP;

+ break;

+ case 5: /* udp/ipv4 */

+ pkt->type |= ALX_DIAG_PKTYPE_IPV4;

+ pkt->type |= ALX_DIAG_PKTYPE_UDP;

+ break;

+ case 6: /* ipv6(only) */

+ pkt->type |= ALX_DIAG_PKTYPE_IPV6;

+ break;

+ case 7:

+ pkt->type |= ALX_DIAG_PKTYPE_LLDP;

+ break;

+ case 8:

+ pkt->type |= ALX_DIAG_PKTYPE_PTP;

+ break;

+ default: /* invalid protocol */

+ break;

+ }

+

+ if (srrd->genr.type)

+ pkt->type |= ALX_DIAG_PKTYPE_802_3;

+ else

+ pkt->type |= ALX_DIAG_PKTYPE_EII;

+

+ /* error */

+ if (srrd->genr.res) {

+ pkt->type |= ALX_DIAG_PKTYPE_RX_ERR;

+ if (srrd->genr.crc)

+ pkt->type |= ALX_DIAG_PKTYPE_CRC_ERR;

+ }

+

+ if (srrd->genr.icmp)

+ pkt->type |= ALX_DIAG_PKTYPE_INCOMPLETE_ERR;

+

+ if (srrd->genr.l4f)

+ pkt->type |= ALX_DIAG_PKTYPE_L4XSUM_ERR;

+

+ if (srrd->genr.ipf)

+ pkt->type |= ALX_DIAG_PKTYPE_IPXSUM_ERR;

+

+ if (srrd->genr.lene)

+ pkt->type |= ALX_DIAG_PKTYPE_802_3_LEN_ERR;

+

+ pkt->rss_hash = srrd->genr.hash;

+ pkt->rss_cpu_num = srrd->genr.rss_cpu;

+ pkt->xsum = srrd->genr.xsum;

+ pkt->length = srrd->genr.pkt_len - 4;

+

+ if (srrd->genr.vlan_flag)

+ ALX_TAG_TO_VLAN(srrd->genr.vlan_tag, pkt->vlanid);

+}

+

+

/*
 * Queue a received frame for the diag tool instead of the stack.
 *
 * Metadata decoded from the RRD is stored in the next slot of the
 * circular diag_pkt_info array and smuggled to the reader through
 * skb->sk (these skbs never reach the network stack, so the field is
 * free).  Frames are dropped once ALX_DIAG_MAX_RX_PACKETS are already
 * pending.  Runs in IRQ context (uses dev_kfree_skb_irq).
 */
void alx_dfs_diag_receive_skb(struct alx_adapter *adpt, struct sk_buff *skb,
			      union alx_sw_rrdesc *srrd)
{
	struct sk_buff_head *skb_list = &adpt->dfs.diag_skb_list;
	struct alx_diag_packet *pkt;

	if (skb_queue_len(skb_list) >= ALX_DIAG_MAX_RX_PACKETS) {
		if (skb)
			dev_kfree_skb_irq(skb);
		return;
	}

	/* take the next info slot, wrapping at the end of the buffer */
	pkt = adpt->dfs.diag_pkt_info;
	adpt->dfs.diag_pkt_info++;
	memset(pkt, 0, sizeof(struct alx_diag_packet));
	if (((u8 *)adpt->dfs.diag_pkt_info) >=
	    (adpt->dfs.diag_info_buf + adpt->dfs.diag_info_sz)) {
		adpt->dfs.diag_pkt_info =
			(struct alx_diag_packet *)adpt->dfs.diag_info_buf;
	}
	alx_dfs_diag_config_packet(adpt, pkt, srrd);

	/* borrow skb->sk point store struct alx_diag_packet */
	skb->sk = (struct sock *)pkt;

	skb_queue_tail(skb_list, skb);
}

+

+

+/*

+ * Calculate the transmit packet descript needed

+ */

+static bool alx_dfs_diag_check_num_tpdescs(struct alx_tx_queue *txque,

+ struct alx_diag_packet *pkt)

+{

+ u16 num_required = 1;

+ u16 num_available = 0;

+ u16 produce_idx = txque->tpq.produce_idx;

+ u16 consume_idx = txque->tpq.consume_idx;

+

+ if (pkt->type & ALX_DIAG_PKTYPE_TSOV2)

+ num_required += 1;

+

+ num_available = (u16)(consume_idx > produce_idx) ?

+ (consume_idx - produce_idx - 1) :

+ (txque->tpq.count + consume_idx - produce_idx - 1);

+

+ return num_required < num_available;

+}

+

+

+/*

+ * configure tpds according the diag packet

+ */

/*
 * configure tpds according the diag packet
 */
/*
 * For LSOv2 packets, emit the extra leading TPD that carries the total
 * packet length, then clear @stpd so the caller builds the real data
 * TPD from a clean descriptor.  No-op for non-LSOv2 packets.
 */
static void alx_dfs_diag_config_tpd_tsov2(struct alx_tx_queue *txque,
					  struct alx_diag_packet *pkt,
					  union alx_sw_tpdesc *stpd)
{
	/* lso v2 need an extension TPD */
	if (!(pkt->type & ALX_DIAG_PKTYPE_TSOV2))
		return;

	stpd->tso.lso = 1;
	stpd->tso.lso_v2 = 1;
	stpd->tso.pkt_len = pkt->buf[0].length;
	alx_set_tpdesc(txque, stpd);
	memset(stpd, 0, sizeof(union alx_sw_tpdesc));
}

+

+

/*
 * Build the software TPD for a diag packet: VLAN insertion, checksum
 * offload and TSO fields, derived from pkt->type flags and the frame
 * headers found in pkt->buf[0].
 */
static void alx_dfs_diag_config_tpd(struct alx_tx_queue *txque,
				    struct alx_diag_packet *pkt,
				    union alx_sw_tpdesc *stpd)
{
	u16 machdr_offset = 0;
	u16 iphdr_offset = 0;
	struct iphdr *iph;

	/* VLAN */
	if (pkt->type & ALX_DIAG_PKTYPE_VLANINST) {
		stpd->genr.instag = 1;
		ALX_VLAN_TO_TAG(pkt->vlanid, stpd->genr.vlan_tag);
	}

	if (pkt->type & ALX_DIAG_PKTYPE_VLANTAGGED)
		stpd->genr.vtagged = 1;

	/* frame type: non-Ethernet-II frames are flagged as 802.3 */
	if (!(pkt->type & ALX_DIAG_PKTYPE_EII))
		stpd->genr.type = 1;

	/* MAC header length: 14 base, +8 for SNAP, +4 for a VLAN tag */
	machdr_offset = 14;
	if (pkt->type & ALX_DIAG_PKTYPE_SNAP)
		machdr_offset += 8;

	if (pkt->type & ALX_DIAG_PKTYPE_VLANTAGGED)
		machdr_offset += 4;

	/* IP header length */
	if (pkt->type & ALX_DIAG_PKTYPE_IPV4) {
		iph = (struct iphdr *)(pkt->buf[0].addr + machdr_offset);
		iphdr_offset = iph->ihl << 2;
	} else if (pkt->type & ALX_DIAG_PKTYPE_IPV6) {
		struct ipv6hdr *ipv6h;
		struct ipv6_opt_hdr *opth;
		u8 nexthdr;

		/* Walk the IPv6 extension-header chain to the L4 header.
		 * NOTE(review): unbounded walk over a user-supplied
		 * buffer -- a malformed chain could read past the end of
		 * pkt->buf[0]; confirm validation happens upstream. */
		ipv6h = (struct ipv6hdr *)(pkt->buf[0].addr + machdr_offset);
		iphdr_offset = sizeof(struct ipv6hdr);
		nexthdr = ipv6h->nexthdr;
		while (nexthdr != NEXTHDR_TCP && nexthdr != NEXTHDR_UDP &&
		       nexthdr != NEXTHDR_NONE) {
			/* have IPv6 extension header */
			opth = (struct ipv6_opt_hdr *)
				((u8 *)ipv6h + iphdr_offset);

			if (nexthdr == NEXTHDR_FRAGMENT)
				iphdr_offset += 8;
			else if (nexthdr == NEXTHDR_AUTH)
				iphdr_offset += (opth->hdrlen + 2) << 2;
			else
				iphdr_offset += ipv6_optlen(opth);
			nexthdr = opth->nexthdr;
		}
	}

	/* checksum */
	if (pkt->type & ALX_DIAG_PKTYPE_IPXSUM) {
		/* IP checksum */
		stpd->csum.ip_csum = 1;
	}

	if (pkt->type & ALX_DIAG_PKTYPE_L4XSUM) {
		/* L4 checksum */
		if (pkt->type & ALX_DIAG_PKTYPE_TCP)
			stpd->csum.tcp_csum = 1;

		if (pkt->type & ALX_DIAG_PKTYPE_UDP)
			stpd->csum.udp_csum = 1;

		stpd->csum.payld_offset = machdr_offset + iphdr_offset;
	}

	if (pkt->type & ALX_DIAG_PKTYPE_CXSUM) {
		/* Custom checksum: offsets are in 16-bit words (>> 1) */
		stpd->csum.c_csum = 1;
		stpd->csum.payld_offset = pkt->csum_start >> 1;
		stpd->csum.cxsum_offset = pkt->csum_pos >> 1;
	}

	/* TCP Large send offload */
	if (pkt->type & ALX_DIAG_PKTYPE_TSOV1) {
		stpd->tso.lso = 1;
		stpd->tso.tcphdr_offset = machdr_offset + iphdr_offset;
		stpd->tso.mss = pkt->mss;
	}

	if (pkt->type & ALX_DIAG_PKTYPE_TSOV2) {
		stpd->tso.lso = 1;
		stpd->tso.lso_v2 = 1;
		stpd->tso.tcphdr_offset = machdr_offset + iphdr_offset;
		stpd->tso.mss = pkt->mss;
	}

	if (pkt->type & ALX_DIAG_PKTYPE_IPV4)
		stpd->genr.ipv4 = 1;
}

+

+

/*
 * DMA-map the single data buffer of a diag packet and fill in the
 * final (last-fragment) TPD.  The TX buffer's skb pointer is left NULL
 * so the completion path does not try to free a nonexistent skb.
 */
static void alx_dfs_diag_tx_map(struct alx_adapter *adpt,
				struct alx_tx_queue *txque,
				struct alx_diag_packet *pkt,
				union alx_sw_tpdesc *stpd)
{
	struct alx_buffer *tpbuf = NULL;

	tpbuf = GET_TP_BUFFER(txque, txque->tpq.produce_idx);
	tpbuf->length = pkt->buf[0].length;
	/* NOTE(review): the mapping result is not checked with
	 * dma_mapping_error(); a failed mapping would be handed to the
	 * hardware as-is.  Confirm and add a check. */
	tpbuf->dma = dma_map_single(txque->dev, pkt->buf[0].addr,
				    tpbuf->length, DMA_TO_DEVICE);
	stpd->genr.addr = tpbuf->dma;
	stpd->genr.buffer_len = tpbuf->length;
	alx_set_tpdesc(txque, stpd);

	/* The last tpd */
	alx_set_tpdesc_lastfrag(txque);
	/* diag don't use skb, so it's not need to free */
	tpbuf->skb = NULL;
}

+

+

/*
 * Transmit one diag packet on @txque: build the (optional LSOv2) TPDs,
 * map the buffer and kick the hardware producer index.
 *
 * NOTE(review): NETDEV_TX_LOCKED is a legacy LLTX return code; modern
 * kernels removed it -- confirm against the target kernel version.
 */
static netdev_tx_t alx_dfs_diag_xmit_frame(struct alx_adapter *adpt,
					   struct alx_tx_queue *txque,
					   struct alx_diag_packet *pkt)
{
	struct alx_hw *hw = &adpt->hw;
	unsigned long flags = 0;
	union alx_sw_tpdesc stpd; /* normal*/

	if (CHK_ADPT_FLAG(STATE_DOWN))
		return NETDEV_TX_OK;

	if (!spin_trylock_irqsave(&adpt->tx_lock, flags)) {
		alx_err(adpt, "tx locked!\n");
		return NETDEV_TX_LOCKED;
	}

	if (!alx_dfs_diag_check_num_tpdescs(txque, pkt)) {
		/* no enough descriptor, just stop queue */
		spin_unlock_irqrestore(&adpt->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	memset(&stpd, 0, sizeof(union alx_sw_tpdesc));
	/* emits the extra LSOv2 TPD first (no-op otherwise) */
	alx_dfs_diag_config_tpd_tsov2(txque, pkt, &stpd);
	alx_dfs_diag_config_tpd(txque, pkt, &stpd);
	alx_dfs_diag_tx_map(adpt, txque, pkt, &stpd);

	/* update produce idx */
	wmb();		/* descriptors must be visible before the doorbell */
	alx_mem_w16(hw, txque->produce_reg, txque->tpq.produce_idx);

	spin_unlock_irqrestore(&adpt->tx_lock, flags);
	return NETDEV_TX_OK;
}

+

+

/*
 * Enter diag mode: enable MAC loopback, disable clock gating and
 * allocate the three staging buffers (RX readout, TX injection, and
 * the circular RX packet-info array).  Refuses to run under MSI-X,
 * while the interface is down, or when diag is already active.
 *
 * Returns 0 on success, -EINVAL on state errors, -ENOMEM on
 * allocation failure (with all prior allocations rolled back).
 */
static int alx_dfs_diag_begin(struct alx_adapter *adpt)
{
	struct alx_hw *hw = &adpt->hw;
	int retval = 0;

	if (CHK_ADPT_FLAG(MSIX_EN)) {
		alx_err(adpt, "warning! Please use MSI or Shared intr\n");
		return -EINVAL;
	}

	if (CHK_ADPT_FLAG(STATE_DOWN) ||
	    CHK_ADPT_FLAG(STATE_DIAG_RUNNING)) {
		alx_err(adpt, "warning! Diag is running or nic is down\n");
		return -EINVAL;
	}

	SET_ADPT_FLAG(STATE_DIAG_RUNNING);
	/* loop frames back inside the MAC while diag owns the device */
	SET_HW_FLAG(LOOPBACK_EN);
	alf_update_mac_filter(hw);

	/* disable clock gating for the duration of the diag session */
	alx_mem_w32(hw, ALX_CLK_GATE, 0x0);

	skb_queue_head_init(&adpt->dfs.diag_skb_list);

	/* recv/send buffers hold a packet-info array plus payload data */
	adpt->dfs.diag_recv_sz = ALX_DIAG_MAX_DATA_BUFFER +
		sizeof(struct alx_diag_packet) * ALX_DIAG_MAX_RX_PACKETS;
	adpt->dfs.diag_send_sz = ALX_DIAG_MAX_DATA_BUFFER +
		sizeof(struct alx_diag_packet) * ALX_DIAG_MAX_TX_PACKETS;
	adpt->dfs.diag_info_sz =
		sizeof(struct alx_diag_packet) * ALX_DIAG_MAX_RX_PACKETS;

	netif_info(adpt, hw, adpt->netdev,
		   "send_buf_sz=0x%x, recv_buf_sz=0x%x, info_buf_sz=0x%x\n",
		   adpt->dfs.diag_send_sz, adpt->dfs.diag_recv_sz,
		   adpt->dfs.diag_info_sz);

	adpt->dfs.diag_recv_buf = kmalloc(adpt->dfs.diag_recv_sz, GFP_KERNEL);
	if (!adpt->dfs.diag_recv_buf) {
		alx_err(adpt, "error alloc recv buff\n");
		retval = -ENOMEM;
		goto err_alloc_recv_mem;
	}

	adpt->dfs.diag_send_buf = kmalloc(adpt->dfs.diag_send_sz, GFP_KERNEL);
	if (!adpt->dfs.diag_send_buf) {
		alx_err(adpt, "error alloc send buff\n");
		retval = -ENOMEM;
		goto err_alloc_send_mem;
	}

	adpt->dfs.diag_info_buf = kmalloc(adpt->dfs.diag_info_sz, GFP_KERNEL);
	if (!adpt->dfs.diag_info_buf) {
		alx_err(adpt, "error alloc packet info buff\n");
		retval = -ENOMEM;
		goto err_alloc_info_mem;
	}
	adpt->dfs.diag_pkt_info =
		(struct alx_diag_packet *)adpt->dfs.diag_info_buf;
	return 0;

err_alloc_info_mem:
	kfree(adpt->dfs.diag_send_buf);
err_alloc_send_mem:
	kfree(adpt->dfs.diag_recv_buf);
err_alloc_recv_mem:
	/* roll back loopback mode and the diag-running state */
	CLI_HW_FLAG(LOOPBACK_EN);
	alf_update_mac_filter(hw);
	CLI_ADPT_FLAG(STATE_DIAG_RUNNING);
	return retval;
}

+

+

+static int alx_dfs_diag_end(struct alx_adapter *adpt)

+{

+ struct alx_hw *hw = &adpt->hw;

+

+ if (!CHK_ADPT_FLAG(STATE_DIAG_RUNNING)) {

+ alx_err(adpt, "can't end diag, becasue diag isn't running

");

+ return -EINVAL;

+ }

+

+ kfree(adpt->dfs.diag_recv_buf);

+ kfree(adpt->dfs.diag_send_buf);

+ kfree(adpt->dfs.diag_info_buf);

+ adpt->dfs.diag_recv_buf = NULL;

+ adpt->dfs.diag_send_buf = NULL;

+ adpt->dfs.diag_info_buf = NULL;

+ adpt->dfs.diag_pkt_info = NULL;

+ adpt->dfs.diag_info_sz = 0;

+ adpt->dfs.diag_recv_sz = 0;

+ adpt->dfs.diag_send_sz = 0;

+

+ skb_queue_purge(&adpt->dfs.diag_skb_list);

+

+ CLI_HW_FLAG(LOOPBACK_EN);

+ alf_update_mac_filter(hw);

+ CLI_ADPT_FLAG(STATE_DIAG_RUNNING);

+ return 0;

+}

+

+

+static int alx_dfs_diag_rx_pkts(struct alx_adapter *adpt, char *buf,

+ u32 size_in, u32 *size_out)

+{

+ struct sk_buff_head *list = &adpt->dfs.diag_skb_list;

+ struct alx_diag_packet *pkt, *fpkt;

+ struct sk_buff *skb;

+ u8 *data;

+ u32 i, count, offset;

+

+ if (CHK_ADPT_FLAG(STATE_DOWN) ||

+ !CHK_ADPT_FLAG(STATE_DIAG_RUNNING)) {

+ alx_err(adpt, "warning! Diag isn't running or nic is down

");

+ return -EINVAL;

+ }

+

+ count = skb_queue_len(list);

+ if (!count) {

+ *size_out = 0;

+ return -EINVAL;

+ }

+

+ offset = count * sizeof(struct alx_diag_packet);

+ if (offset >= adpt->dfs.diag_recv_sz) {

+ alx_err(adpt, "used diag buffer is greater than allocated

");

+ return -EINVAL;

+ }

+

+ memset(adpt->dfs.diag_recv_buf, 0, adpt->dfs.diag_recv_sz);

+ fpkt = pkt = (struct alx_diag_packet *)adpt->dfs.diag_recv_buf;

+ data = adpt->dfs.diag_recv_buf + offset;

+

+ for (i = 0; i < count; i++) {

+ skb = skb_dequeue(list);

+ if (!skb)

+ break;

+ memcpy(pkt, skb->sk, sizeof(struct alx_diag_packet));

+ pkt->buf[0].offset = offset;

+ pkt->buf[0].length = pkt->length;

+

+ if (pkt->length != skb->len) {

+ netif_warn(adpt, hw, adpt->netdev,

+ "pkt->length(0x%x) != skb->len(0x%x)

",

+ pkt->length, skb->len);

+ }

+ memcpy(data, skb->data, pkt->length);

+

+ offset += pkt->length;

+ if (offset >= adpt->dfs.diag_recv_sz) {

+ alx_err(adpt,

+ "used diag buffer is greater than allocated

");

+ }

+ kfree_skb(skb);

+ pkt->next = pkt + 1;

+ pkt++;

+ data = adpt->dfs.diag_recv_buf + offset;

+ }

+ fpkt[count - 1].next = NULL;

+

+ if (!buf || offset > size_in) {

+ alx_err(adpt, "receive buf is null or too small

");

+ return -EINVAL;

+ }

+

+ *size_out = offset;

+ if (copy_to_user((void __user *)buf, adpt->dfs.diag_recv_buf,

+ offset)) {

+ alx_err(adpt, "can't copy to user space

");

+ return -EFAULT;

+ }

+ return 0;

+}

+

+static int alx_dfs_diag_tx_pkts(struct alx_adapter *adpt,

+ char *buf, u32 size_in)

+{

+ struct alx_diag_packet *pkt;

+ struct alx_tx_queue *txque = adpt->tx_queue[0];

+ int num_pkts = ALX_DIAG_MAX_TX_PACKETS;

+

+ if (CHK_ADPT_FLAG(STATE_DOWN) ||

+ !CHK_ADPT_FLAG(STATE_DIAG_RUNNING)) {

+ alx_err(adpt, "warning! Diag isn't running or nic is down

");

+ return -EINVAL;

+ }

+

+ pkt = (struct alx_diag_packet *)adpt->dfs.diag_send_buf;

+ if (!buf || size_in > adpt->dfs.diag_send_sz) {

+ alx_err(adpt, "sending buf is null or too big

");

+ return -EINVAL;

+ }

+

+ if (copy_from_user(pkt, (void __user *)buf, size_in)) {

+ alx_err(adpt, "can't copy from user

");

+ return -EFAULT;

+ }

+

+ do {

+ /* fix buf[0].addr in alx_diag_packet */

+ if (pkt->buf[0].offset > size_in)

+ alx_err(adpt, "offset of alx_diag_packet error

");

+

+ pkt->buf[0].addr = adpt->dfs.diag_send_buf + pkt->buf[0].offset;

+

+ alx_dfs_diag_xmit_frame(adpt, txque, pkt);

+

+ if (pkt->next == NULL)

+ break;

+ pkt++;

+ } while (--num_pkts);

+

+ return 0;

+}

+

+

+

+static int alx_dfs_cifs_annce_clear(struct alx_adapter *adpt,

+ struct alx_dfs_ioctl_data *did)

+{

+ adpt->hw.swoi_offload.len = 0;

+ return 0;

+}

+

+static int alx_dfs_cifs_annce_config(struct alx_adapter *adpt,

+ struct alx_dfs_ioctl_data *did)

+{

+ struct alx_hw *hw = &adpt->hw;

+ int ret = 0;

+

+ if (!adpt->cifs ||

+ did->param.buf.addr == NULL ||

+ did->param.buf.size_in != sizeof(hw->swoi_offload)) {

+ hw->swoi_offload.len = 0;

+ return -EINVAL;

+ }

+

+ ret = copy_from_user(&hw->swoi_offload,

+ (void __user *)did->param.buf.addr,

+ sizeof(hw->swoi_offload));

+ if (ret != 0)

+ return -EINVAL;

+

+ if (hw->swoi_offload.ver == ALX_SWOI_VER_CIFS) {

+ if (FIELD_GETX(hw->pci_revid, L1F_PCI_REVID) > L1F_REV_A1) {

+ hw->swoi_offload.len = 0;

+ return -EINVAL;

+ }

+ } else if (hw->swoi_offload.ver == ALX_SWOI_VER_SWOI) {

+ if (FIELD_GETX(hw->pci_revid, L1F_PCI_REVID) < L1F_REV_B0) {

+ hw->swoi_offload.total_len = 0;

+ return -EINVAL;

+ }

+ } else {

+ hw->swoi_offload.total_len = 0;

+ return -EINVAL;

+ }

+

+ return 0;

+}

+

+

/*
 * Dispatch one debugfs ioctl sub-command: raw MAC register access
 * (memory-, config-space- or io-port-mapped), external PHY register
 * access, device inactive/reinit, diag session control and SWOI
 * announce configuration.  Results are written back into @did for the
 * caller to copy out to user space.
 *
 * NOTE(review): only the PHY read/write cases require CAP_NET_ADMIN;
 * the raw register read/write cases do not -- confirm that asymmetry
 * is intended.
 *
 * Returns 0 or a negative errno.
 */
static int alx_dfs_ioctl_command_general(struct alx_adapter *adpt,
					 struct alx_dfs_ioctl_data *did)
{
	struct alx_hw *hw = &adpt->hw;
	int retval = 0;

	switch (did->sub_cmd) {
	case ALX_DFS_IOCTL_SCMD_GMAC_REG_32:
		alx_mem_r32(hw, did->param.mac.num, &did->param.mac.val32);
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: Read reg_32 %04x %08x\n",
			  did->param.mac.num, did->param.mac.val32);
		break;
	case ALX_DFS_IOCTL_SCMD_SMAC_REG_32:
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: write reg_32 %04x %08x\n",
			  did->param.mac.num, did->param.mac.val32);
		alx_mem_w32(hw, did->param.mac.num, did->param.mac.val32);
		break;

	case ALX_DFS_IOCTL_SCMD_GMAC_REG_16:
		alx_mem_r16(hw, did->param.mac.num, &did->param.mac.val16);
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: read reg_16 %04x %08x\n",
			  did->param.mac.num, did->param.mac.val16);
		break;
	case ALX_DFS_IOCTL_SCMD_SMAC_REG_16:
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: write reg_16 %04x %08x\n",
			  did->param.mac.num, did->param.mac.val16);
		alx_mem_w16(hw, did->param.mac.num, did->param.mac.val16);
		break;
	case ALX_DFS_IOCTL_SCMD_GMAC_REG_8:
		alx_mem_r8(hw, did->param.mac.num, &did->param.mac.val8);
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: read reg_8 %04x %08x\n",
			  did->param.mac.num, did->param.mac.val8);
		break;
	case ALX_DFS_IOCTL_SCMD_SMAC_REG_8:
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: write reg_8 %04x %08x\n",
			  did->param.mac.num, did->param.mac.val8);
		alx_mem_w8(hw, did->param.mac.num, did->param.mac.val8);
		break;

	/* Read/Write MAC Register By config Mode */
	case ALX_DFS_IOCTL_SCMD_GMAC_CFG_32:
		alx_cfg_r32(hw, did->param.mac.num, &did->param.mac.val32);
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: read cfg_32 %04x %08x\n",
			  did->param.mac.num, did->param.mac.val32);
		break;
	case ALX_DFS_IOCTL_SCMD_SMAC_CFG_32:
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: write cfg_32 %04x %08x\n",
			  did->param.mac.num, did->param.mac.val32);
		alx_cfg_w32(hw, did->param.mac.num, did->param.mac.val32);
		break;

	/* Read/Write MAC Register By IO Port */
	case ALX_DFS_IOCTL_SCMD_GMAC_IO_32:
		alx_io_r32(hw, did->param.mac.num, &did->param.mac.val32);
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: read io_32 %04x %08x\n",
			  did->param.mac.num, did->param.mac.val32);
		break;
	case ALX_DFS_IOCTL_SCMD_SMAC_IO_32:
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: write io_32 %04x %08x\n",
			  did->param.mac.num, did->param.mac.val32);
		alx_io_w32(hw, did->param.mac.num, did->param.mac.val32);
		break;


	/* Read/Write PHY Ext Register (requires CAP_NET_ADMIN) */
	case ALX_DFS_IOCTL_SCMD_GMII_EXT_REG:
		if (!capable(CAP_NET_ADMIN)) {
			retval = -EPERM;
			goto out;
		}

		retval = alf_read_ext_phy_reg(hw, (u8)did->param.mii.dev,
					      did->param.mii.num,
					      &did->param.mii.val);
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: read phy_ext %02x:%02x %04x\n",
			  did->param.mii.dev, did->param.mii.num,
			  did->param.mii.val);
		if (retval) {
			retval = -EIO;
			goto out;
		}
		break;

	case ALX_DFS_IOCTL_SCMD_SMII_EXT_REG:
		if (!capable(CAP_NET_ADMIN)) {
			retval = -EPERM;
			goto out;
		}

		retval = alf_write_ext_phy_reg(hw, (u8)did->param.mii.dev,
					       did->param.mii.num,
					       did->param.mii.val);
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: write phy_ext %02x:%02x %04x\n",
			  did->param.mii.dev, did->param.mii.num,
			  did->param.mii.val);
		if (retval) {
			retval = -EIO;
			goto out;
		}
		break;


	/* Diag & Memcfg */
	case ALX_DFS_IOCTL_SCMD_DEVICE_INACTIVE:
		netif_dbg(adpt, hw, adpt->netdev, "DFS: Device Inactive\n");
		retval = alx_dfs_device_inactive(adpt, did->param.dat.val0);
		break;

	case ALX_DFS_IOCTL_SCMD_DEVICE_REINIT:
		netif_dbg(adpt, hw, adpt->netdev, "DFS: Device Reset\n");
		alx_dfs_device_reinit(adpt);
		break;

	case ALX_DFS_IOCTL_SCMD_DIAG_BEGIN:
		netif_dbg(adpt, hw, adpt->netdev, "DFS: Diag begin.\n");
		retval = alx_dfs_diag_begin(adpt);
		break;
	case ALX_DFS_IOCTL_SCMD_DIAG_END:
		netif_dbg(adpt, hw, adpt->netdev, "DFS: Diag end.\n");
		retval = alx_dfs_diag_end(adpt);
		break;
	case ALX_DFS_IOCTL_SCMD_DIAG_TX_PKT:
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: Diag TX (%p:in-%08x)\n",
			  did->param.buf.addr, did->param.buf.size_in);
		retval = alx_dfs_diag_tx_pkts(adpt, did->param.buf.addr,
					      did->param.buf.size_in);
		break;
	case ALX_DFS_IOCTL_SCMD_DIAG_RX_PKT:
		retval = alx_dfs_diag_rx_pkts(adpt, did->param.buf.addr,
					      did->param.buf.size_in,
					      &did->param.buf.size_out);
		netif_dbg(adpt, hw, adpt->netdev,
			  "DFS: Diag RX (%p:in-%08x, out-%08x)\n",
			  did->param.buf.addr, did->param.buf.size_in,
			  did->param.buf.size_out);
		break;

	/* SWOI */
	case ALX_DFS_IOCTL_SCMD_ANNCE_CLEAR:
		retval = alx_dfs_cifs_annce_clear(adpt, did);
		break;

	case ALX_DFS_IOCTL_SCMD_ANNCE_CONFIG:
		retval = alx_dfs_cifs_annce_config(adpt, did);
		break;

	default:
		retval = -EOPNOTSUPP;
		break;
	}
out:
	return retval;
}

+

+/*

+ * debugfs file operation

+ */

+long alx_debugfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)

+{

+ struct alx_adapter *adpt = (struct alx_adapter *)file->private_data;

+ struct alx_dfs_ioctl_data did;

+ long retval = 0;

+

+ if (copy_from_user(&did, (void __user *)arg, sizeof(did)))

+ return -EFAULT;

+

+ switch (cmd) {

+ case ALX_DFS_IOCTL_CMD_GENR:

+ retval = alx_dfs_ioctl_command_general(adpt, &did);

+ break;

+ default:

+ return -EINVAL;

+ }

+

+ if (copy_to_user((void __user *)arg, &did, sizeof(did)))

+ return -EFAULT;

+

+ return retval;

+}

diff --git a/drivers/net/ethernet/atheros/alx/alx_dfs.h b/drivers/net/ethernet/atheros/alx/alx_dfs.h

new file mode 100644

index 0000000..ccc83a7

--- /dev/null

+++ b/drivers/net/ethernet/atheros/alx/alx_dfs.h

@@ -0,0 +1,182 @@

+/*

+ * Copyright (c) 2012 Qualcomm Atheros, Inc.

+ *

+ * Permission to use, copy, modify, and/or distribute this software for any

+ * purpose with or without fee is hereby granted, provided that the above

+ * copyright notice and this permission notice appear in all copies.

+ *

+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES

+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF

+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR

+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES

+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN

+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF

+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ */

+

+#ifndef _ALX_DFS_H_

+#define _ALX_DFS_H_

+

+/*

+ * debugfs parameter in alx_adapter

+ */

/*
 * debugfs parameter in alx_adapter
 */
/* Per-adapter debugfs state, embedded in struct alx_adapter. */
struct alx_debugfs_param {
	struct dentry *fent;	/* debugfs entry for this adapter */

	/* DIAG parameter: staging buffers for the diag tool.
	 * recv/send hold a packet-info array followed by payload data;
	 * info_buf is a circular array of alx_diag_packet filled by
	 * alx_dfs_diag_receive_skb() at RX time. */
	u8 *diag_recv_buf;
	u8 *diag_send_buf;
	u8 *diag_info_buf;
	u32 diag_recv_sz;
	u32 diag_send_sz;
	u32 diag_info_sz;
	struct alx_diag_packet *diag_pkt_info;	/* next free info slot */
	struct sk_buff_head diag_skb_list;	/* RX frames awaiting readout */

	/* MEMCFG parameter: armed by DEVICE_INACTIVE, consumed by
	 * alx_flush_mac_address() */
	bool cfg_new_mac;
};

+

+/*

+ * debugfs IOCTL parameter

+ */

/*
 * debugfs IOCTL parameter
 */
/* User buffer descriptor for diag TX/RX and announce config.
 * NOTE(review): holding a raw user pointer in `void *` breaks 32-bit
 * compat ioctls (struct size differs); a __u64 would be safer. */
struct alx_dfs_ioctl_param_buf {
	void *addr;		/* user-space buffer */
	__u32 size_in;		/* bytes supplied by user */
	__u32 size_out;		/* bytes produced by driver */
};

/* Two generic scalar arguments (e.g. DEVICE_INACTIVE's new_mac flag). */
struct alx_dfs_ioctl_param_dat {
	__u32 val0;
	__u32 val1;
};

/* MAC register access: register number plus an 8/16/32-bit value. */
struct alx_dfs_ioctl_param_mac {
	__u32 num;		/* register offset */
	union {
		__u32 val32;
		__u16 val16;
		__u8 val8;
	};
};

/* External PHY register access (device address, register, value). */
struct alx_dfs_ioctl_param_mii {
	__u16 dev;
	__u16 num;
	__u16 val;
};


/* Parameter block exchanged through ALX_DFS_IOCTL_CMD_GENR; the
 * ALX_DFS_IOCTL_SCMD_* value selects which union member is used. */
struct alx_dfs_ioctl_data {
	__u32 sub_cmd;
	union {
		struct alx_dfs_ioctl_param_buf buf;
		struct alx_dfs_ioctl_param_dat dat;
		struct alx_dfs_ioctl_param_mac mac;
		struct alx_dfs_ioctl_param_mii mii;
	} param;
};

+

+

+#define ALX_DFS_IOCTL_CMD_GENR _IOWR('L', 0x80, struct alx_dfs_ioctl_data)

+

+#define ALX_DFS_IOCTL_SCMD_GMAC_REG_32 0x0001

+#define ALX_DFS_IOCTL_SCMD_SMAC_REG_32 0x0002

+#define ALX_DFS_IOCTL_SCMD_GMAC_REG_16 0x0003

+#define ALX_DFS_IOCTL_SCMD_SMAC_REG_16 0x0004

+#define ALX_DFS_IOCTL_SCMD_GMAC_REG_8 0x0005

+#define ALX_DFS_IOCTL_SCMD_SMAC_REG_8 0x0006

+

+#define ALX_DFS_IOCTL_SCMD_GMAC_CFG_32 0x0011

+#define ALX_DFS_IOCTL_SCMD_SMAC_CFG_32 0x0012

+

+#define ALX_DFS_IOCTL_SCMD_GMAC_IO_32 0x0021

+#define ALX_DFS_IOCTL_SCMD_SMAC_IO_32 0x0022

+

+#define ALX_DFS_IOCTL_SCMD_GMII_EXT_REG 0x0031

+#define ALX_DFS_IOCTL_SCMD_SMII_EXT_REG 0x0032

+#define ALX_DFS_IOCTL_SCMD_GMII_IDR_REG 0x0033

+#define ALX_DFS_IOCTL_SCMD_SMII_IDR_REG 0x0034

+

+#define ALX_DFS_IOCTL_SCMD_DEVICE_INACTIVE 0x10001

+#define ALX_DFS_IOCTL_SCMD_DEVICE_REINIT 0x10002

+#define ALX_DFS_IOCTL_SCMD_DIAG_BEGIN 0x10003

+#define ALX_DFS_IOCTL_SCMD_DIAG_END 0x10004

+#define ALX_DFS_IOCTL_SCMD_DIAG_TX_PKT 0x10005

+#define ALX_DFS_IOCTL_SCMD_DIAG_RX_PKT 0x10006

+

+#define ALX_DFS_IOCTL_SCMD_ANNCE_CLEAR 0x20001

+#define ALX_DFS_IOCTL_SCMD_ANNCE_CONFIG 0x20002

+

+

+/*

+ * Diag tool support

+ */

+#define ALX_DIAG_MAX_PACKET_BUFS 1

+#define ALX_DIAG_MAX_DATA_BUFFER (48 * 64 * 1024)

+#define ALX_DIAG_MAX_TX_PACKETS 64

+#define ALX_DIAG_MAX_RX_PACKETS 512

+

/* One data buffer of a diag packet: kernel address plus its offset and
 * length within the user-visible staging buffer. */
struct alx_diag_buf {
	u8 *addr;
	u32 offset;
	u32 length;
};

/* Descriptor exchanged with the diag tool for every injected (TX) or
 * captured (RX) frame; linked via ->next inside a staging buffer. */
struct alx_diag_packet {
	struct alx_diag_packet *next;	/* next descriptor, NULL = last */
	u32 length; /* total length of the packet(buf) */
	u32 type; /* packet type, vlan, ip checksum */
	struct alx_diag_buf buf[ALX_DIAG_MAX_PACKET_BUFS];
	struct alx_diag_buf sglist[ALX_DIAG_MAX_PACKET_BUFS];
	u16 vlanid;
	u16 mss;			/* TSO maximum segment size */
	u32 rss_hash;
	u16 rss_cpu_num;
	u16 xsum; /* rx, ip-payload checksum */
	u16 csum_start; /* custom checksum offset to the
			 * mac-header */
	u16 csum_pos; /* custom checksom position
		       * (to the mac_header) */
	u32 uplevel_reserved[4];	/* for the user-space tool */
	void *lowlevel_reserved[4];	/* for the driver */
};

+#define ALX_DIAG_PKTYPE_IPXSUM 0x00000001L /* ip checksum offload,

+ * TO:task offload */

+#define ALX_DIAG_PKTYPE_L4XSUM 0x00000002L /* tcp/udp checksum

+ * offload */

+#define ALX_DIAG_PKTYPE_VLANINST 0x00000004L /* insert vlan tag */

+#define ALX_DIAG_PKTYPE_TSOV1 0x00000008L /* tcp large send v1 */

+#define ALX_DIAG_PKTYPE_TSOV2 0x00000010L /* tcp large send v2 */

+#define ALX_DIAG_PKTYPE_CXSUM 0x00000020L /* checksum offload */

+#define ALX_DIAG_PKTYPE_VLANTAGGED 0x00000040L /* vlan tag */

+#define ALX_DIAG_PKTYPE_IPV4 0x00000080L /* ipv4 */

+#define ALX_DIAG_PKTYPE_IPV6 0x00000100L /* ipv6 */

+#define ALX_DIAG_PKTYPE_TCP 0x00000200L /* tcp */

+#define ALX_DIAG_PKTYPE_UDP 0x00000400L /* udp */

+#define ALX_DIAG_PKTYPE_EII 0x00000800L /* ethernet II */

+#define ALX_DIAG_PKTYPE_802_3 0x00001000L /* 802.3 */

+#define ALX_DIAG_PKTYPE_SNAP 0x00002000L /* 802.2/snap */

+#define ALX_DIAG_PKTYPE_FRAGMENT 0x00004000L /* fragment ip packet */

+#define ALX_DIAG_PKTYPE_SGLIST_VALID 0x00008000L /* SGList valid */

+#define ALX_DIAG_PKTYPE_HASH_VLAID 0x00010000L /* Hash valid */

+#define ALX_DIAG_PKTYPE_CPUNUM_VALID 0x00020000L /* CpuNum valid */

+#define ALX_DIAG_PKTYPE_XSUM_VALID 0x00040000L

+#define ALX_DIAG_PKTYPE_IPXSUM_ERR 0x00080000L

+#define ALX_DIAG_PKTYPE_L4XSUM_ERR 0x00100000L

+#define ALX_DIAG_PKTYPE_802_3_LEN_ERR 0x00200000L

+#define ALX_DIAG_PKTYPE_INCOMPLETE_ERR 0x00400000L

+#define ALX_DIAG_PKTYPE_CRC_ERR 0x00800000L

+#define ALX_DIAG_PKTYPE_RX_ERR 0x01000000L

+#define ALX_DIAG_PKTYPE_PTP 0x02000000L /* 1588 PTP */

+#define ALX_DIAG_PKTYPE_LLDP 0x04000000L /* IEEE LLDP */

+

+

+union alx_sw_rrdesc;

+int alx_flush_mac_address(struct alx_adapter *adpt);

+void alx_dfs_diag_receive_skb(struct alx_adapter *adpt, struct sk_buff *skb,

+ union alx_sw_rrdesc *srrd);

+

+long alx_debugfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

+

+#endif

diff --git a/drivers/net/ethernet/atheros/alx/alx_ethtool.c b/drivers/net/ethernet/atheros/alx/alx_ethtool.c

new file mode 100644

index 0000000..92fb461

--- /dev/null

+++ b/drivers/net/ethernet/atheros/alx/alx_ethtool.c

@@ -0,0 +1,337 @@

+/*

+ * Copyright (c) 2012 Qualcomm Atheros, Inc.

+ *

+ * Permission to use, copy, modify, and/or distribute this software for any

+ * purpose with or without fee is hereby granted, provided that the above

+ * copyright notice and this permission notice appear in all copies.

+ *

+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES

+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF

+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR

+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES

+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN

+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF

+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ */

+

+

+#include <linux/pci.h>

+#include <linux/ethtool.h>

+

+#include "alx.h"

+#include "alx_hwcom.h"

+

+static int alx_get_settings(struct net_device *netdev,

+ struct ethtool_cmd *ecmd)

+{

+ struct alx_adapter *adpt = netdev_priv(netdev);

+ struct alx_hw *hw = &adpt->hw;

+

+ ecmd->supported = (SUPPORTED_10baseT_Half |

+ SUPPORTED_10baseT_Full |

+ SUPPORTED_100baseT_Half |

+ SUPPORTED_100baseT_Full |

+ SUPPORTED_Autoneg |

+ SUPPORTED_TP);

+ if (CHK_HW_FLAG(GIGA_CAP))

+ ecmd->supported |= SUPPORTED_1000baseT_Full;

+

+ ecmd->advertising = ADVERTISED_TP;

+

+ ecmd->advertising |= ADVERTISED_Autoneg;

+ ecmd->advertising |= hw->autoneg_advertised;

+

+ ecmd->port = PORT_TP;

+ ecmd->phy_address = 0;

+ ecmd->autoneg = AUTONEG_ENABLE;

+ ecmd->transceiver = XCVR_INTERNAL;

+

+ if (hw->link_up) {

+ switch (hw->link_speed) {

+ case LX_LC_10H:

+ ethtool_cmd_speed_set(ecmd, SPEED_10);

+ ecmd->duplex = DUPLEX_HALF;

+ break;

+ case LX_LC_10F:

+ ethtool_cmd_speed_set(ecmd, SPEED_10);

+ ecmd->duplex = DUPLEX_FULL;

+ break;

+ case LX_LC_100H:

+ ethtool_cmd_speed_set(ecmd, SPEED_100);

+ ecmd->duplex = DUPLEX_HALF;

+ break;

+ case LX_LC_100F:

+ ethtool_cmd_speed_set(ecmd, SPEED_100);

+ ecmd->duplex = DUPLEX_FULL;

+ break;

+ case LX_LC_1000F:

+ ethtool_cmd_speed_set(ecmd, SPEED_1000);

+ ecmd->duplex = DUPLEX_FULL;

+ break;

+ default:

+ ecmd->speed = -1;

+ ecmd->duplex = -1;

+ break;

+ }

+ } else {

+ ethtool_cmd_speed_set(ecmd, -1);

+ ecmd->duplex = -1;

+ }

+

+ return 0;

+}

+

+

+static int alx_set_settings(struct net_device *netdev,

+ struct ethtool_cmd *ecmd)

+{

+ struct alx_adapter *adpt = netdev_priv(netdev);

+ struct alx_hw *hw = &adpt->hw;

+ u32 advertised, old;

+ int error = 0;

+

+ while (CHK_ADPT_FLAG(STATE_RESETTING))

+ msleep(20);

+ SET_ADPT_FLAG(STATE_RESETTING);

+

+ old = hw->autoneg_advertised;

+ advertised = 0;

+ if (ecmd->autoneg == AUTONEG_ENABLE) {

+ advertised = LX_LC_ALL;

+ } else {

+ u32 speed = ethtool_cmd_speed(ecmd);

+ if (speed == SPEED_1000) {

+ if (ecmd->duplex != DUPLEX_FULL) {

+ dev_warn(&adpt->pdev->dev,

+ "1000M half is invalid

");

+ CLI_ADPT_FLAG(STATE_RESETTING);

+ return -EINVAL;

+ }

+ advertised = LX_LC_1000F;

+ } else if (speed == SPEED_100) {

+ if (ecmd->duplex == DUPLEX_FULL)

+ advertised = LX_LC_100F;

+ else

+ advertised = LX_LC_100H;

+ } else {

+ if (ecmd->duplex == DUPLEX_FULL)

+ advertised = LX_LC_10F;

+ else

+ advertised = LX_LC_10H;

+ }

+ }

+

+ if (hw->autoneg_advertised == advertised) {

+ CLI_ADPT_FLAG(STATE_RESETTING);

+ return error;

+ }

+

+ hw->autoneg_advertised = advertised;

+ error = alf_setup_phy_link(hw, hw->autoneg_advertised, true,

+ !hw->disable_fc_autoneg);

+ if (error) {

+ dev_err(&adpt->pdev->dev,

+ "setup link failed with code %d

", error);

+ hw->autoneg_advertised = old;

+ alf_setup_phy_link(hw, hw->autoneg_advertised, true,

+ !hw->disable_fc_autoneg);

+ }

+ CLI_ADPT_FLAG(STATE_RESETTING);

+ return error;

+}

+

+

+static void alx_get_pauseparam(struct net_device *netdev,

+ struct ethtool_pauseparam *pause)

+{

+ struct alx_adapter *adpt = netdev_priv(netdev);

+ struct alx_hw *hw = &adpt->hw;

+

+

+ if (hw->disable_fc_autoneg ||

+ hw->cur_fc_mode == alx_fc_none)

+ pause->autoneg = 0;

+ else

+ pause->autoneg = 1;

+

+ if (hw->cur_fc_mode == alx_fc_rx_pause) {

+ pause->rx_pause = 1;

+ } else if (hw->cur_fc_mode == alx_fc_tx_pause) {

+ pause->tx_pause = 1;

+ } else if (hw->cur_fc_mode == alx_fc_full) {

+ pause->rx_pause = 1;

+ pause->tx_pause = 1;

+ }

+}

+

+

+static int alx_set_pauseparam(struct net_device *netdev,

+ struct ethtool_pauseparam *pause)

+{

+ struct alx_adapter *adpt = netdev_priv(netdev);

+ struct alx_hw *hw = &adpt->hw;

+ enum alx_fc_mode req_fc_mode;

+ bool disable_fc_autoneg;

+ int retval;

+

+ while (CHK_ADPT_FLAG(STATE_RESETTING))

+ msleep(20);

+ SET_ADPT_FLAG(STATE_RESETTING);

+

+ req_fc_mode = hw->req_fc_mode;

+ disable_fc_autoneg = hw->disable_fc_autoneg;

+

+

+ if (pause->autoneg != AUTONEG_ENABLE)

+ disable_fc_autoneg = true;

+ else

+ disable_fc_autoneg = false;

+

+ if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)

+ req_fc_mode = alx_fc_full;

+ else if (pause->rx_pause && !pause->tx_pause)

+ req_fc_mode = alx_fc_rx_pause;

+ else if (!pause->rx_pause && pause->tx_pause)

+ req_fc_mode = alx_fc_tx_pause;

+ else if (!pause->rx_pause && !pause->tx_pause)

+ req_fc_mode = alx_fc_none;

+ else

+ return -EINVAL;

+

+ if ((hw->req_fc_mode != req_fc_mode) ||

+ (hw->disable_fc_autoneg != d