diff --git a/package/network/utils/comgt/Makefile b/package/network/utils/comgt/Makefile index 24dae2521e..429c938602 100644 --- a/package/network/utils/comgt/Makefile +++ b/package/network/utils/comgt/Makefile @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=comgt PKG_VERSION:=0.32 -PKG_RELEASE:=34 +PKG_RELEASE:=35 PKG_SOURCE:=$(PKG_NAME).$(PKG_VERSION).tgz PKG_SOURCE_URL:=@SF/comgt @@ -79,6 +79,7 @@ define Package/comgt/install $(INSTALL_DATA) ./files/getcarrier.gcom $(1)/etc/gcom/getcarrier.gcom $(INSTALL_DATA) ./files/getcnum.gcom $(1)/etc/gcom/getcnum.gcom $(INSTALL_DATA) ./files/getimsi.gcom $(1)/etc/gcom/getimsi.gcom + $(INSTALL_DATA) ./files/runcommand.gcom $(1)/etc/gcom/runcommand.gcom $(INSTALL_DIR) $(1)/etc/hotplug.d/tty $(INSTALL_CONF) ./files/3g.usb $(1)/etc/hotplug.d/tty/30-3g $(INSTALL_DIR) $(1)/lib/netifd/proto @@ -96,7 +97,6 @@ endef define Package/comgt-ncm/install $(INSTALL_DIR) $(1)/etc/gcom $(INSTALL_DATA) ./files/ncm.json $(1)/etc/gcom/ncm.json - $(INSTALL_DATA) ./files/runcommand.gcom $(1)/etc/gcom/runcommand.gcom $(INSTALL_DIR) $(1)/lib/netifd/proto $(INSTALL_BIN) ./files/ncm.sh $(1)/lib/netifd/proto/ncm.sh endef diff --git a/package/network/utils/comgt/files/3g.sh b/package/network/utils/comgt/files/3g.sh index 9220cbf5a0..42ba894e59 100644 --- a/package/network/utils/comgt/files/3g.sh +++ b/package/network/utils/comgt/files/3g.sh @@ -72,6 +72,8 @@ proto_3g_setup() { *) CODE="2,2";; esac export MODE="AT^SYSCFG=${CODE},3FFFFFFF,2,4" + elif echo "$cardinfo" | grep -q "MikroTik"; then + COMMAND="AT+CFUN=1" gcom -d "$device" -s /etc/gcom/runcommand.gcom || return 1 fi if [ -n "$pincode" ]; then diff --git a/package/network/utils/comgt/files/ncm.json b/package/network/utils/comgt/files/ncm.json index 7d9a38fe36..df0810ddbc 100644 --- a/package/network/utils/comgt/files/ncm.json +++ b/package/network/utils/comgt/files/ncm.json @@ -112,5 +112,17 @@ ], "connect": "AT+ZGACT=1,${profile}", "disconnect": "AT+ZGACT=0,${profile}" + }, + 
"\"mikrotik\"": { + "configure": [ + "AT+CFUN=4", + "AT+ZGDCONT=${profile},\\\"${pdptype}\\\",\\\"${apn}\\\",0", + "AT+ZDHCPLEASE=10", + "AT+CFUN=1" + ], + "waitforconnect": "\\\"+ZCONSTAT: 1,${context_type}\\\",\\\"+ZGIPDNS: ${context_type}\\\"", + "connect": "AT+ZGACT=1,${context_type}", + "finalize": "AT+ZDHCPLEASE=0", + "disconnect": "AT+ZGACT=0,1" } } diff --git a/package/network/utils/comgt/files/ncm.sh b/package/network/utils/comgt/files/ncm.sh index 2f36697487..dec058712d 100644 --- a/package/network/utils/comgt/files/ncm.sh +++ b/package/network/utils/comgt/files/ncm.sh @@ -86,10 +86,25 @@ proto_ncm_setup() { return 1 } - [ -n "$delay" ] && sleep "$delay" - - manufacturer=$(gcom -d "$device" -s /etc/gcom/getcardinfo.gcom | awk 'NF && $0 !~ /AT\+CGMI/ { sub(/\+CGMI: /,""); print tolower($1); exit; }') - [ $? -ne 0 -o -z "$manufacturer" ] && { + start=$(date +%s) + while true; do + manufacturer=$(gcom -d "$device" -s /etc/gcom/getcardinfo.gcom | awk 'NF && $0 !~ /AT\+CGMI/ { sub(/\+CGMI: /,""); print tolower($1); exit; }') + [ "$manufacturer" = "error" ] && { + manufacturer="" + } + [ -n "$manufacturer" ] && { + break + } + [ -z "$delay" ] && { + break + } + sleep 1 + elapsed=$(($(date +%s) - start)) + [ "$elapsed" -gt "$delay" ] && { + break + } + done + [ -z "$manufacturer" ] && { echo "Failed to get modem information" proto_notify_error "$interface" GETINFO_FAILED return 1 diff --git a/target/linux/bmips/config-5.15 b/target/linux/bmips/config-5.15 index f1d7752bd8..c38a4450ab 100644 --- a/target/linux/bmips/config-5.15 +++ b/target/linux/bmips/config-5.15 @@ -10,6 +10,7 @@ CONFIG_B53_MMAP_DRIVER=y CONFIG_B53_SPI_DRIVER=y CONFIG_BCM6345_EXT_IRQ=y CONFIG_BCM6345_L1_IRQ=y +CONFIG_BCM6348_ENET=y CONFIG_BCM6368_ENETSW=y CONFIG_BCM63XX_POWER=y CONFIG_BCM7038_WDT=y diff --git a/target/linux/bmips/dts/bcm6358-huawei-hg556a-b.dts b/target/linux/bmips/dts/bcm6358-huawei-hg556a-b.dts index 6c8b093edf..273470b1bd 100644 --- 
a/target/linux/bmips/dts/bcm6358-huawei-hg556a-b.dts +++ b/target/linux/bmips/dts/bcm6358-huawei-hg556a-b.dts @@ -140,6 +140,24 @@ status = "okay"; }; +ðernet1 { + status = "okay"; + + nvmem-cells = <&macaddr_cfe_6a0>; + nvmem-cell-names = "mac-address"; + + phy-mode = "mii"; + + fixed-link { + speed = <100>; + full-duplex; + }; +}; + +&iudma { + status = "okay"; +}; + &ohci { status = "okay"; }; diff --git a/target/linux/bmips/dts/bcm6358.dtsi b/target/linux/bmips/dts/bcm6358.dtsi index e08ead0623..606e096d2e 100644 --- a/target/linux/bmips/dts/bcm6358.dtsi +++ b/target/linux/bmips/dts/bcm6358.dtsi @@ -346,5 +346,84 @@ status = "disabled"; }; + + ethernet0: ethernet@fffe4000 { + compatible = "brcm,bcm6358-emac"; + reg = <0xfffe4000 0x2dc>; + + clocks = <&periph_clk BCM6358_CLK_ENET0>; + + interrupt-parent = <&periph_intc>; + interrupts = , + , + ; + interrupt-names = "emac", + "rx", + "tx"; + + brcm,iudma = <&iudma>; + + dma-rx = <0>; + dma-tx = <1>; + + status = "disabled"; + + mdio0: mdio { + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + ethernet1: ethernet@fffe4800 { + compatible = "brcm,bcm6358-emac"; + reg = <0xfffe4800 0x2dc>; + + clocks = <&periph_clk BCM6358_CLK_ENET1>; + + interrupt-parent = <&periph_intc>; + interrupts = , + , + ; + interrupt-names = "emac", + "rx", + "tx"; + + brcm,iudma = <&iudma>; + brcm,external-mii; + + dma-rx = <2>; + dma-tx = <3>; + + status = "disabled"; + + mdio1: mdio { + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + iudma: dma@fffe5000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "brcm,bcm6358-iudma"; + reg = <0xfffe5000 0x24>, + <0xfffe5100 0x80>, + <0xfffe5200 0x80>; + reg-names = "dma", + "dma-channels", + "dma-sram"; + + dma-channels = <8>; + + clocks = <&periph_clk BCM6358_CLK_EMUSB>, + <&periph_clk BCM6358_CLK_USBSU>, + <&periph_clk BCM6358_CLK_EPHY>, + <&periph_clk BCM6358_CLK_ENET>; + + resets = <&periph_rst BCM6358_RST_ENET>, + <&periph_rst BCM6358_RST_EPHY>; + + status = 
"disabled"; + }; }; }; diff --git a/target/linux/bmips/files/drivers/net/ethernet/broadcom/bcm6348-enet.c b/target/linux/bmips/files/drivers/net/ethernet/broadcom/bcm6348-enet.c new file mode 100644 index 0000000000..2bd243089e --- /dev/null +++ b/target/linux/bmips/files/drivers/net/ethernet/broadcom/bcm6348-enet.c @@ -0,0 +1,1719 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * BCM6348 Ethernet Controller Driver + * + * Copyright (C) 2020 Álvaro Fernández Rojas + * Copyright (C) 2015 Jonas Gorski + * Copyright (C) 2008 Maxime Bizon + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* DMA channels */ +#define DMA_CHAN_WIDTH 0x10 + +/* Controller Configuration Register */ +#define DMA_CFG_REG 0x0 +#define DMA_CFG_EN_SHIFT 0 +#define DMA_CFG_EN_MASK (1 << DMA_CFG_EN_SHIFT) +#define DMA_CFG_FLOWCH_MASK(x) (1 << ((x >> 1) + 1)) + +/* Flow Control Descriptor Low Threshold register */ +#define DMA_FLOWCL_REG(x) (0x4 + (x) * 6) + +/* Flow Control Descriptor High Threshold register */ +#define DMA_FLOWCH_REG(x) (0x8 + (x) * 6) + +/* Flow Control Descriptor Buffer Alloca Threshold register */ +#define DMA_BUFALLOC_REG(x) (0xc + (x) * 6) +#define DMA_BUFALLOC_FORCE_SHIFT 31 +#define DMA_BUFALLOC_FORCE_MASK (1 << DMA_BUFALLOC_FORCE_SHIFT) + +/* Channel Configuration register */ +#define DMAC_CHANCFG_REG 0x0 +#define DMAC_CHANCFG_EN_SHIFT 0 +#define DMAC_CHANCFG_EN_MASK (1 << DMAC_CHANCFG_EN_SHIFT) +#define DMAC_CHANCFG_PKTHALT_SHIFT 1 +#define DMAC_CHANCFG_PKTHALT_MASK (1 << DMAC_CHANCFG_PKTHALT_SHIFT) +#define DMAC_CHANCFG_BUFHALT_SHIFT 2 +#define DMAC_CHANCFG_BUFHALT_MASK (1 << DMAC_CHANCFG_BUFHALT_SHIFT) +#define DMAC_CHANCFG_CHAINING_SHIFT 2 +#define DMAC_CHANCFG_CHAINING_MASK (1 << DMAC_CHANCFG_CHAINING_SHIFT) +#define DMAC_CHANCFG_WRAP_EN_SHIFT 3 +#define DMAC_CHANCFG_WRAP_EN_MASK (1 << DMAC_CHANCFG_WRAP_EN_SHIFT) +#define 
DMAC_CHANCFG_FLOWC_EN_SHIFT 4 +#define DMAC_CHANCFG_FLOWC_EN_MASK (1 << DMAC_CHANCFG_FLOWC_EN_SHIFT) + +/* Interrupt Control/Status register */ +#define DMAC_IR_REG 0x4 +#define DMAC_IR_BUFDONE_MASK (1 << 0) +#define DMAC_IR_PKTDONE_MASK (1 << 1) +#define DMAC_IR_NOTOWNER_MASK (1 << 2) + +/* Interrupt Mask register */ +#define DMAC_IRMASK_REG 0x8 + +/* Maximum Burst Length */ +#define DMAC_MAXBURST_REG 0xc + +/* Ring Start Address register */ +#define DMAS_RSTART_REG 0x0 + +/* State Ram Word 2 */ +#define DMAS_SRAM2_REG 0x4 + +/* State Ram Word 3 */ +#define DMAS_SRAM3_REG 0x8 + +/* State Ram Word 4 */ +#define DMAS_SRAM4_REG 0xc + +struct bcm6348_iudma_desc { + u32 len_stat; + u32 address; +}; + +/* control */ +#define DMADESC_LENGTH_SHIFT 16 +#define DMADESC_LENGTH_MASK (0xfff << DMADESC_LENGTH_SHIFT) +#define DMADESC_OWNER_MASK (1 << 15) +#define DMADESC_EOP_MASK (1 << 14) +#define DMADESC_SOP_MASK (1 << 13) +#define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK) +#define DMADESC_WRAP_MASK (1 << 12) + +/* status */ +#define DMADESC_UNDER_MASK (1 << 9) +#define DMADESC_APPEND_CRC (1 << 8) +#define DMADESC_OVSIZE_MASK (1 << 4) +#define DMADESC_RXER_MASK (1 << 2) +#define DMADESC_CRC_MASK (1 << 1) +#define DMADESC_OV_MASK (1 << 0) +#define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \ + DMADESC_OVSIZE_MASK | \ + DMADESC_RXER_MASK | \ + DMADESC_CRC_MASK | \ + DMADESC_OV_MASK) + +struct bcm6348_iudma { + void __iomem *dma_base; + void __iomem *dma_chan; + void __iomem *dma_sram; + + spinlock_t dma_base_lock; + + struct clk **clock; + unsigned int num_clocks; + + struct reset_control **reset; + unsigned int num_resets; + + unsigned int dma_channels; +}; + +int bcm6348_iudma_drivers_register(struct platform_device *pdev); + +static inline u32 dma_readl(struct bcm6348_iudma *iudma, u32 off) +{ + u32 val; + + spin_lock(&iudma->dma_base_lock); + val = __raw_readl(iudma->dma_base + off); + spin_unlock(&iudma->dma_base_lock); + + return val; +} + +static inline void 
dma_writel(struct bcm6348_iudma *iudma, u32 val, u32 off) +{ + spin_lock(&iudma->dma_base_lock); + __raw_writel(val, iudma->dma_base + off); + spin_unlock(&iudma->dma_base_lock); +} + +static inline u32 dmac_readl(struct bcm6348_iudma *iudma, u32 off, int chan) +{ + return __raw_readl(iudma->dma_chan + chan * DMA_CHAN_WIDTH + off); +} + +static inline void dmac_writel(struct bcm6348_iudma *iudma, u32 val, u32 off, + int chan) +{ + __raw_writel(val, iudma->dma_chan + chan * DMA_CHAN_WIDTH + off); +} + +static inline void dmas_writel(struct bcm6348_iudma *iudma, u32 val, u32 off, + int chan) +{ + __raw_writel(val, iudma->dma_sram + chan * DMA_CHAN_WIDTH + off); +} + +static void bcm6348_iudma_chan_stop(struct bcm6348_iudma *iudma, int chan) +{ + int limit = 1000; + + dmac_writel(iudma, 0, DMAC_CHANCFG_REG, chan); + + do { + u32 val; + + val = dmac_readl(iudma, DMAC_CHANCFG_REG, chan); + if (!(val & DMAC_CHANCFG_EN_MASK)) + break; + + udelay(1); + } while (limit--); +} + +static int bcm6348_iudma_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct bcm6348_iudma *iudma; + unsigned i; + int num_resets; + int ret; + + iudma = devm_kzalloc(dev, sizeof(*iudma), GFP_KERNEL); + if (!iudma) + return -ENOMEM; + + if (of_property_read_u32(node, "dma-channels", &iudma->dma_channels)) + return -ENODEV; + + iudma->dma_base = devm_platform_ioremap_resource_byname(pdev, "dma"); + if (IS_ERR_OR_NULL(iudma->dma_base)) + return PTR_ERR(iudma->dma_base); + + iudma->dma_chan = devm_platform_ioremap_resource_byname(pdev, + "dma-channels"); + if (IS_ERR_OR_NULL(iudma->dma_chan)) + return PTR_ERR(iudma->dma_chan); + + iudma->dma_sram = devm_platform_ioremap_resource_byname(pdev, + "dma-sram"); + if (IS_ERR_OR_NULL(iudma->dma_sram)) + return PTR_ERR(iudma->dma_sram); + + iudma->num_clocks = of_clk_get_parent_count(node); + if (iudma->num_clocks) { + iudma->clock = devm_kcalloc(dev, iudma->num_clocks, + sizeof(struct 
clk *), GFP_KERNEL); + if (IS_ERR_OR_NULL(iudma->clock)) + return PTR_ERR(iudma->clock); + } + for (i = 0; i < iudma->num_clocks; i++) { + iudma->clock[i] = of_clk_get(node, i); + if (IS_ERR_OR_NULL(iudma->clock[i])) { + dev_err(dev, "error getting iudma clock %d\n", i); + return PTR_ERR(iudma->clock[i]); + } + + ret = clk_prepare_enable(iudma->clock[i]); + if (ret) { + dev_err(dev, "error enabling iudma clock %d\n", i); + return ret; + } + } + + num_resets = of_count_phandle_with_args(node, "resets", + "#reset-cells"); + if (num_resets > 0) + iudma->num_resets = num_resets; + else + iudma->num_resets = 0; + if (iudma->num_resets) { + iudma->reset = devm_kcalloc(dev, iudma->num_resets, + sizeof(struct reset_control *), + GFP_KERNEL); + if (IS_ERR_OR_NULL(iudma->reset)) + return PTR_ERR(iudma->reset); + } + for (i = 0; i < iudma->num_resets; i++) { + iudma->reset[i] = devm_reset_control_get_by_index(dev, i); + if (IS_ERR_OR_NULL(iudma->reset[i])) { + dev_err(dev, "error getting iudma reset %d\n", i); + return PTR_ERR(iudma->reset[i]); + } + + ret = reset_control_reset(iudma->reset[i]); + if (ret) { + dev_err(dev, "error performing iudma reset %d\n", i); + return ret; + } + } + + dma_writel(iudma, 0, DMA_CFG_REG); + for (i = 0; i < iudma->dma_channels; i++) + bcm6348_iudma_chan_stop(iudma, i); + dma_writel(iudma, DMA_CFG_EN_MASK, DMA_CFG_REG); + + spin_lock_init(&iudma->dma_base_lock); + + dev_info(dev, "bcm6348-iudma @ 0x%px\n", iudma->dma_base); + + platform_set_drvdata(pdev, iudma); + + return bcm6348_iudma_drivers_register(pdev); +} + +static const struct of_device_id bcm6348_iudma_of_match[] = { + { .compatible = "brcm,bcm6338-iudma", }, + { .compatible = "brcm,bcm6348-iudma", }, + { .compatible = "brcm,bcm6358-iudma", }, + { /* sentinel */ }, +}; + +static struct platform_driver bcm6348_iudma_driver = { + .driver = { + .name = "bcm6348-iudma", + .of_match_table = of_match_ptr(bcm6348_iudma_of_match), + }, + .probe = bcm6348_iudma_probe, +}; 
+builtin_platform_driver(bcm6348_iudma_driver); + +/* + * BCM6348 Eternet MACs + */ + +/* MTU */ +#define ENET_MAX_MTU 2046 + +#define ENET_TAG_SIZE 6 +#define ENET_MTU_OVERHEAD (VLAN_ETH_HLEN + VLAN_HLEN + \ + ENET_TAG_SIZE) + +/* Default number of descriptor */ +#define ENET_DEF_RX_DESC 64 +#define ENET_DEF_TX_DESC 32 +#define ENET_DEF_CPY_BREAK 128 + +/* Maximum burst len for dma (4 bytes unit) */ +#define ENET_DMA_MAXBURST 8 + +/* Receiver Configuration register */ +#define ENET_RXCFG_REG 0x0 +#define ENET_RXCFG_ALLMCAST_SHIFT 1 +#define ENET_RXCFG_ALLMCAST_MASK (1 << ENET_RXCFG_ALLMCAST_SHIFT) +#define ENET_RXCFG_PROMISC_SHIFT 3 +#define ENET_RXCFG_PROMISC_MASK (1 << ENET_RXCFG_PROMISC_SHIFT) +#define ENET_RXCFG_LOOPBACK_SHIFT 4 +#define ENET_RXCFG_LOOPBACK_MASK (1 << ENET_RXCFG_LOOPBACK_SHIFT) +#define ENET_RXCFG_ENFLOW_SHIFT 5 +#define ENET_RXCFG_ENFLOW_MASK (1 << ENET_RXCFG_ENFLOW_SHIFT) + +/* Receive Maximum Length register */ +#define ENET_RXMAXLEN_REG 0x4 +#define ENET_RXMAXLEN_SHIFT 0 +#define ENET_RXMAXLEN_MASK (0x7ff << ENET_RXMAXLEN_SHIFT) + +/* Transmit Maximum Length register */ +#define ENET_TXMAXLEN_REG 0x8 +#define ENET_TXMAXLEN_SHIFT 0 +#define ENET_TXMAXLEN_MASK (0x7ff << ENET_TXMAXLEN_SHIFT) + +/* MII Status/Control register */ +#define ENET_MIISC_REG 0x10 +#define ENET_MIISC_MDCFREQDIV_SHIFT 0 +#define ENET_MIISC_MDCFREQDIV_MASK (0x7f << ENET_MIISC_MDCFREQDIV_SHIFT) +#define ENET_MIISC_PREAMBLEEN_SHIFT 7 +#define ENET_MIISC_PREAMBLEEN_MASK (1 << ENET_MIISC_PREAMBLEEN_SHIFT) + +/* MII Data register */ +#define ENET_MIID_REG 0x14 +#define ENET_MIID_DATA_SHIFT 0 +#define ENET_MIID_DATA_MASK (0xffff << ENET_MIID_DATA_SHIFT) +#define ENET_MIID_TA_SHIFT 16 +#define ENET_MIID_TA_MASK (0x3 << ENET_MIID_TA_SHIFT) +#define ENET_MIID_REG_SHIFT 18 +#define ENET_MIID_REG_MASK (0x1f << ENET_MIID_REG_SHIFT) +#define ENET_MIID_PHY_SHIFT 23 +#define ENET_MIID_PHY_MASK (0x1f << ENET_MIID_PHY_SHIFT) +#define ENET_MIID_OP_SHIFT 28 +#define ENET_MIID_OP_WRITE 
(0x5 << ENET_MIID_OP_SHIFT) +#define ENET_MIID_OP_READ (0x6 << ENET_MIID_OP_SHIFT) + +/* Ethernet Interrupt Mask register */ +#define ENET_IRMASK_REG 0x18 + +/* Ethernet Interrupt register */ +#define ENET_IR_REG 0x1c +#define ENET_IR_MII BIT(0) +#define ENET_IR_MIB BIT(1) +#define ENET_IR_FLOWC BIT(2) + +/* Ethernet Control register */ +#define ENET_CTL_REG 0x2c +#define ENET_CTL_ENABLE_SHIFT 0 +#define ENET_CTL_ENABLE_MASK (1 << ENET_CTL_ENABLE_SHIFT) +#define ENET_CTL_DISABLE_SHIFT 1 +#define ENET_CTL_DISABLE_MASK (1 << ENET_CTL_DISABLE_SHIFT) +#define ENET_CTL_SRESET_SHIFT 2 +#define ENET_CTL_SRESET_MASK (1 << ENET_CTL_SRESET_SHIFT) +#define ENET_CTL_EPHYSEL_SHIFT 3 +#define ENET_CTL_EPHYSEL_MASK (1 << ENET_CTL_EPHYSEL_SHIFT) + +/* Transmit Control register */ +#define ENET_TXCTL_REG 0x30 +#define ENET_TXCTL_FD_SHIFT 0 +#define ENET_TXCTL_FD_MASK (1 << ENET_TXCTL_FD_SHIFT) + +/* Transmit Watermask register */ +#define ENET_TXWMARK_REG 0x34 +#define ENET_TXWMARK_WM_SHIFT 0 +#define ENET_TXWMARK_WM_MASK (0x3f << ENET_TXWMARK_WM_SHIFT) + +/* MIB Control register */ +#define ENET_MIBCTL_REG 0x38 +#define ENET_MIBCTL_RDCLEAR_SHIFT 0 +#define ENET_MIBCTL_RDCLEAR_MASK (1 << ENET_MIBCTL_RDCLEAR_SHIFT) + +/* Perfect Match Data Low register */ +#define ENET_PML_REG(x) (0x58 + (x) * 8) +#define ENET_PMH_REG(x) (0x5c + (x) * 8) +#define ENET_PMH_DATAVALID_SHIFT 16 +#define ENET_PMH_DATAVALID_MASK (1 << ENET_PMH_DATAVALID_SHIFT) + +/* MIB register */ +#define ENET_MIB_REG(x) (0x200 + (x) * 4) +#define ENET_MIB_REG_COUNT 55 + +/* + * TX transmit threshold (4 bytes unit), FIFO is 256 bytes, the value + * must be low enough so that a DMA transfer of above burst length can + * not overflow the fifo + */ +#define ENET_TX_FIFO_TRESH 32 + +struct bcm6348_emac { + struct bcm6348_iudma *iudma; + void __iomem *base; + + struct clk **clock; + unsigned int num_clocks; + + struct reset_control **reset; + unsigned int num_resets; + + int copybreak; + + int irq_rx; + int irq_tx; + + /* hw 
view of rx & tx dma ring */ + dma_addr_t rx_desc_dma; + dma_addr_t tx_desc_dma; + + /* allocated size (in bytes) for rx & tx dma ring */ + unsigned int rx_desc_alloc_size; + unsigned int tx_desc_alloc_size; + + struct napi_struct napi; + + /* dma channel id for rx */ + int rx_chan; + + /* number of dma desc in rx ring */ + int rx_ring_size; + + /* cpu view of rx dma ring */ + struct bcm6348_iudma_desc *rx_desc_cpu; + + /* current number of armed descriptor given to hardware for rx */ + int rx_desc_count; + + /* next rx descriptor to fetch from hardware */ + int rx_curr_desc; + + /* next dirty rx descriptor to refill */ + int rx_dirty_desc; + + /* size of allocated rx skbs */ + unsigned int rx_skb_size; + + /* list of skb given to hw for rx */ + struct sk_buff **rx_skb; + + /* used when rx skb allocation failed, so we defer rx queue + * refill */ + struct timer_list rx_timeout; + + /* lock rx_timeout against rx normal operation */ + spinlock_t rx_lock; + + /* dma channel id for tx */ + int tx_chan; + + /* number of dma desc in tx ring */ + int tx_ring_size; + + /* cpu view of tx dma ring */ + struct bcm6348_iudma_desc *tx_desc_cpu; + + /* number of available descriptor for tx */ + int tx_desc_count; + + /* next tx descriptor avaiable */ + int tx_curr_desc; + + /* next dirty tx descriptor to reclaim */ + int tx_dirty_desc; + + /* list of skb given to hw for tx */ + struct sk_buff **tx_skb; + + /* lock used by tx reclaim and xmit */ + spinlock_t tx_lock; + + /* network device reference */ + struct net_device *net_dev; + + /* platform device reference */ + struct platform_device *pdev; + + /* external mii bus */ + bool ext_mii; + + /* phy */ + int old_link; + int old_duplex; + int old_pause; +}; + +static inline void emac_writel(struct bcm6348_emac *emac, u32 val, u32 off) +{ + __raw_writel(val, emac->base + off); +} + +static inline u32 emac_readl(struct bcm6348_emac *emac, u32 off) +{ + return __raw_readl(emac->base + off); +} + +/* + * refill rx queue + */ +static 
int bcm6348_emac_refill_rx(struct net_device *ndev) +{ + struct bcm6348_emac *emac = netdev_priv(ndev); + struct bcm6348_iudma *iudma = emac->iudma; + struct platform_device *pdev = emac->pdev; + struct device *dev = &pdev->dev; + + while (emac->rx_desc_count < emac->rx_ring_size) { + struct bcm6348_iudma_desc *desc; + struct sk_buff *skb; + dma_addr_t p; + int desc_idx; + u32 len_stat; + + desc_idx = emac->rx_dirty_desc; + desc = &emac->rx_desc_cpu[desc_idx]; + + if (!emac->rx_skb[desc_idx]) { + skb = netdev_alloc_skb(ndev, emac->rx_skb_size); + if (!skb) + break; + emac->rx_skb[desc_idx] = skb; + p = dma_map_single(dev, skb->data, emac->rx_skb_size, + DMA_FROM_DEVICE); + desc->address = p; + } + + len_stat = emac->rx_skb_size << DMADESC_LENGTH_SHIFT; + len_stat |= DMADESC_OWNER_MASK; + if (emac->rx_dirty_desc == emac->rx_ring_size - 1) { + len_stat |= DMADESC_WRAP_MASK; + emac->rx_dirty_desc = 0; + } else { + emac->rx_dirty_desc++; + } + wmb(); + desc->len_stat = len_stat; + + emac->rx_desc_count++; + + /* tell dma engine we allocated one buffer */ + dma_writel(iudma, 1, DMA_BUFALLOC_REG(emac->rx_chan)); + } + + /* If rx ring is still empty, set a timer to try allocating + * again at a later time. 
*/ + if (emac->rx_desc_count == 0 && netif_running(ndev)) { + dev_warn(dev, "unable to refill rx ring\n"); + emac->rx_timeout.expires = jiffies + HZ; + add_timer(&emac->rx_timeout); + } + + return 0; +} + +/* + * timer callback to defer refill rx queue in case we're OOM + */ +static void bcm6348_emac_refill_rx_timer(struct timer_list *t) +{ + struct bcm6348_emac *emac = from_timer(emac, t, rx_timeout); + struct net_device *ndev = emac->net_dev; + + spin_lock(&emac->rx_lock); + bcm6348_emac_refill_rx(ndev); + spin_unlock(&emac->rx_lock); +} + +/* + * extract packet from rx queue + */ +static int bcm6348_emac_receive_queue(struct net_device *ndev, int budget) +{ + struct bcm6348_emac *emac = netdev_priv(ndev); + struct bcm6348_iudma *iudma = emac->iudma; + struct platform_device *pdev = emac->pdev; + struct device *dev = &pdev->dev; + int processed = 0; + + /* don't scan ring further than number of refilled + * descriptor */ + if (budget > emac->rx_desc_count) + budget = emac->rx_desc_count; + + do { + struct bcm6348_iudma_desc *desc; + struct sk_buff *skb; + int desc_idx; + u32 len_stat; + unsigned int len; + + desc_idx = emac->rx_curr_desc; + desc = &emac->rx_desc_cpu[desc_idx]; + + /* make sure we actually read the descriptor status at + * each loop */ + rmb(); + + len_stat = desc->len_stat; + + /* break if dma ownership belongs to hw */ + if (len_stat & DMADESC_OWNER_MASK) + break; + + processed++; + emac->rx_curr_desc++; + if (emac->rx_curr_desc == emac->rx_ring_size) + emac->rx_curr_desc = 0; + emac->rx_desc_count--; + + /* if the packet does not have start of packet _and_ + * end of packet flag set, then just recycle it */ + if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { + ndev->stats.rx_dropped++; + continue; + } + + /* valid packet */ + skb = emac->rx_skb[desc_idx]; + len = (len_stat & DMADESC_LENGTH_MASK) + >> DMADESC_LENGTH_SHIFT; + /* don't include FCS */ + len -= 4; + + if (len < emac->copybreak) { + struct sk_buff *nskb; + + nskb = 
napi_alloc_skb(&emac->napi, len); + if (!nskb) { + /* forget packet, just rearm desc */ + ndev->stats.rx_dropped++; + continue; + } + + dma_sync_single_for_cpu(dev, desc->address, + len, DMA_FROM_DEVICE); + memcpy(nskb->data, skb->data, len); + dma_sync_single_for_device(dev, desc->address, + len, DMA_FROM_DEVICE); + skb = nskb; + } else { + dma_unmap_single(dev, desc->address, + emac->rx_skb_size, DMA_FROM_DEVICE); + emac->rx_skb[desc_idx] = NULL; + } + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, ndev); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += len; + netif_receive_skb(skb); + } while (--budget > 0); + + if (processed || !emac->rx_desc_count) { + bcm6348_emac_refill_rx(ndev); + + /* kick rx dma */ + dmac_writel(iudma, DMAC_CHANCFG_EN_MASK, DMAC_CHANCFG_REG, + emac->rx_chan); + } + + return processed; +} + +/* + * try to or force reclaim of transmitted buffers + */ +static int bcm6348_emac_tx_reclaim(struct net_device *ndev, int force) +{ + struct bcm6348_emac *emac = netdev_priv(ndev); + struct platform_device *pdev = emac->pdev; + struct device *dev = &pdev->dev; + int released = 0; + + while (emac->tx_desc_count < emac->tx_ring_size) { + struct bcm6348_iudma_desc *desc; + struct sk_buff *skb; + + /* We run in a bh and fight against start_xmit, which + * is called with bh disabled */ + spin_lock(&emac->tx_lock); + + desc = &emac->tx_desc_cpu[emac->tx_dirty_desc]; + + if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) { + spin_unlock(&emac->tx_lock); + break; + } + + /* ensure other field of the descriptor were not read + * before we checked ownership */ + rmb(); + + skb = emac->tx_skb[emac->tx_dirty_desc]; + emac->tx_skb[emac->tx_dirty_desc] = NULL; + dma_unmap_single(dev, desc->address, skb->len, DMA_TO_DEVICE); + + emac->tx_dirty_desc++; + if (emac->tx_dirty_desc == emac->tx_ring_size) + emac->tx_dirty_desc = 0; + emac->tx_desc_count++; + + spin_unlock(&emac->tx_lock); + + if (desc->len_stat & DMADESC_UNDER_MASK) + 
ndev->stats.tx_errors++; + + dev_kfree_skb(skb); + released++; + } + + if (netif_queue_stopped(ndev) && released) + netif_wake_queue(ndev); + + return released; +} + +static int bcm6348_emac_poll(struct napi_struct *napi, int budget) +{ + struct bcm6348_emac *emac = container_of(napi, struct bcm6348_emac, + napi); + struct bcm6348_iudma *iudma = emac->iudma; + struct net_device *ndev = emac->net_dev; + int rx_work_done; + + /* ack interrupts */ + dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IR_REG, + emac->rx_chan); + dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IR_REG, + emac->tx_chan); + + /* reclaim sent skb */ + bcm6348_emac_tx_reclaim(ndev, 0); + + spin_lock(&emac->rx_lock); + rx_work_done = bcm6348_emac_receive_queue(ndev, budget); + spin_unlock(&emac->rx_lock); + + if (rx_work_done >= budget) { + /* rx queue is not yet empty/clean */ + return rx_work_done; + } + + /* no more packet in rx/tx queue, remove device from poll + * queue */ + napi_complete_done(napi, rx_work_done); + + /* restore rx/tx interrupt */ + dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IRMASK_REG, + emac->rx_chan); + dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, DMAC_IRMASK_REG, + emac->tx_chan); + + return rx_work_done; +} + +/* + * emac interrupt handler + */ +static irqreturn_t bcm6348_emac_isr_mac(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct bcm6348_emac *emac = netdev_priv(ndev); + u32 stat; + + stat = emac_readl(emac, ENET_IR_REG); + if (!(stat & ENET_IR_MIB)) + return IRQ_NONE; + + /* clear & mask interrupt */ + emac_writel(emac, ENET_IR_MIB, ENET_IR_REG); + emac_writel(emac, 0, ENET_IRMASK_REG); + + return IRQ_HANDLED; +} + +/* + * rx/tx dma interrupt handler + */ +static irqreturn_t bcm6348_emac_isr_dma(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct bcm6348_emac *emac = netdev_priv(ndev); + struct bcm6348_iudma *iudma = emac->iudma; + + /* mask rx/tx interrupts */ + dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->rx_chan); + 
dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->tx_chan); + + napi_schedule(&emac->napi); + + return IRQ_HANDLED; +} + +/* + * tx request callback + */ +static netdev_tx_t bcm6348_emac_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct bcm6348_emac *emac = netdev_priv(ndev); + struct bcm6348_iudma *iudma = emac->iudma; + struct platform_device *pdev = emac->pdev; + struct device *dev = &pdev->dev; + struct bcm6348_iudma_desc *desc; + u32 len_stat; + netdev_tx_t ret; + + /* lock against tx reclaim */ + spin_lock(&emac->tx_lock); + + /* make sure the tx hw queue is not full, should not happen + * since we stop queue before it's the case */ + if (unlikely(!emac->tx_desc_count)) { + netif_stop_queue(ndev); + dev_err(dev, "xmit called with no tx desc available?\n"); + ret = NETDEV_TX_BUSY; + goto out_unlock; + } + + /* point to the next available desc */ + desc = &emac->tx_desc_cpu[emac->tx_curr_desc]; + emac->tx_skb[emac->tx_curr_desc] = skb; + + /* fill descriptor */ + desc->address = dma_map_single(dev, skb->data, skb->len, + DMA_TO_DEVICE); + + len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; + len_stat |= DMADESC_ESOP_MASK | DMADESC_APPEND_CRC | + DMADESC_OWNER_MASK; + + emac->tx_curr_desc++; + if (emac->tx_curr_desc == emac->tx_ring_size) { + emac->tx_curr_desc = 0; + len_stat |= DMADESC_WRAP_MASK; + } + emac->tx_desc_count--; + + /* dma might be already polling, make sure we update desc + * fields in correct order */ + wmb(); + desc->len_stat = len_stat; + wmb(); + + /* kick tx dma */ + dmac_writel(iudma, DMAC_CHANCFG_EN_MASK, DMAC_CHANCFG_REG, + emac->tx_chan); + + /* stop queue if no more desc available */ + if (!emac->tx_desc_count) + netif_stop_queue(ndev); + + ndev->stats.tx_bytes += skb->len; + ndev->stats.tx_packets++; + ret = NETDEV_TX_OK; + +out_unlock: + spin_unlock(&emac->tx_lock); + return ret; +} + +/* + * Change the interface's emac address. 
+ */ +static int bcm6348_emac_set_mac_address(struct net_device *ndev, void *p) +{ + struct bcm6348_emac *emac = netdev_priv(ndev); + struct sockaddr *addr = p; + u32 val; + + eth_hw_addr_set(ndev, addr->sa_data); + + /* use perfect match register 0 to store my emac address */ + val = (ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) | + (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]; + emac_writel(emac, val, ENET_PML_REG(0)); + + val = (ndev->dev_addr[0] << 8 | ndev->dev_addr[1]); + val |= ENET_PMH_DATAVALID_MASK; + emac_writel(emac, val, ENET_PMH_REG(0)); + + return 0; +} + +/* + * Change rx mode (promiscuous/allmulti) and update multicast list + */ +static void bcm6348_emac_set_multicast_list(struct net_device *ndev) +{ + struct bcm6348_emac *emac = netdev_priv(ndev); + struct netdev_hw_addr *ha; + u32 val; + unsigned int i; + + val = emac_readl(emac, ENET_RXCFG_REG); + + if (ndev->flags & IFF_PROMISC) + val |= ENET_RXCFG_PROMISC_MASK; + else + val &= ~ENET_RXCFG_PROMISC_MASK; + + /* only 3 perfect match registers left, first one is used for + * own mac address */ + if ((ndev->flags & IFF_ALLMULTI) || netdev_mc_count(ndev) > 3) + val |= ENET_RXCFG_ALLMCAST_MASK; + else + val &= ~ENET_RXCFG_ALLMCAST_MASK; + + /* no need to set perfect match registers if we catch all + * multicast */ + if (val & ENET_RXCFG_ALLMCAST_MASK) { + emac_writel(emac, val, ENET_RXCFG_REG); + return; + } + + i = 0; + netdev_for_each_mc_addr(ha, ndev) { + u8 *dmi_addr; + u32 tmp; + + if (i == 3) + break; + + /* update perfect match registers */ + dmi_addr = ha->addr; + tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) | + (dmi_addr[4] << 8) | dmi_addr[5]; + emac_writel(emac, tmp, ENET_PML_REG(i + 1)); + + tmp = (dmi_addr[0] << 8 | dmi_addr[1]); + tmp |= ENET_PMH_DATAVALID_MASK; + emac_writel(emac, tmp, ENET_PMH_REG(i++ + 1)); + } + + for (; i < 3; i++) { + emac_writel(emac, 0, ENET_PML_REG(i + 1)); + emac_writel(emac, 0, ENET_PMH_REG(i + 1)); + } + + emac_writel(emac, val, ENET_RXCFG_REG); 
+} + +/* + * disable emac + */ +static void bcm6348_emac_disable_mac(struct bcm6348_emac *emac) +{ + int limit; + u32 val; + + val = emac_readl(emac, ENET_CTL_REG); + val |= ENET_CTL_DISABLE_MASK; + emac_writel(emac, val, ENET_CTL_REG); + + limit = 1000; + do { + val = emac_readl(emac, ENET_CTL_REG); + if (!(val & ENET_CTL_DISABLE_MASK)) + break; + udelay(1); + } while (limit--); +} + +/* + * set emac duplex parameters + */ +static void bcm6348_emac_set_duplex(struct bcm6348_emac *emac, int fullduplex) +{ + u32 val; + + val = emac_readl(emac, ENET_TXCTL_REG); + if (fullduplex) + val |= ENET_TXCTL_FD_MASK; + else + val &= ~ENET_TXCTL_FD_MASK; + emac_writel(emac, val, ENET_TXCTL_REG); +} + +/* + * set emac flow control parameters + */ +static void bcm6348_emac_set_flow(struct bcm6348_emac *emac, bool rx_en, bool tx_en) +{ + struct bcm6348_iudma *iudma = emac->iudma; + u32 val; + + val = emac_readl(emac, ENET_RXCFG_REG); + if (rx_en) + val |= ENET_RXCFG_ENFLOW_MASK; + else + val &= ~ENET_RXCFG_ENFLOW_MASK; + emac_writel(emac, val, ENET_RXCFG_REG); + + dmas_writel(iudma, emac->rx_desc_dma, DMAS_RSTART_REG, emac->rx_chan); + dmas_writel(iudma, emac->tx_desc_dma, DMAS_RSTART_REG, emac->tx_chan); + + val = dma_readl(iudma, DMA_CFG_REG); + if (tx_en) + val |= DMA_CFG_FLOWCH_MASK(emac->rx_chan); + else + val &= ~DMA_CFG_FLOWCH_MASK(emac->rx_chan); + dma_writel(iudma, val, DMA_CFG_REG); +} + +/* + * adjust emac phy + */ +static void bcm6348_emac_adjust_phy(struct net_device *ndev) +{ + struct phy_device *phydev = ndev->phydev; + struct bcm6348_emac *emac = netdev_priv(ndev); + struct platform_device *pdev = emac->pdev; + struct device *dev = &pdev->dev; + bool status_changed = false; + + if (emac->old_link != phydev->link) { + status_changed = true; + emac->old_link = phydev->link; + } + + if (phydev->link && phydev->duplex != emac->old_duplex) { + bcm6348_emac_set_duplex(emac, phydev->duplex == DUPLEX_FULL); + status_changed = true; + emac->old_duplex = phydev->duplex; + } 
+ + if (phydev->link && phydev->pause != emac->old_pause) { + bool rx_pause_en, tx_pause_en; + + if (phydev->pause) { + rx_pause_en = true; + tx_pause_en = true; + } else { + rx_pause_en = false; + tx_pause_en = false; + } + + bcm6348_emac_set_flow(emac, rx_pause_en, tx_pause_en); + status_changed = true; + emac->old_pause = phydev->pause; + } + + if (status_changed) + dev_info(dev, "%s: phy link %s %s/%s/%s/%s\n", + ndev->name, + phydev->link ? "UP" : "DOWN", + phy_modes(phydev->interface), + phy_speed_to_str(phydev->speed), + phy_duplex_to_str(phydev->duplex), + phydev->pause ? "rx/tx" : "off"); +} + + +static int bcm6348_emac_open(struct net_device *ndev) +{ + struct bcm6348_emac *emac = netdev_priv(ndev); + struct bcm6348_iudma *iudma = emac->iudma; + struct platform_device *pdev = emac->pdev; + struct device *dev = &pdev->dev; + struct sockaddr addr; + unsigned int i, size; + int ret; + void *p; + u32 val; + + /* mask all interrupts and request them */ + emac_writel(emac, 0, ENET_IRMASK_REG); + dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->rx_chan); + dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->tx_chan); + + ret = request_irq(ndev->irq, bcm6348_emac_isr_mac, 0, ndev->name, + ndev); + if (ret) + return ret; + + ret = request_irq(emac->irq_rx, bcm6348_emac_isr_dma, + 0, ndev->name, ndev); + if (ret) + goto out_freeirq; + + ret = request_irq(emac->irq_tx, bcm6348_emac_isr_dma, + 0, ndev->name, ndev); + if (ret) + goto out_freeirq_rx; + + /* initialize perfect match registers */ + for (i = 0; i < 4; i++) { + emac_writel(emac, 0, ENET_PML_REG(i)); + emac_writel(emac, 0, ENET_PMH_REG(i)); + } + + /* write device mac address */ + memcpy(addr.sa_data, ndev->dev_addr, ETH_ALEN); + bcm6348_emac_set_mac_address(ndev, &addr); + + /* allocate rx dma ring */ + size = emac->rx_ring_size * sizeof(struct bcm6348_iudma_desc); + p = dma_alloc_coherent(dev, size, &emac->rx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(dev, "cannot allocate rx ring %u\n", size); + ret = -ENOMEM; + 
goto out_freeirq_tx; + } + + memset(p, 0, size); + emac->rx_desc_alloc_size = size; + emac->rx_desc_cpu = p; + + /* allocate tx dma ring */ + size = emac->tx_ring_size * sizeof(struct bcm6348_iudma_desc); + p = dma_alloc_coherent(dev, size, &emac->tx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(dev, "cannot allocate tx ring\n"); + ret = -ENOMEM; + goto out_free_rx_ring; + } + + memset(p, 0, size); + emac->tx_desc_alloc_size = size; + emac->tx_desc_cpu = p; + + emac->tx_skb = kzalloc(sizeof(struct sk_buff *) * emac->tx_ring_size, + GFP_KERNEL); + if (!emac->tx_skb) { + dev_err(dev, "cannot allocate tx skb queue\n"); + ret = -ENOMEM; + goto out_free_tx_ring; + } + + emac->tx_desc_count = emac->tx_ring_size; + emac->tx_dirty_desc = 0; + emac->tx_curr_desc = 0; + spin_lock_init(&emac->tx_lock); + + /* init & fill rx ring with skbs */ + emac->rx_skb = kzalloc(sizeof(struct sk_buff *) * emac->rx_ring_size, + GFP_KERNEL); + if (!emac->rx_skb) { + dev_err(dev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out_free_tx_skb; + } + + emac->rx_desc_count = 0; + emac->rx_dirty_desc = 0; + emac->rx_curr_desc = 0; + + /* initialize flow control buffer allocation */ + dma_writel(iudma, DMA_BUFALLOC_FORCE_MASK | 0, + DMA_BUFALLOC_REG(emac->rx_chan)); + + if (bcm6348_emac_refill_rx(ndev)) { + dev_err(dev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out; + } + + /* write rx & tx ring addresses */ + dmas_writel(iudma, emac->rx_desc_dma, + DMAS_RSTART_REG, emac->rx_chan); + dmas_writel(iudma, emac->tx_desc_dma, + DMAS_RSTART_REG, emac->tx_chan); + + /* clear remaining state ram for rx & tx channel */ + dmas_writel(iudma, 0, DMAS_SRAM2_REG, emac->rx_chan); + dmas_writel(iudma, 0, DMAS_SRAM2_REG, emac->tx_chan); + dmas_writel(iudma, 0, DMAS_SRAM3_REG, emac->rx_chan); + dmas_writel(iudma, 0, DMAS_SRAM3_REG, emac->tx_chan); + dmas_writel(iudma, 0, DMAS_SRAM4_REG, emac->rx_chan); + dmas_writel(iudma, 0, DMAS_SRAM4_REG, emac->tx_chan); + + /* set max rx/tx length 
*/ + emac_writel(emac, ndev->mtu, ENET_RXMAXLEN_REG); + emac_writel(emac, ndev->mtu, ENET_TXMAXLEN_REG); + + /* set dma maximum burst len */ + dmac_writel(iudma, ENET_DMA_MAXBURST, + DMAC_MAXBURST_REG, emac->rx_chan); + dmac_writel(iudma, ENET_DMA_MAXBURST, + DMAC_MAXBURST_REG, emac->tx_chan); + + /* set correct transmit fifo watermark */ + emac_writel(emac, ENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); + + /* set flow control low/high threshold to 1/3 / 2/3 */ + val = emac->rx_ring_size / 3; + dma_writel(iudma, val, DMA_FLOWCL_REG(emac->rx_chan)); + val = (emac->rx_ring_size * 2) / 3; + dma_writel(iudma, val, DMA_FLOWCH_REG(emac->rx_chan)); + + /* all set, enable emac and interrupts, start dma engine and + * kick rx dma channel + */ + wmb(); + val = emac_readl(emac, ENET_CTL_REG); + val |= ENET_CTL_ENABLE_MASK; + emac_writel(emac, val, ENET_CTL_REG); + dmac_writel(iudma, DMAC_CHANCFG_EN_MASK, + DMAC_CHANCFG_REG, emac->rx_chan); + + /* watch "mib counters about to overflow" interrupt */ + emac_writel(emac, ENET_IR_MIB, ENET_IR_REG); + emac_writel(emac, ENET_IR_MIB, ENET_IRMASK_REG); + + /* watch "packet transferred" interrupt in rx and tx */ + dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, + DMAC_IR_REG, emac->rx_chan); + dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, + DMAC_IR_REG, emac->tx_chan); + + /* make sure we enable napi before rx interrupt */ + napi_enable(&emac->napi); + + dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, + DMAC_IRMASK_REG, emac->rx_chan); + dmac_writel(iudma, DMAC_IR_PKTDONE_MASK, + DMAC_IRMASK_REG, emac->tx_chan); + + if (ndev->phydev) + phy_start(ndev->phydev); + + netif_carrier_on(ndev); + netif_start_queue(ndev); + + return 0; + +out: + for (i = 0; i < emac->rx_ring_size; i++) { + struct bcm6348_iudma_desc *desc; + + if (!emac->rx_skb[i]) + continue; + + desc = &emac->rx_desc_cpu[i]; + dma_unmap_single(dev, desc->address, emac->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(emac->rx_skb[i]); + } + kfree(emac->rx_skb); + +out_free_tx_skb: + kfree(emac->tx_skb); + 
+out_free_tx_ring: + dma_free_coherent(dev, emac->tx_desc_alloc_size, + emac->tx_desc_cpu, emac->tx_desc_dma); + +out_free_rx_ring: + dma_free_coherent(dev, emac->rx_desc_alloc_size, + emac->rx_desc_cpu, emac->rx_desc_dma); + +out_freeirq_tx: + if (emac->irq_tx != -1) + free_irq(emac->irq_tx, ndev); + +out_freeirq_rx: + free_irq(emac->irq_rx, ndev); + +out_freeirq: + if (ndev->phydev) + phy_disconnect(ndev->phydev); + + return ret; +} + +static int bcm6348_emac_stop(struct net_device *ndev) +{ + struct bcm6348_emac *emac = netdev_priv(ndev); + struct bcm6348_iudma *iudma = emac->iudma; + struct device *dev = &emac->pdev->dev; + unsigned int i; + + netif_stop_queue(ndev); + napi_disable(&emac->napi); + if (ndev->phydev) + phy_stop(ndev->phydev); + del_timer_sync(&emac->rx_timeout); + + /* mask all interrupts */ + emac_writel(emac, 0, ENET_IRMASK_REG); + dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->rx_chan); + dmac_writel(iudma, 0, DMAC_IRMASK_REG, emac->tx_chan); + + /* disable dma & emac */ + bcm6348_iudma_chan_stop(iudma, emac->tx_chan); + bcm6348_iudma_chan_stop(iudma, emac->rx_chan); + bcm6348_emac_disable_mac(emac); + + /* force reclaim of all tx buffers */ + bcm6348_emac_tx_reclaim(ndev, 1); + + /* free the rx skb ring */ + for (i = 0; i < emac->rx_ring_size; i++) { + struct bcm6348_iudma_desc *desc; + + if (!emac->rx_skb[i]) + continue; + + desc = &emac->rx_desc_cpu[i]; + dma_unmap_single_attrs(dev, desc->address, emac->rx_skb_size, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + kfree_skb(emac->rx_skb[i]); + } + + /* free remaining allocated memory */ + kfree(emac->rx_skb); + kfree(emac->tx_skb); + dma_free_coherent(dev, emac->rx_desc_alloc_size, emac->rx_desc_cpu, + emac->rx_desc_dma); + dma_free_coherent(dev, emac->tx_desc_alloc_size, emac->tx_desc_cpu, + emac->tx_desc_dma); + free_irq(emac->irq_tx, ndev); + free_irq(emac->irq_rx, ndev); + free_irq(ndev->irq, ndev); + + netdev_reset_queue(ndev); + + return 0; +} + +static const struct net_device_ops 
bcm6348_emac_ops = { + .ndo_open = bcm6348_emac_open, + .ndo_stop = bcm6348_emac_stop, + .ndo_start_xmit = bcm6348_emac_start_xmit, + .ndo_set_mac_address = bcm6348_emac_set_mac_address, + .ndo_set_rx_mode = bcm6348_emac_set_multicast_list, +}; + +static int bcm6348_emac_mdio_op(struct bcm6348_emac *emac, uint32_t data) +{ + int limit; + + /* Make sure mii interrupt status is cleared */ + emac_writel(emac, ENET_IR_MII, ENET_IR_REG); + + /* Issue mii op */ + emac_writel(emac, data, ENET_MIID_REG); + wmb(); + + /* busy wait on mii interrupt bit, with timeout */ + limit = 1000; + do { + if (emac_readl(emac, ENET_IR_REG) & ENET_IR_MII) + break; + udelay(1); + } while (limit-- > 0); + + return (limit < 0) ? 1 : 0; +} + +static int bcm6348_emac_mdio_read(struct mii_bus *bus, int phy_id, int loc) +{ + struct bcm6348_emac *emac = bus->priv; + struct platform_device *pdev = emac->pdev; + struct device *dev = &pdev->dev; + uint32_t reg; + + reg = 0x2 << ENET_MIID_TA_SHIFT; + reg |= loc << ENET_MIID_REG_SHIFT; + reg |= phy_id << ENET_MIID_PHY_SHIFT; + reg |= ENET_MIID_OP_READ; + + if (bcm6348_emac_mdio_op(emac, reg)) { + dev_err(dev, "mdio_read: phy=%d loc=%x timeout!\n", + phy_id, loc); + return -EINVAL; + } + + reg = emac_readl(emac, ENET_MIID_REG); + reg = (reg >> ENET_MIID_DATA_SHIFT) & ENET_MIID_DATA_MASK; + + return (int) reg; +} + +static int bcm6348_emac_mdio_write(struct mii_bus *bus, int phy_id, + int loc, uint16_t val) +{ + struct bcm6348_emac *emac = bus->priv; + struct platform_device *pdev = emac->pdev; + struct device *dev = &pdev->dev; + uint32_t reg; + + reg = (val << ENET_MIID_DATA_SHIFT) & ENET_MIID_DATA_MASK; + reg |= 0x2 << ENET_MIID_TA_SHIFT; + reg |= loc << ENET_MIID_REG_SHIFT; + reg |= phy_id << ENET_MIID_PHY_SHIFT; + reg |= ENET_MIID_OP_WRITE; + + if (bcm6348_emac_mdio_op(emac, reg)) { + dev_err(dev, "mdio_write: phy=%d loc=%x timeout!\n", + phy_id, loc); + return -EINVAL; + } + + bcm6348_emac_mdio_op(emac, reg); + + return 0; +} + +static int 
bcm6348_emac_mdio_init(struct bcm6348_emac *emac, + struct device_node *np) +{ + struct platform_device *pdev = emac->pdev; + struct device *dev = &pdev->dev; + struct device_node *mnp; + struct mii_bus *mii_bus; + int ret; + + mnp = of_get_child_by_name(np, "mdio"); + if (!mnp) + return -ENODEV; + + mii_bus = devm_mdiobus_alloc(dev); + if (!mii_bus) { + of_node_put(mnp); + return -ENOMEM; + } + + mii_bus->priv = emac; + mii_bus->name = np->full_name; + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev)); + mii_bus->parent = dev; + mii_bus->read = bcm6348_emac_mdio_read; + mii_bus->write = bcm6348_emac_mdio_write; + mii_bus->phy_mask = 0x3f; + + ret = devm_of_mdiobus_register(dev, mii_bus, mnp); + of_node_put(mnp); + if (ret) { + dev_err(dev, "MDIO bus registration failed\n"); + return ret; + } + + dev_info(dev, "MDIO bus init\n"); + + return 0; +} + +/* + * preinit hardware to allow mii operation while device is down + */ +static void bcm6348_emac_hw_preinit(struct bcm6348_emac *emac) +{ + u32 val; + int limit; + + /* make sure emac is disabled */ + bcm6348_emac_disable_mac(emac); + + /* soft reset emac */ + val = ENET_CTL_SRESET_MASK; + emac_writel(emac, val, ENET_CTL_REG); + wmb(); + + limit = 1000; + do { + val = emac_readl(emac, ENET_CTL_REG); + if (!(val & ENET_CTL_SRESET_MASK)) + break; + udelay(1); + } while (limit--); + + /* select correct mii interface */ + val = emac_readl(emac, ENET_CTL_REG); + if (emac->ext_mii) + val |= ENET_CTL_EPHYSEL_MASK; + else + val &= ~ENET_CTL_EPHYSEL_MASK; + emac_writel(emac, val, ENET_CTL_REG); + + /* turn on mdc clock */ + emac_writel(emac, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) | + ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG); + + /* set mib counters to self-clear when read */ + val = emac_readl(emac, ENET_MIBCTL_REG); + val |= ENET_MIBCTL_RDCLEAR_MASK; + emac_writel(emac, val, ENET_MIBCTL_REG); +} + +static int bcm6348_emac_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct 
device_node *node = dev->of_node; + struct device_node *dma_node; + struct platform_device *dma_pdev; + struct bcm6348_emac *emac; + struct bcm6348_iudma *iudma; + struct net_device *ndev; + unsigned i; + int num_resets; + int ret; + + dma_node = of_parse_phandle(node, "brcm,iudma", 0); + if (!dma_node) + return -EINVAL; + + dma_pdev = of_find_device_by_node(dma_node); + of_node_put(dma_node); + if (!dma_pdev) + return -EINVAL; + + iudma = platform_get_drvdata(dma_pdev); + if (!iudma) + return -EPROBE_DEFER; + + ndev = devm_alloc_etherdev(dev, sizeof(*emac)); + if (!ndev) + return -ENOMEM; + + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, dev); + + emac = netdev_priv(ndev); + emac->iudma = iudma; + emac->pdev = pdev; + emac->net_dev = ndev; + + emac->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR_OR_NULL(emac->base)) + return PTR_ERR(emac->base); + + ndev->irq = of_irq_get_byname(node, "emac"); + if (!ndev->irq) + return -ENODEV; + + emac->irq_rx = of_irq_get_byname(node, "rx"); + if (!emac->irq_rx) + return -ENODEV; + + emac->irq_tx = of_irq_get_byname(node, "tx"); + if (!emac->irq_tx) + return -ENODEV; + + if (of_property_read_u32(node, "dma-rx", &emac->rx_chan)) + return -ENODEV; + + if (of_property_read_u32(node, "dma-tx", &emac->tx_chan)) + return -ENODEV; + + emac->ext_mii = of_property_read_bool(node, "brcm,external-mii"); + + emac->rx_ring_size = ENET_DEF_RX_DESC; + emac->tx_ring_size = ENET_DEF_TX_DESC; + emac->copybreak = ENET_DEF_CPY_BREAK; + + emac->old_link = 0; + emac->old_duplex = -1; + emac->old_pause = -1; + + of_get_mac_address(node, ndev->dev_addr); + if (is_valid_ether_addr(ndev->dev_addr)) { + dev_info(dev, "mtd mac %pM\n", ndev->dev_addr); + } else { + random_ether_addr(ndev->dev_addr); + dev_info(dev, "random mac %pM\n", ndev->dev_addr); + } + + emac->rx_skb_size = ALIGN(ndev->mtu + ENET_MTU_OVERHEAD, + ENET_DMA_MAXBURST * 4); + + emac->num_clocks = of_clk_get_parent_count(node); + if (emac->num_clocks) { + 
emac->clock = devm_kcalloc(dev, emac->num_clocks, + sizeof(struct clk *), GFP_KERNEL); + if (IS_ERR_OR_NULL(emac->clock)) + return PTR_ERR(emac->clock); + } + for (i = 0; i < emac->num_clocks; i++) { + emac->clock[i] = of_clk_get(node, i); + if (IS_ERR_OR_NULL(emac->clock[i])) { + dev_err(dev, "error getting emac clock %d\n", i); + return PTR_ERR(emac->clock[i]); + } + + ret = clk_prepare_enable(emac->clock[i]); + if (ret) { + dev_err(dev, "error enabling emac clock %d\n", i); + return ret; + } + } + + num_resets = of_count_phandle_with_args(node, "resets", + "#reset-cells"); + if (num_resets > 0) + emac->num_resets = num_resets; + else + emac->num_resets = 0; + if (emac->num_resets) { + emac->reset = devm_kcalloc(dev, emac->num_resets, + sizeof(struct reset_control *), + GFP_KERNEL); + if (IS_ERR_OR_NULL(emac->reset)) + return PTR_ERR(emac->reset); + + } + for (i = 0; i < emac->num_resets; i++) { + emac->reset[i] = devm_reset_control_get_by_index(dev, i); + if (IS_ERR_OR_NULL(emac->reset[i])) { + dev_err(dev, "error getting emac reset %d\n", i); + return PTR_ERR(emac->reset[i]); + } + + ret = reset_control_reset(emac->reset[i]); + if (ret) { + dev_err(dev, "error performing emac reset %d\n", i); + return ret; + } + } + + /* do minimal hardware init to be able to probe mii bus */ + bcm6348_emac_hw_preinit(emac); + + ret = bcm6348_emac_mdio_init(emac, node); + if (ret) + return ret; + + spin_lock_init(&emac->rx_lock); + + timer_setup(&emac->rx_timeout, bcm6348_emac_refill_rx_timer, 0); + + /* zero mib counters */ + for (i = 0; i < ENET_MIB_REG_COUNT; i++) + emac_writel(emac, 0, ENET_MIB_REG(i)); + + /* register netdevice */ + ndev->netdev_ops = &bcm6348_emac_ops; + ndev->min_mtu = ETH_ZLEN - ETH_HLEN; + ndev->mtu = ETH_DATA_LEN - VLAN_ETH_HLEN; + ndev->max_mtu = ENET_MAX_MTU - VLAN_ETH_HLEN; + netif_napi_add(ndev, &emac->napi, bcm6348_emac_poll, 16); + SET_NETDEV_DEV(ndev, dev); + + ret = devm_register_netdev(dev, ndev); + if (ret) + goto out_disable_clk; + + 
netif_carrier_off(ndev); + + ndev->phydev = of_phy_get_and_connect(ndev, node, + bcm6348_emac_adjust_phy); + if (IS_ERR_OR_NULL(ndev->phydev)) + dev_warn(dev, "PHY not found!\n"); + + dev_info(dev, "%s at 0x%px, IRQ %d\n", ndev->name, emac->base, + ndev->irq); + + return 0; + +out_disable_clk: + for (i = 0; i < emac->num_resets; i++) + reset_control_assert(emac->reset[i]); + + for (i = 0; i < emac->num_clocks; i++) + clk_disable_unprepare(emac->clock[i]); + + return ret; +} + +static int bcm6348_emac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct bcm6348_emac *emac = netdev_priv(ndev); + unsigned int i; + + emac_writel(emac, 0, ENET_MIISC_REG); + + for (i = 0; i < emac->num_resets; i++) + reset_control_assert(emac->reset[i]); + + for (i = 0; i < emac->num_clocks; i++) + clk_disable_unprepare(emac->clock[i]); + + return 0; +} + +static const struct of_device_id bcm6348_emac_of_match[] = { + { .compatible = "brcm,bcm6338-emac", }, + { .compatible = "brcm,bcm6348-emac", }, + { .compatible = "brcm,bcm6358-emac", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, bcm6348_emac_of_match); + +static struct platform_driver bcm6348_emac_driver = { + .driver = { + .name = "bcm6348-emac", + .of_match_table = of_match_ptr(bcm6348_emac_of_match), + }, + .probe = bcm6348_emac_probe, + .remove = bcm6348_emac_remove, +}; + +int bcm6348_iudma_drivers_register(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret; + + ret = platform_driver_register(&bcm6348_emac_driver); + if (ret) + dev_err(dev, "error registering emac driver!\n"); + + return ret; +} diff --git a/target/linux/bmips/files/drivers/net/ethernet/broadcom/bcm6368-enetsw.c b/target/linux/bmips/files/drivers/net/ethernet/broadcom/bcm6368-enetsw.c index f43e484a9b..124baee9b4 100644 --- a/target/linux/bmips/files/drivers/net/ethernet/broadcom/bcm6368-enetsw.c +++ b/target/linux/bmips/files/drivers/net/ethernet/broadcom/bcm6368-enetsw.c @@ 
-194,9 +194,6 @@ struct bcm6368_enetsw { /* number of dma desc in tx ring */ int tx_ring_size; - /* maximum dma burst size */ - int dma_maxburst; - /* cpu view of rx dma ring */ struct bcm6368_enetsw_desc *tx_desc_cpu; @@ -220,15 +217,6 @@ struct bcm6368_enetsw { /* platform device reference */ struct platform_device *pdev; - - /* dma channel enable mask */ - u32 dma_chan_en_mask; - - /* dma channel interrupt mask */ - u32 dma_chan_int_mask; - - /* dma channel width */ - unsigned int dma_chan_width; }; static inline void dma_writel(struct bcm6368_enetsw *priv, u32 val, u32 off) @@ -238,27 +226,29 @@ static inline void dma_writel(struct bcm6368_enetsw *priv, u32 val, u32 off) static inline u32 dma_readl(struct bcm6368_enetsw *priv, u32 off, int chan) { - return __raw_readl(priv->dma_chan + off + chan * priv->dma_chan_width); + return __raw_readl(priv->dma_chan + off + chan * DMA_CHAN_WIDTH); } -static inline void dmac_writel(struct bcm6368_enetsw *priv, u32 val, - u32 off, int chan) +static inline void dmac_writel(struct bcm6368_enetsw *priv, u32 val, u32 off, + int chan) { - __raw_writel(val, priv->dma_chan + off + chan * priv->dma_chan_width); + __raw_writel(val, priv->dma_chan + off + chan * DMA_CHAN_WIDTH); } static inline void dmas_writel(struct bcm6368_enetsw *priv, u32 val, u32 off, int chan) { - __raw_writel(val, priv->dma_sram + off + chan * priv->dma_chan_width); + __raw_writel(val, priv->dma_sram + off + chan * DMA_CHAN_WIDTH); } /* * refill rx queue */ -static int bcm6368_enetsw_refill_rx(struct net_device *dev, bool napi_mode) +static int bcm6368_enetsw_refill_rx(struct net_device *ndev, bool napi_mode) { - struct bcm6368_enetsw *priv = netdev_priv(dev); + struct bcm6368_enetsw *priv = netdev_priv(ndev); + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; while (priv->rx_desc_count < priv->rx_ring_size) { struct bcm6368_enetsw_desc *desc; @@ -280,9 +270,9 @@ static int bcm6368_enetsw_refill_rx(struct net_device *dev, bool 
napi_mode) if (unlikely(!buf)) break; - p = dma_map_single(&priv->pdev->dev, buf + NET_SKB_PAD, + p = dma_map_single(dev, buf + NET_SKB_PAD, priv->rx_buf_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&priv->pdev->dev, p))) { + if (unlikely(dma_mapping_error(dev, p))) { skb_free_frag(buf); break; } @@ -310,8 +300,8 @@ static int bcm6368_enetsw_refill_rx(struct net_device *dev, bool napi_mode) /* If rx ring is still empty, set a timer to try allocating * again at a later time. */ - if (priv->rx_desc_count == 0 && netif_running(dev)) { - dev_warn(&priv->pdev->dev, "unable to refill rx ring\n"); + if (priv->rx_desc_count == 0 && netif_running(ndev)) { + dev_warn(dev, "unable to refill rx ring\n"); priv->rx_timeout.expires = jiffies + HZ; add_timer(&priv->rx_timeout); } @@ -325,20 +315,21 @@ static int bcm6368_enetsw_refill_rx(struct net_device *dev, bool napi_mode) static void bcm6368_enetsw_refill_rx_timer(struct timer_list *t) { struct bcm6368_enetsw *priv = from_timer(priv, t, rx_timeout); - struct net_device *dev = priv->net_dev; + struct net_device *ndev = priv->net_dev; spin_lock(&priv->rx_lock); - bcm6368_enetsw_refill_rx(dev, false); + bcm6368_enetsw_refill_rx(ndev, false); spin_unlock(&priv->rx_lock); } /* * extract packet from rx queue */ -static int bcm6368_enetsw_receive_queue(struct net_device *dev, int budget) +static int bcm6368_enetsw_receive_queue(struct net_device *ndev, int budget) { - struct bcm6368_enetsw *priv = netdev_priv(dev); - struct device *kdev = &priv->pdev->dev; + struct bcm6368_enetsw *priv = netdev_priv(ndev); + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; struct list_head rx_list; struct sk_buff *skb; int processed = 0; @@ -379,7 +370,7 @@ static int bcm6368_enetsw_receive_queue(struct net_device *dev, int budget) /* if the packet does not have start of packet _and_ * end of packet flag set, then just recycle it */ if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { - 
dev->stats.rx_dropped++; + ndev->stats.rx_dropped++; continue; } @@ -396,19 +387,19 @@ static int bcm6368_enetsw_receive_queue(struct net_device *dev, int budget) if (unlikely(!nbuf)) { /* forget packet, just rearm desc */ - dev->stats.rx_dropped++; + ndev->stats.rx_dropped++; continue; } - dma_sync_single_for_cpu(kdev, desc->address, + dma_sync_single_for_cpu(dev, desc->address, len, DMA_FROM_DEVICE); memcpy(nbuf + NET_SKB_PAD, buf + NET_SKB_PAD, len); - dma_sync_single_for_device(kdev, desc->address, + dma_sync_single_for_device(dev, desc->address, len, DMA_FROM_DEVICE); buf = nbuf; frag_size = nfrag_size; } else { - dma_unmap_single(kdev, desc->address, + dma_unmap_single(dev, desc->address, priv->rx_buf_size, DMA_FROM_DEVICE); priv->rx_buf[desc_idx] = NULL; frag_size = priv->rx_frag_size; @@ -417,27 +408,27 @@ static int bcm6368_enetsw_receive_queue(struct net_device *dev, int budget) skb = napi_build_skb(buf, frag_size); if (unlikely(!skb)) { skb_free_frag(buf); - dev->stats.rx_dropped++; + ndev->stats.rx_dropped++; continue; } skb_reserve(skb, NET_SKB_PAD); skb_put(skb, len); - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += len; list_add_tail(&skb->list, &rx_list); } while (processed < budget); list_for_each_entry(skb, &rx_list, list) - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb_list(&rx_list); priv->rx_desc_count -= processed; if (processed || !priv->rx_desc_count) { - bcm6368_enetsw_refill_rx(dev, true); + bcm6368_enetsw_refill_rx(ndev, true); /* kick rx dma */ - dmac_writel(priv, priv->dma_chan_en_mask, + dmac_writel(priv, DMAC_CHANCFG_EN_MASK, DMAC_CHANCFG_REG, priv->rx_chan); } @@ -447,10 +438,12 @@ static int bcm6368_enetsw_receive_queue(struct net_device *dev, int budget) /* * try to or force reclaim of transmitted buffers */ -static int bcm6368_enetsw_tx_reclaim(struct net_device *dev, int force, +static int 
bcm6368_enetsw_tx_reclaim(struct net_device *ndev, int force, int budget) { - struct bcm6368_enetsw *priv = netdev_priv(dev); + struct bcm6368_enetsw *priv = netdev_priv(ndev); + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; unsigned int bytes = 0; int released = 0; @@ -475,7 +468,7 @@ static int bcm6368_enetsw_tx_reclaim(struct net_device *dev, int force, skb = priv->tx_skb[priv->tx_dirty_desc]; priv->tx_skb[priv->tx_dirty_desc] = NULL; - dma_unmap_single(&priv->pdev->dev, desc->address, skb->len, + dma_unmap_single(dev, desc->address, skb->len, DMA_TO_DEVICE); priv->tx_dirty_desc++; @@ -486,17 +479,17 @@ static int bcm6368_enetsw_tx_reclaim(struct net_device *dev, int force, spin_unlock(&priv->tx_lock); if (desc->len_stat & DMADESC_UNDER_MASK) - dev->stats.tx_errors++; + ndev->stats.tx_errors++; bytes += skb->len; napi_consume_skb(skb, budget); released++; } - netdev_completed_queue(dev, released, bytes); + netdev_completed_queue(ndev, released, bytes); - if (netif_queue_stopped(dev) && released) - netif_wake_queue(dev); + if (netif_queue_stopped(ndev) && released) + netif_wake_queue(ndev); return released; } @@ -507,20 +500,20 @@ static int bcm6368_enetsw_tx_reclaim(struct net_device *dev, int force, static int bcm6368_enetsw_poll(struct napi_struct *napi, int budget) { struct bcm6368_enetsw *priv = container_of(napi, struct bcm6368_enetsw, napi); - struct net_device *dev = priv->net_dev; + struct net_device *ndev = priv->net_dev; int rx_work_done; /* ack interrupts */ - dmac_writel(priv, priv->dma_chan_int_mask, + dmac_writel(priv, DMAC_IR_PKTDONE_MASK, DMAC_IR_REG, priv->rx_chan); - dmac_writel(priv, priv->dma_chan_int_mask, + dmac_writel(priv, DMAC_IR_PKTDONE_MASK, DMAC_IR_REG, priv->tx_chan); /* reclaim sent skb */ - bcm6368_enetsw_tx_reclaim(dev, 0, budget); + bcm6368_enetsw_tx_reclaim(ndev, 0, budget); spin_lock(&priv->rx_lock); - rx_work_done = bcm6368_enetsw_receive_queue(dev, budget); + rx_work_done = 
bcm6368_enetsw_receive_queue(ndev, budget); spin_unlock(&priv->rx_lock); if (rx_work_done >= budget) { @@ -533,10 +526,10 @@ static int bcm6368_enetsw_poll(struct napi_struct *napi, int budget) napi_complete_done(napi, rx_work_done); /* restore rx/tx interrupt */ - dmac_writel(priv, priv->dma_chan_int_mask, - DMAC_IRMASK_REG, priv->rx_chan); - dmac_writel(priv, priv->dma_chan_int_mask, - DMAC_IRMASK_REG, priv->tx_chan); + dmac_writel(priv, DMAC_IR_PKTDONE_MASK, + DMAC_IRMASK_REG, priv->rx_chan); + dmac_writel(priv, DMAC_IR_PKTDONE_MASK, + DMAC_IRMASK_REG, priv->tx_chan); return rx_work_done; } @@ -546,8 +539,8 @@ static int bcm6368_enetsw_poll(struct napi_struct *napi, int budget) */ static irqreturn_t bcm6368_enetsw_isr_dma(int irq, void *dev_id) { - struct net_device *dev = dev_id; - struct bcm6368_enetsw *priv = netdev_priv(dev); + struct net_device *ndev = dev_id; + struct bcm6368_enetsw *priv = netdev_priv(ndev); /* mask rx/tx interrupts */ dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan); @@ -562,9 +555,11 @@ static irqreturn_t bcm6368_enetsw_isr_dma(int irq, void *dev_id) * tx request callback */ static netdev_tx_t -bcm6368_enetsw_start_xmit(struct sk_buff *skb, struct net_device *dev) +bcm6368_enetsw_start_xmit(struct sk_buff *skb, struct net_device *ndev) { - struct bcm6368_enetsw *priv = netdev_priv(dev); + struct bcm6368_enetsw *priv = netdev_priv(ndev); + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; struct bcm6368_enetsw_desc *desc; u32 len_stat; netdev_tx_t ret; @@ -576,9 +571,8 @@ bcm6368_enetsw_start_xmit(struct sk_buff *skb, struct net_device *dev) /* make sure the tx hw queue is not full, should not happen * since we stop queue before it's the case */ if (unlikely(!priv->tx_desc_count)) { - netif_stop_queue(dev); - dev_err(&priv->pdev->dev, "xmit called with no tx desc " - "available?\n"); + netif_stop_queue(ndev); + dev_err(dev, "xmit called with no tx desc available?\n"); ret = NETDEV_TX_BUSY; goto out_unlock; 
} @@ -604,9 +598,8 @@ bcm6368_enetsw_start_xmit(struct sk_buff *skb, struct net_device *dev) } /* fill descriptor */ - p = dma_map_single(&priv->pdev->dev, skb->data, skb->len, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&priv->pdev->dev, p))) { + p = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, p))) { dev_kfree_skb(skb); ret = NETDEV_TX_OK; goto out_unlock; @@ -634,18 +627,18 @@ bcm6368_enetsw_start_xmit(struct sk_buff *skb, struct net_device *dev) desc->len_stat = len_stat; wmb(); - netdev_sent_queue(dev, skb->len); + netdev_sent_queue(ndev, skb->len); /* kick tx dma */ - dmac_writel(priv, priv->dma_chan_en_mask, DMAC_CHANCFG_REG, + dmac_writel(priv, DMAC_CHANCFG_EN_MASK, DMAC_CHANCFG_REG, priv->tx_chan); /* stop queue if no more desc available */ if (!priv->tx_desc_count) - netif_stop_queue(dev); + netif_stop_queue(ndev); - dev->stats.tx_bytes += skb->len; - dev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + ndev->stats.tx_packets++; ret = NETDEV_TX_OK; out_unlock: @@ -673,10 +666,11 @@ static void bcm6368_enetsw_disable_dma(struct bcm6368_enetsw *priv, int chan) } while (limit--); } -static int bcm6368_enetsw_open(struct net_device *dev) +static int bcm6368_enetsw_open(struct net_device *ndev) { - struct bcm6368_enetsw *priv = netdev_priv(dev); - struct device *kdev = &priv->pdev->dev; + struct bcm6368_enetsw *priv = netdev_priv(ndev); + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; int i, ret; unsigned int size; void *p; @@ -687,22 +681,22 @@ static int bcm6368_enetsw_open(struct net_device *dev) dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan); ret = request_irq(priv->irq_rx, bcm6368_enetsw_isr_dma, - 0, dev->name, dev); + 0, ndev->name, ndev); if (ret) goto out_freeirq; if (priv->irq_tx != -1) { ret = request_irq(priv->irq_tx, bcm6368_enetsw_isr_dma, - 0, dev->name, dev); + 0, ndev->name, ndev); if (ret) goto out_freeirq_rx; } /* allocate rx dma ring */ 
size = priv->rx_ring_size * sizeof(struct bcm6368_enetsw_desc); - p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); + p = dma_alloc_coherent(dev, size, &priv->rx_desc_dma, GFP_KERNEL); if (!p) { - dev_err(kdev, "cannot allocate rx ring %u\n", size); + dev_err(dev, "cannot allocate rx ring %u\n", size); ret = -ENOMEM; goto out_freeirq_tx; } @@ -713,9 +707,9 @@ static int bcm6368_enetsw_open(struct net_device *dev) /* allocate tx dma ring */ size = priv->tx_ring_size * sizeof(struct bcm6368_enetsw_desc); - p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); + p = dma_alloc_coherent(dev, size, &priv->tx_desc_dma, GFP_KERNEL); if (!p) { - dev_err(kdev, "cannot allocate tx ring\n"); + dev_err(dev, "cannot allocate tx ring\n"); ret = -ENOMEM; goto out_free_rx_ring; } @@ -727,7 +721,7 @@ static int bcm6368_enetsw_open(struct net_device *dev) priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size, GFP_KERNEL); if (!priv->tx_skb) { - dev_err(kdev, "cannot allocate tx skb queue\n"); + dev_err(dev, "cannot allocate tx skb queue\n"); ret = -ENOMEM; goto out_free_tx_ring; } @@ -741,7 +735,7 @@ static int bcm6368_enetsw_open(struct net_device *dev) priv->rx_buf = kzalloc(sizeof(unsigned char *) * priv->rx_ring_size, GFP_KERNEL); if (!priv->rx_buf) { - dev_err(kdev, "cannot allocate rx buffer queue\n"); + dev_err(dev, "cannot allocate rx buffer queue\n"); ret = -ENOMEM; goto out_free_tx_skb; } @@ -754,8 +748,8 @@ static int bcm6368_enetsw_open(struct net_device *dev) dma_writel(priv, DMA_BUFALLOC_FORCE_MASK | 0, DMA_BUFALLOC_REG(priv->rx_chan)); - if (bcm6368_enetsw_refill_rx(dev, false)) { - dev_err(kdev, "cannot allocate rx buffer queue\n"); + if (bcm6368_enetsw_refill_rx(ndev, false)) { + dev_err(dev, "cannot allocate rx buffer queue\n"); ret = -ENOMEM; goto out; } @@ -775,9 +769,9 @@ static int bcm6368_enetsw_open(struct net_device *dev) dmas_writel(priv, 0, DMAS_SRAM4_REG, priv->tx_chan); /* set dma maximum burst len */ - 
dmac_writel(priv, priv->dma_maxburst, + dmac_writel(priv, ENETSW_DMA_MAXBURST, DMAC_MAXBURST_REG, priv->rx_chan); - dmac_writel(priv, priv->dma_maxburst, + dmac_writel(priv, ENETSW_DMA_MAXBURST, DMAC_MAXBURST_REG, priv->tx_chan); /* set flow control low/high threshold to 1/3 / 2/3 */ @@ -808,8 +802,8 @@ static int bcm6368_enetsw_open(struct net_device *dev) dmac_writel(priv, DMAC_IR_PKTDONE_MASK, DMAC_IRMASK_REG, priv->tx_chan); - netif_carrier_on(dev); - netif_start_queue(dev); + netif_carrier_on(ndev); + netif_start_queue(ndev); return 0; @@ -821,7 +815,7 @@ out: continue; desc = &priv->rx_desc_cpu[i]; - dma_unmap_single(kdev, desc->address, priv->rx_buf_size, + dma_unmap_single(dev, desc->address, priv->rx_buf_size, DMA_FROM_DEVICE); skb_free_frag(priv->rx_buf[i]); } @@ -831,31 +825,32 @@ out_free_tx_skb: kfree(priv->tx_skb); out_free_tx_ring: - dma_free_coherent(kdev, priv->tx_desc_alloc_size, + dma_free_coherent(dev, priv->tx_desc_alloc_size, priv->tx_desc_cpu, priv->tx_desc_dma); out_free_rx_ring: - dma_free_coherent(kdev, priv->rx_desc_alloc_size, + dma_free_coherent(dev, priv->rx_desc_alloc_size, priv->rx_desc_cpu, priv->rx_desc_dma); out_freeirq_tx: if (priv->irq_tx != -1) - free_irq(priv->irq_tx, dev); + free_irq(priv->irq_tx, ndev); out_freeirq_rx: - free_irq(priv->irq_rx, dev); + free_irq(priv->irq_rx, ndev); out_freeirq: return ret; } -static int bcm6368_enetsw_stop(struct net_device *dev) +static int bcm6368_enetsw_stop(struct net_device *ndev) { - struct bcm6368_enetsw *priv = netdev_priv(dev); - struct device *kdev = &priv->pdev->dev; + struct bcm6368_enetsw *priv = netdev_priv(ndev); + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; int i; - netif_stop_queue(dev); + netif_stop_queue(ndev); napi_disable(&priv->napi); del_timer_sync(&priv->rx_timeout); @@ -868,7 +863,7 @@ static int bcm6368_enetsw_stop(struct net_device *dev) bcm6368_enetsw_disable_dma(priv, priv->rx_chan); /* force reclaim of all tx buffers */ - 
bcm6368_enetsw_tx_reclaim(dev, 1, 0); + bcm6368_enetsw_tx_reclaim(ndev, 1, 0); /* free the rx buffer ring */ for (i = 0; i < priv->rx_ring_size; i++) { @@ -878,7 +873,7 @@ static int bcm6368_enetsw_stop(struct net_device *dev) continue; desc = &priv->rx_desc_cpu[i]; - dma_unmap_single_attrs(kdev, desc->address, priv->rx_buf_size, + dma_unmap_single_attrs(dev, desc->address, priv->rx_buf_size, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); skb_free_frag(priv->rx_buf[i]); @@ -887,15 +882,15 @@ static int bcm6368_enetsw_stop(struct net_device *dev) /* free remaining allocated memory */ kfree(priv->rx_buf); kfree(priv->tx_skb); - dma_free_coherent(kdev, priv->rx_desc_alloc_size, + dma_free_coherent(dev, priv->rx_desc_alloc_size, priv->rx_desc_cpu, priv->rx_desc_dma); - dma_free_coherent(kdev, priv->tx_desc_alloc_size, + dma_free_coherent(dev, priv->tx_desc_alloc_size, priv->tx_desc_cpu, priv->tx_desc_dma); if (priv->irq_tx != -1) - free_irq(priv->irq_tx, dev); - free_irq(priv->irq_rx, dev); + free_irq(priv->irq_tx, ndev); + free_irq(priv->irq_rx, ndev); - netdev_reset_queue(dev); + netdev_reset_queue(ndev); return 0; } @@ -908,19 +903,25 @@ static const struct net_device_ops bcm6368_enetsw_ops = { static int bcm6368_enetsw_probe(struct platform_device *pdev) { - struct bcm6368_enetsw *priv; struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; + struct bcm6368_enetsw *priv; struct net_device *ndev; struct resource *res; unsigned i; + int num_resets; int ret; - ndev = alloc_etherdev(sizeof(*priv)); + ndev = devm_alloc_etherdev(dev, sizeof(*priv)); if (!ndev) return -ENOMEM; + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, dev); + priv = netdev_priv(ndev); + priv->pdev = pdev; + priv->net_dev = ndev; priv->num_pms = of_count_phandle_with_args(node, "power-domains", "#power-domain-cells"); @@ -960,18 +961,18 @@ static int bcm6368_enetsw_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma"); 
priv->dma_base = devm_ioremap_resource(dev, res); - if (IS_ERR(priv->dma_base)) + if (IS_ERR_OR_NULL(priv->dma_base)) return PTR_ERR(priv->dma_base); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma-channels"); priv->dma_chan = devm_ioremap_resource(dev, res); - if (IS_ERR(priv->dma_chan)) + if (IS_ERR_OR_NULL(priv->dma_chan)) return PTR_ERR(priv->dma_chan); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma-sram"); priv->dma_sram = devm_ioremap_resource(dev, res); - if (IS_ERR(priv->dma_sram)) + if (IS_ERR_OR_NULL(priv->dma_sram)) return PTR_ERR(priv->dma_sram); priv->irq_rx = platform_get_irq_byname(pdev, "rx"); @@ -992,15 +993,8 @@ static int bcm6368_enetsw_probe(struct platform_device *pdev) priv->rx_ring_size = ENETSW_DEF_RX_DESC; priv->tx_ring_size = ENETSW_DEF_TX_DESC; - - priv->dma_maxburst = ENETSW_DMA_MAXBURST; - priv->copybreak = ENETSW_DEF_CPY_BREAK; - priv->dma_chan_en_mask = DMAC_CHANCFG_EN_MASK; - priv->dma_chan_int_mask = DMAC_IR_PKTDONE_MASK; - priv->dma_chan_width = DMA_CHAN_WIDTH; - of_get_mac_address(node, ndev->dev_addr); if (is_valid_ether_addr(ndev->dev_addr)) { dev_info(dev, "mtd mac %pM\n", ndev->dev_addr); @@ -1010,7 +1004,7 @@ static int bcm6368_enetsw_probe(struct platform_device *pdev) } priv->rx_buf_size = ALIGN(ndev->mtu + ENETSW_MTU_OVERHEAD, - priv->dma_maxburst * 4); + ENETSW_DMA_MAXBURST * 4); priv->rx_frag_size = ENETSW_FRAG_SIZE(priv->rx_buf_size); @@ -1018,14 +1012,14 @@ static int bcm6368_enetsw_probe(struct platform_device *pdev) if (priv->num_clocks) { priv->clock = devm_kcalloc(dev, priv->num_clocks, sizeof(struct clk *), GFP_KERNEL); - if (!priv->clock) - return -ENOMEM; + if (IS_ERR_OR_NULL(priv->clock)) + return PTR_ERR(priv->clock); } for (i = 0; i < priv->num_clocks; i++) { priv->clock[i] = of_clk_get(node, i); if (IS_ERR(priv->clock[i])) { dev_err(dev, "error getting clock %d\n", i); - return -EINVAL; + return PTR_ERR(priv->clock[i]); } ret = clk_prepare_enable(priv->clock[i]); @@ -1035,20 
+1029,24 @@ static int bcm6368_enetsw_probe(struct platform_device *pdev) } } - priv->num_resets = of_count_phandle_with_args(node, "resets", - "#reset-cells"); + num_resets = of_count_phandle_with_args(node, "resets", + "#reset-cells"); + if (num_resets > 0) + priv->num_resets = num_resets; + else + priv->num_resets = 0; if (priv->num_resets) { priv->reset = devm_kcalloc(dev, priv->num_resets, sizeof(struct reset_control *), GFP_KERNEL); - if (!priv->reset) - return -ENOMEM; + if (IS_ERR_OR_NULL(priv->reset)) + return PTR_ERR(priv->reset); } for (i = 0; i < priv->num_resets; i++) { priv->reset[i] = devm_reset_control_get_by_index(dev, i); if (IS_ERR(priv->reset[i])) { dev_err(dev, "error getting reset %d\n", i); - return -EINVAL; + return PTR_ERR(priv->reset[i]); } ret = reset_control_reset(priv->reset[i]); @@ -1068,16 +1066,16 @@ static int bcm6368_enetsw_probe(struct platform_device *pdev) ndev->mtu = ETH_DATA_LEN + ENETSW_TAG_SIZE; ndev->max_mtu = ETH_DATA_LEN + ENETSW_TAG_SIZE; netif_napi_add(ndev, &priv->napi, bcm6368_enetsw_poll, 16); - SET_NETDEV_DEV(ndev, dev); - ret = register_netdev(ndev); - if (ret) + ret = devm_register_netdev(dev, ndev); + if (ret) { + netif_napi_del(&priv->napi); goto out_disable_clk; + } netif_carrier_off(ndev); - platform_set_drvdata(pdev, ndev); - priv->pdev = pdev; - priv->net_dev = ndev; + + dev_info(dev, "%s at 0x%px, IRQ %d\n", ndev->name, priv->dma_base, ndev->irq); return 0; @@ -1098,8 +1096,6 @@ static int bcm6368_enetsw_remove(struct platform_device *pdev) struct bcm6368_enetsw *priv = netdev_priv(ndev); unsigned int i; - unregister_netdev(ndev); - pm_runtime_put_sync(dev); for (i = 0; priv->pm && i < priv->num_pms; i++) { dev_pm_domain_detach(priv->pm[i], true); @@ -1112,8 +1108,6 @@ static int bcm6368_enetsw_remove(struct platform_device *pdev) for (i = 0; i < priv->num_clocks; i++) clk_disable_unprepare(priv->clock[i]); - free_netdev(ndev); - return 0; } diff --git 
a/target/linux/bmips/generic/base-files/etc/board.d/02_network b/target/linux/bmips/generic/base-files/etc/board.d/02_network index 238dd27939..a5995849f0 100644 --- a/target/linux/bmips/generic/base-files/etc/board.d/02_network +++ b/target/linux/bmips/generic/base-files/etc/board.d/02_network @@ -11,6 +11,9 @@ comtrend,vr-3025u) ucidef_set_bridge_device switch ucidef_set_interface_lan "lan1 lan2 lan3 lan4" ;; +huawei,hg556a-b) + ucidef_set_interface_lan "eth0" + ;; esac board_config_flush diff --git a/target/linux/bmips/nand/base-files/etc/board.d/03_gpio_switches b/target/linux/bmips/nand/base-files/etc/board.d/03_gpio_switches new file mode 100644 index 0000000000..75ac111b0c --- /dev/null +++ b/target/linux/bmips/nand/base-files/etc/board.d/03_gpio_switches @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +. /lib/functions/uci-defaults.sh + +board_config_update + +case "$(board_name)" in +sercomm,h500-s-lowi |\ +sercomm,h500-s-vfes) + ucidef_add_gpio_switch "qtn_power" "Quantenna Module Power" "480" "1" + ;; +esac + +board_config_flush + +exit 0 diff --git a/target/linux/bmips/patches-5.15/501-net-broadcom-add-BCM6348-enet-controller-driver.patch b/target/linux/bmips/patches-5.15/501-net-broadcom-add-BCM6348-enet-controller-driver.patch new file mode 100644 index 0000000000..7d278b9a33 --- /dev/null +++ b/target/linux/bmips/patches-5.15/501-net-broadcom-add-BCM6348-enet-controller-driver.patch @@ -0,0 +1,44 @@ +From 590b60fb08cb1e70fe02d3f407c6b3dbe9ad06ff Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= +Date: Mon, 1 Mar 2021 07:34:39 +0100 +Subject: [PATCH] net: broadcom: add BCM6348 enetsw controller driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This controller is present on BCM6338, BCM6348 and BCM6358 SoCs. 
+ +Signed-off-by: Álvaro Fernández Rojas +--- + drivers/net/ethernet/broadcom/Kconfig | 8 + + drivers/net/ethernet/broadcom/Makefile | 1 + + 3 files changed, 1120 insertions(+) + create mode 100644 drivers/net/ethernet/broadcom/bcm6368-enetsw.c + +--- a/drivers/net/ethernet/broadcom/Kconfig ++++ b/drivers/net/ethernet/broadcom/Kconfig +@@ -68,6 +68,14 @@ config BCM63XX_ENET + This driver supports the ethernet MACs in the Broadcom 63xx + MIPS chipset family (BCM63XX). + ++config BCM6348_ENET ++ tristate "Broadcom BCM6348 internal mac support" ++ depends on BMIPS_GENERIC || COMPILE_TEST ++ default y ++ help ++ This driver supports Ethernet controller integrated into Broadcom ++ BCM6348 family SoCs. ++ + config BCM6368_ENETSW + tristate "Broadcom BCM6368 internal mac support" + depends on BMIPS_GENERIC || COMPILE_TEST +--- a/drivers/net/ethernet/broadcom/Makefile ++++ b/drivers/net/ethernet/broadcom/Makefile +@@ -6,6 +6,7 @@ + obj-$(CONFIG_B44) += b44.o + obj-$(CONFIG_BCM4908_ENET) += bcm4908_enet.o + obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o ++obj-$(CONFIG_BCM6348_ENET) += bcm6348-enet.o + obj-$(CONFIG_BCM6368_ENETSW) += bcm6368-enetsw.o + obj-$(CONFIG_BCMGENET) += genet/ + obj-$(CONFIG_BNX2) += bnx2.o diff --git a/target/linux/generic/backport-5.15/730-18-v6.3-net-ethernet-mtk_eth_soc-fix-tx-throughput-regressio.patch b/target/linux/generic/backport-5.15/730-18-v6.3-net-ethernet-mtk_eth_soc-fix-tx-throughput-regressio.patch new file mode 100644 index 0000000000..e633f6f1fb --- /dev/null +++ b/target/linux/generic/backport-5.15/730-18-v6.3-net-ethernet-mtk_eth_soc-fix-tx-throughput-regressio.patch @@ -0,0 +1,31 @@ +From: Felix Fietkau +Date: Fri, 24 Mar 2023 14:56:58 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: fix tx throughput regression with + direct 1G links + +Using the QDMA tx scheduler to throttle tx to line speed works fine for +switch ports, but apparently caused a regression on non-switch ports. 
+ +Based on a number of tests, it seems that this throttling can be safely +dropped without re-introducing the issues on switch ports that the +tx scheduling changes resolved. + +Link: https://lore.kernel.org/netdev/trinity-92c3826f-c2c8-40af-8339-bc6d0d3ffea4-1678213958520@3c-app-gmx-bs16/ +Fixes: f63959c7eec3 ("net: ethernet: mtk_eth_soc: implement multi-queue support for per-port queues") +Reported-by: Frank Wunderlich +Reported-by: Daniel Golle +Tested-by: Daniel Golle +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -676,8 +676,6 @@ static void mtk_mac_link_up(struct phyli + break; + } + +- mtk_set_queue_speed(mac->hw, mac->id, speed); +- + /* Configure duplex */ + if (duplex == DUPLEX_FULL) + mcr |= MAC_MCR_FORCE_DPX; diff --git a/target/linux/generic/backport-5.15/733-v6.3-18-net-ethernet-mtk_eth_soc-add-support-for-MT7981.patch b/target/linux/generic/backport-5.15/733-v6.3-18-net-ethernet-mtk_eth_soc-add-support-for-MT7981.patch index bfa0df3053..066b70c5e2 100644 --- a/target/linux/generic/backport-5.15/733-v6.3-18-net-ethernet-mtk_eth_soc-add-support-for-MT7981.patch +++ b/target/linux/generic/backport-5.15/733-v6.3-18-net-ethernet-mtk_eth_soc-add-support-for-MT7981.patch @@ -34,7 +34,7 @@ mtk_eth_path_name(path), __func__, updated); --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -4757,6 +4757,26 @@ static const struct mtk_soc_data mt7629_ +@@ -4755,6 +4755,26 @@ static const struct mtk_soc_data mt7629_ }, }; @@ -61,7 +61,7 @@ static const struct mtk_soc_data mt7986_data = { .reg_map = &mt7986_reg_map, .ana_rgc3 = 0x128, -@@ -4799,6 +4819,7 @@ const struct of_device_id of_mtk_match[] +@@ -4797,6 +4817,7 @@ const struct of_device_id of_mtk_match[] { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data}, { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, { .compatible = "mediatek,mt7629-eth", 
.data = &mt7629_data}, diff --git a/target/linux/generic/backport-5.15/733-v6.3-19-net-ethernet-mtk_eth_soc-set-MDIO-bus-clock-frequenc.patch b/target/linux/generic/backport-5.15/733-v6.3-19-net-ethernet-mtk_eth_soc-set-MDIO-bus-clock-frequenc.patch index df7ee081f7..9def19d67e 100644 --- a/target/linux/generic/backport-5.15/733-v6.3-19-net-ethernet-mtk_eth_soc-set-MDIO-bus-clock-frequenc.patch +++ b/target/linux/generic/backport-5.15/733-v6.3-19-net-ethernet-mtk_eth_soc-set-MDIO-bus-clock-frequenc.patch @@ -21,7 +21,7 @@ Signed-off-by: Jakub Kicinski --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -704,8 +704,10 @@ static const struct phylink_mac_ops mtk_ +@@ -702,8 +702,10 @@ static const struct phylink_mac_ops mtk_ static int mtk_mdio_init(struct mtk_eth *eth) { @@ -32,7 +32,7 @@ Signed-off-by: Jakub Kicinski mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); if (!mii_np) { -@@ -731,6 +733,25 @@ static int mtk_mdio_init(struct mtk_eth +@@ -729,6 +731,25 @@ static int mtk_mdio_init(struct mtk_eth eth->mii_bus->parent = eth->dev; snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); diff --git a/target/linux/generic/backport-5.15/733-v6.3-20-net-ethernet-mtk_eth_soc-switch-to-external-PCS-driv.patch b/target/linux/generic/backport-5.15/733-v6.3-20-net-ethernet-mtk_eth_soc-switch-to-external-PCS-driv.patch index 1eca7dfeaf..203c5dc48b 100644 --- a/target/linux/generic/backport-5.15/733-v6.3-20-net-ethernet-mtk_eth_soc-switch-to-external-PCS-driv.patch +++ b/target/linux/generic/backport-5.15/733-v6.3-20-net-ethernet-mtk_eth_soc-switch-to-external-PCS-driv.patch @@ -60,7 +60,7 @@ Signed-off-by: Jakub Kicinski } return NULL; -@@ -3979,8 +3980,17 @@ static int mtk_unreg_dev(struct mtk_eth +@@ -3977,8 +3978,17 @@ static int mtk_unreg_dev(struct mtk_eth return 0; } @@ -78,7 +78,7 @@ Signed-off-by: Jakub Kicinski mtk_unreg_dev(eth); mtk_free_dev(eth); cancel_work_sync(ð->pending_work); -@@ -4410,6 
+4420,36 @@ void mtk_eth_set_dma_device(struct mtk_e +@@ -4408,6 +4418,36 @@ void mtk_eth_set_dma_device(struct mtk_e rtnl_unlock(); } @@ -115,7 +115,7 @@ Signed-off-by: Jakub Kicinski static int mtk_probe(struct platform_device *pdev) { struct resource *res = NULL; -@@ -4473,13 +4513,7 @@ static int mtk_probe(struct platform_dev +@@ -4471,13 +4511,7 @@ static int mtk_probe(struct platform_dev } if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { @@ -130,7 +130,7 @@ Signed-off-by: Jakub Kicinski if (err) return err; -@@ -4490,14 +4524,17 @@ static int mtk_probe(struct platform_dev +@@ -4488,14 +4522,17 @@ static int mtk_probe(struct platform_dev "mediatek,pctl"); if (IS_ERR(eth->pctl)) { dev_err(&pdev->dev, "no pctl regmap found\n"); @@ -151,7 +151,7 @@ Signed-off-by: Jakub Kicinski } if (eth->soc->offload_version) { -@@ -4657,6 +4694,8 @@ err_deinit_hw: +@@ -4655,6 +4692,8 @@ err_deinit_hw: mtk_hw_deinit(eth); err_wed_exit: mtk_wed_exit(); diff --git a/target/linux/generic/config-filter b/target/linux/generic/config-filter index 16daecf321..201b68dddf 100644 --- a/target/linux/generic/config-filter +++ b/target/linux/generic/config-filter @@ -9,6 +9,7 @@ CONFIG_CLANG_VERSION=.* # CONFIG_INLINE_.* is not set # CONFIG_LD_.* is not set CONFIG_LLD_VERSION=.* +CONFIG_PAHOLE_VERSION=.* CONFIG_PLUGIN_HOSTCC=".*" # CONFIG_SET_FS is not set # CONFIG_TASKS_.* is not set diff --git a/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch b/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch index 5dcbc6c46a..a4e3d26503 100644 --- a/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch +++ b/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch @@ -10,7 +10,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -2994,8 +2994,8 @@ static irqreturn_t 
mtk_handle_irq_rx(int +@@ -2992,8 +2992,8 @@ static irqreturn_t mtk_handle_irq_rx(int eth->rx_events++; if (likely(napi_schedule_prep(ð->rx_napi))) { @@ -20,7 +20,7 @@ Signed-off-by: Felix Fietkau } return IRQ_HANDLED; -@@ -3007,8 +3007,8 @@ static irqreturn_t mtk_handle_irq_tx(int +@@ -3005,8 +3005,8 @@ static irqreturn_t mtk_handle_irq_tx(int eth->tx_events++; if (likely(napi_schedule_prep(ð->tx_napi))) { @@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau } return IRQ_HANDLED; -@@ -4675,6 +4675,8 @@ static int mtk_probe(struct platform_dev +@@ -4673,6 +4673,8 @@ static int mtk_probe(struct platform_dev * for NAPI to work */ init_dummy_netdev(ð->dummy_dev); diff --git a/target/linux/generic/pending-5.15/731-net-ethernet-mediatek-ppe-add-support-for-flow-accou.patch b/target/linux/generic/pending-5.15/731-net-ethernet-mediatek-ppe-add-support-for-flow-accou.patch index c7e1d3f6f0..6ca33152fc 100644 --- a/target/linux/generic/pending-5.15/731-net-ethernet-mediatek-ppe-add-support-for-flow-accou.patch +++ b/target/linux/generic/pending-5.15/731-net-ethernet-mediatek-ppe-add-support-for-flow-accou.patch @@ -53,7 +53,7 @@ v2: fix wrong variable name in return value check spotted by Denis Kirjanov --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -4644,8 +4644,8 @@ static int mtk_probe(struct platform_dev +@@ -4642,8 +4642,8 @@ static int mtk_probe(struct platform_dev for (i = 0; i < num_ppe; i++) { u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; @@ -64,7 +64,7 @@ v2: fix wrong variable name in return value check spotted by Denis Kirjanov if (!eth->ppe[i]) { err = -ENOMEM; goto err_free_dev; -@@ -4772,6 +4772,7 @@ static const struct mtk_soc_data mt7622_ +@@ -4770,6 +4770,7 @@ static const struct mtk_soc_data mt7622_ .required_pctl = false, .offload_version = 2, .hash_offset = 2, @@ -72,7 +72,7 @@ v2: fix wrong variable name in return value check spotted by Denis Kirjanov .foe_entry_size = sizeof(struct 
mtk_foe_entry) - 16, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), -@@ -4809,6 +4810,7 @@ static const struct mtk_soc_data mt7629_ +@@ -4807,6 +4808,7 @@ static const struct mtk_soc_data mt7629_ .hw_features = MTK_HW_FEATURES, .required_clks = MT7629_CLKS_BITMAP, .required_pctl = false, @@ -80,7 +80,7 @@ v2: fix wrong variable name in return value check spotted by Denis Kirjanov .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), -@@ -4829,6 +4831,7 @@ static const struct mtk_soc_data mt7981_ +@@ -4827,6 +4829,7 @@ static const struct mtk_soc_data mt7981_ .offload_version = 2, .hash_offset = 4, .foe_entry_size = sizeof(struct mtk_foe_entry), diff --git a/target/linux/generic/pending-5.15/732-00-net-ethernet-mtk_eth_soc-drop-generic-vlan-rx-offloa.patch b/target/linux/generic/pending-5.15/732-00-net-ethernet-mtk_eth_soc-drop-generic-vlan-rx-offloa.patch index b5e118f9fa..35b296a255 100644 --- a/target/linux/generic/pending-5.15/732-00-net-ethernet-mtk_eth_soc-drop-generic-vlan-rx-offloa.patch +++ b/target/linux/generic/pending-5.15/732-00-net-ethernet-mtk_eth_soc-drop-generic-vlan-rx-offloa.patch @@ -17,7 +17,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1852,9 +1852,7 @@ static int mtk_poll_rx(struct napi_struc +@@ -1850,9 +1850,7 @@ static int mtk_poll_rx(struct napi_struc while (done < budget) { unsigned int pktlen, *rxdcsum; @@ -27,7 +27,7 @@ Signed-off-by: Felix Fietkau dma_addr_t dma_addr; u32 hash, reason; int mac = 0; -@@ -1989,36 +1987,21 @@ static int mtk_poll_rx(struct napi_struc +@@ -1987,36 +1985,21 @@ static int mtk_poll_rx(struct napi_struc skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, netdev); @@ -70,7 +70,7 @@ Signed-off-by: Felix Fietkau skb_record_rx_queue(skb, 0); napi_gro_receive(napi, skb); -@@ -2833,29 +2816,11 @@ static netdev_features_t mtk_fix_feature +@@ -2831,29 +2814,11 @@ static 
netdev_features_t mtk_fix_feature static int mtk_set_features(struct net_device *dev, netdev_features_t features) { @@ -100,7 +100,7 @@ Signed-off-by: Felix Fietkau return 0; } -@@ -3169,30 +3134,6 @@ static int mtk_open(struct net_device *d +@@ -3167,30 +3132,6 @@ static int mtk_open(struct net_device *d struct mtk_eth *eth = mac->hw; int i, err; @@ -131,7 +131,7 @@ Signed-off-by: Felix Fietkau err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); if (err) { netdev_err(dev, "%s: could not attach PHY: %d\n", __func__, -@@ -3233,6 +3174,35 @@ static int mtk_open(struct net_device *d +@@ -3231,6 +3172,35 @@ static int mtk_open(struct net_device *d phylink_start(mac->phylink); netif_tx_start_all_queues(dev); @@ -167,7 +167,7 @@ Signed-off-by: Felix Fietkau return 0; } -@@ -3717,10 +3687,9 @@ static int mtk_hw_init(struct mtk_eth *e +@@ -3715,10 +3685,9 @@ static int mtk_hw_init(struct mtk_eth *e if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { val = mtk_r32(eth, MTK_CDMP_IG_CTRL); mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); @@ -180,7 +180,7 @@ Signed-off-by: Felix Fietkau /* set interrupt delays based on current Net DIM sample */ mtk_dim_rx(ð->rx_dim.work); -@@ -4367,7 +4336,7 @@ static int mtk_add_mac(struct mtk_eth *e +@@ -4365,7 +4334,7 @@ static int mtk_add_mac(struct mtk_eth *e eth->netdev[id]->hw_features |= NETIF_F_LRO; eth->netdev[id]->vlan_features = eth->soc->hw_features & diff --git a/target/linux/generic/pending-5.15/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch b/target/linux/generic/pending-5.15/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch index 72bad79c13..fbf0cb5735 100644 --- a/target/linux/generic/pending-5.15/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch +++ b/target/linux/generic/pending-5.15/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch @@ -16,7 +16,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ 
b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1425,12 +1425,28 @@ static void mtk_wake_queue(struct mtk_et +@@ -1423,12 +1423,28 @@ static void mtk_wake_queue(struct mtk_et } } @@ -45,7 +45,7 @@ Signed-off-by: Felix Fietkau bool gso = false; int tx_num; -@@ -1452,6 +1468,18 @@ static netdev_tx_t mtk_start_xmit(struct +@@ -1450,6 +1466,18 @@ static netdev_tx_t mtk_start_xmit(struct return NETDEV_TX_BUSY; } @@ -64,7 +64,7 @@ Signed-off-by: Felix Fietkau /* TSO: fill MSS info in tcp checksum field */ if (skb_is_gso(skb)) { if (skb_cow_head(skb, 0)) { -@@ -1467,8 +1495,14 @@ static netdev_tx_t mtk_start_xmit(struct +@@ -1465,8 +1493,14 @@ static netdev_tx_t mtk_start_xmit(struct } } diff --git a/target/linux/generic/pending-5.15/732-03-net-ethernet-mtk_eth_soc-fix-remaining-throughput-re.patch b/target/linux/generic/pending-5.15/732-03-net-ethernet-mtk_eth_soc-fix-remaining-throughput-re.patch new file mode 100644 index 0000000000..0aa9382b6b --- /dev/null +++ b/target/linux/generic/pending-5.15/732-03-net-ethernet-mtk_eth_soc-fix-remaining-throughput-re.patch @@ -0,0 +1,42 @@ +From: Felix Fietkau +Date: Wed, 29 Mar 2023 16:02:54 +0200 +Subject: [PATCH] net: ethernet: mtk_eth_soc: fix remaining throughput + regression + +Based on further tests, it seems that the QDMA shaper is not able to +perform shaping close to the MAC link rate without throughput loss. +This cannot be compensated by increasing the shaping rate, so it seems +to be an internal limit. + +Fix the remaining throughput regression by detecting that condition and +limiting shaping to ports with lower link speed. + +This patch intentionally ignores link speed gain from TRGMII, because +even on such links, shaping to 1000 Mbit/s incurs some throughput +degradation. 
+ +Fixes: f63959c7eec3 ("net: ethernet: mtk_eth_soc: implement multi-queue support for per-port queues") +Reported-by: Frank Wunderlich +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -667,6 +667,7 @@ static void mtk_mac_link_up(struct phyli + MAC_MCR_FORCE_RX_FC); + + /* Configure speed */ ++ mac->speed = speed; + switch (speed) { + case SPEED_2500: + case SPEED_1000: +@@ -3145,6 +3146,9 @@ found: + if (dp->index >= MTK_QDMA_NUM_QUEUES) + return NOTIFY_DONE; + ++ if (mac->speed > 0 && mac->speed <= s.base.speed) ++ s.base.speed = 0; ++ + mtk_set_queue_speed(eth, dp->index + 3, s.base.speed); + + return NOTIFY_DONE; diff --git a/target/linux/generic/pending-5.15/760-net-core-add-optional-threading-for-backlog-processi.patch b/target/linux/generic/pending-5.15/760-net-core-add-optional-threading-for-backlog-processi.patch index c26491abdb..fc83416b2a 100644 --- a/target/linux/generic/pending-5.15/760-net-core-add-optional-threading-for-backlog-processi.patch +++ b/target/linux/generic/pending-5.15/760-net-core-add-optional-threading-for-backlog-processi.patch @@ -172,6 +172,14 @@ Signed-off-by: Felix Fietkau #ifdef CONFIG_RPS remsd = oldsd->rps_ipi_list; oldsd->rps_ipi_list = NULL; +@@ -11706,6 +11784,7 @@ static int __init net_dev_init(void) + sd->cpu = i; + #endif + ++ INIT_LIST_HEAD(&sd->backlog.poll_list); + init_gro_hash(&sd->backlog); + sd->backlog.poll = process_backlog; + sd->backlog.weight = weight_p; --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -28,6 +28,7 @@ static int int_3600 = 3600; diff --git a/target/linux/mediatek/image/filogic.mk b/target/linux/mediatek/image/filogic.mk index 58a912d67e..1179a08d60 100644 --- a/target/linux/mediatek/image/filogic.mk +++ b/target/linux/mediatek/image/filogic.mk @@ -44,7 +44,7 @@ define Device/asus_tuf-ax4200 DEVICE_DTS := mt7986a-asus-tuf-ax4200 DEVICE_DTS_DIR := ../dts DEVICE_DTS_LOADADDR := 
0x47000000 - DEVICE_PACKAGES := kmod-usb3 + DEVICE_PACKAGES := kmod-usb3 kmod-mt7986-firmware mt7986-wo-firmware IMAGES := sysupgrade.bin KERNEL_LOADADDR := 0x48000000 KERNEL = kernel-bin | lzma | \ diff --git a/target/linux/mediatek/patches-5.15/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch b/target/linux/mediatek/patches-5.15/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch index ffa98e3f0d..4187557b78 100644 --- a/target/linux/mediatek/patches-5.15/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch +++ b/target/linux/mediatek/patches-5.15/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch @@ -103,7 +103,7 @@ Signed-off-by: David S. Miller ret = mtk_mdio_busy_wait(eth); if (ret < 0) -@@ -730,6 +773,7 @@ static int mtk_mdio_init(struct mtk_eth +@@ -729,6 +772,7 @@ static int mtk_mdio_init(struct mtk_eth eth->mii_bus->name = "mdio"; eth->mii_bus->read = mtk_mdio_read; eth->mii_bus->write = mtk_mdio_write; diff --git a/target/linux/ramips/patches-5.15/700-net-ethernet-mediatek-support-net-labels.patch b/target/linux/ramips/patches-5.15/700-net-ethernet-mediatek-support-net-labels.patch index c3e0a342a1..285e24429a 100644 --- a/target/linux/ramips/patches-5.15/700-net-ethernet-mediatek-support-net-labels.patch +++ b/target/linux/ramips/patches-5.15/700-net-ethernet-mediatek-support-net-labels.patch @@ -14,7 +14,7 @@ Signed-off-by: René van Dorst --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -4259,6 +4259,7 @@ static const struct net_device_ops mtk_n +@@ -4261,6 +4261,7 @@ static const struct net_device_ops mtk_n static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) { @@ -22,7 +22,7 @@ Signed-off-by: René van Dorst const __be32 *_id = of_get_property(np, "reg", NULL); phy_interface_t phy_mode; struct phylink *phylink; -@@ -4387,6 +4388,9 @@ static int mtk_add_mac(struct mtk_eth *e +@@ -4389,6 +4390,9 @@ static int 
mtk_add_mac(struct mtk_eth *e register_netdevice_notifier(&mac->device_notifier); }