Merge Official Source

Signed-off-by: Tianling Shen <cnsztl@immortalwrt.org>

commit bdf6d3135c

include/kernel-6.1 (new file, 2 lines)
@@ -0,0 +1,2 @@
LINUX_VERSION-6.1 = .29
LINUX_KERNEL_HASH-6.1.29 = 1e736cc9bd6036379a1d915e518abd4c2c94ad0fd1ea0da961c3489308b8fcfb
@@ -75,6 +75,7 @@ endif
	rm -f $(LINUX_DIR)/.config.prev
	mv $(LINUX_DIR)/.config $(LINUX_DIR)/.config.old
	$(call Kernel/SetInitramfs/PreConfigure)
+	echo "# CONFIG_INITRAMFS_PRESERVE_MTIME is not set" >> $(LINUX_DIR)/.config
	ifneq ($(CONFIG_TARGET_ROOTFS_INITRAMFS_SEPARATE),y)
	echo 'CONFIG_INITRAMFS_ROOT_UID=$(shell id -u)' >> $(LINUX_DIR)/.config
	echo 'CONFIG_INITRAMFS_ROOT_GID=$(shell id -g)' >> $(LINUX_DIR)/.config
@@ -99,6 +100,7 @@ define Kernel/SetNoInitramfs
	grep -v INITRAMFS $(LINUX_DIR)/.config.old > $(LINUX_DIR)/.config.set
	echo 'CONFIG_INITRAMFS_SOURCE=""' >> $(LINUX_DIR)/.config.set
	echo '# CONFIG_INITRAMFS_FORCE is not set' >> $(LINUX_DIR)/.config.set
+	echo "# CONFIG_INITRAMFS_PRESERVE_MTIME is not set" >> $(LINUX_DIR)/.config.set
endef

define Kernel/Configure/Default
@@ -175,25 +175,21 @@ $(eval $(call SetupHostCommand,install,Please install GNU 'install', \
$(eval $(call SetupHostCommand,perl,Please install Perl 5.x, \
	perl --version | grep "perl.*v5"))

-$(eval $(call CleanupPython2))
-
-$(eval $(call SetupHostCommand,python,Please install Python >= 3.6, \
+$(eval $(call SetupHostCommand,python,Please install Python >= 3.7, \
	python3.11 -V 2>&1 | grep 'Python 3', \
	python3.10 -V 2>&1 | grep 'Python 3', \
	python3.9 -V 2>&1 | grep 'Python 3', \
	python3.8 -V 2>&1 | grep 'Python 3', \
	python3.7 -V 2>&1 | grep 'Python 3', \
-	python3.6 -V 2>&1 | grep 'Python 3', \
-	python3 -V 2>&1 | grep -E 'Python 3\.([6-9]|[0-9][0-9])\.?'))
+	python3 -V 2>&1 | grep -E 'Python 3\.([7-9]|[0-9][0-9])\.?'))

-$(eval $(call SetupHostCommand,python3,Please install Python >= 3.6, \
+$(eval $(call SetupHostCommand,python3,Please install Python >= 3.7, \
	python3.11 -V 2>&1 | grep 'Python 3', \
	python3.10 -V 2>&1 | grep 'Python 3', \
	python3.9 -V 2>&1 | grep 'Python 3', \
	python3.8 -V 2>&1 | grep 'Python 3', \
	python3.7 -V 2>&1 | grep 'Python 3', \
-	python3.6 -V 2>&1 | grep 'Python 3', \
-	python3 -V 2>&1 | grep -E 'Python 3\.([6-9]|[0-9][0-9])\.?'))
+	python3 -V 2>&1 | grep -E 'Python 3\.([7-9]|[0-9][0-9])\.?'))

$(eval $(call TestHostCommand,python3-distutils, \
	Please install the Python3 distutils module, \
@@ -30,6 +30,8 @@ define Require
	printf "Checking '$(1)'... "
	if $(NO_TRACE_MAKE) -f $(firstword $(MAKEFILE_LIST)) check-$(1) >/dev/null 2>/dev/null; then \
		echo 'ok.'; \
+	elif $(NO_TRACE_MAKE) -f $(firstword $(MAKEFILE_LIST)) check-$(1) >/dev/null 2>/dev/null; then \
+		echo 'updated.'; \
	else \
		echo 'failed.'; \
		echo "$(PKG_NAME): $(strip $(2))" >> $(TMP_DIR)/.prereq-error; \
@@ -75,18 +77,6 @@ define RequireCHeader
	$$(eval $$(call Require,$(1),$(2)))
endef

-define CleanupPython2
-	define Require/python2-cleanup
-		if [ -f "$(STAGING_DIR_HOST)/bin/python" ] && \
-			$(STAGING_DIR_HOST)/bin/python -V 2>&1 | \
-			grep -q 'Python 2'; then \
-			rm $(STAGING_DIR_HOST)/bin/python; \
-		fi
-	endef
-
-	$$(eval $$(call Require,python2-cleanup))
-endef
-
define QuoteHostCommand
'$(subst ','"'"',$(strip $(1)))'
endef
@@ -107,7 +97,7 @@ endef
# 3+: candidates
define SetupHostCommand
	define Require/$(1)
-		[ -f "$(STAGING_DIR_HOST)/bin/$(strip $(1))" ] && exit 0; \
+		mkdir -p "$(STAGING_DIR_HOST)/bin"; \
		for cmd in $(call QuoteHostCommand,$(3)) $(call QuoteHostCommand,$(4)) \
			$(call QuoteHostCommand,$(5)) $(call QuoteHostCommand,$(6)) \
			$(call QuoteHostCommand,$(7)) $(call QuoteHostCommand,$(8)) \
@@ -117,9 +107,13 @@ define SetupHostCommand
			bin="$$$$$$$$(PATH="$(subst $(space),:,$(filter-out $(STAGING_DIR_HOST)/%,$(subst :,$(space),$(PATH))))" \
				command -v "$$$$$$$${cmd%% *}")"; \
			if [ -x "$$$$$$$$bin" ] && eval "$$$$$$$$cmd" >/dev/null 2>/dev/null; then \
-				mkdir -p "$(STAGING_DIR_HOST)/bin"; \
+				case "$$$$$$$$(ls -dl -- $(STAGING_DIR_HOST)/bin/$(strip $(1)))" in \
+					*" -> $$$$$$$$bin"*) \
+						[ -x "$(STAGING_DIR_HOST)/bin/$(strip $(1))" ] && exit 0 \
+					;; \
+				esac; \
				ln -sf "$$$$$$$$bin" "$(STAGING_DIR_HOST)/bin/$(strip $(1))"; \
				exit 0; \
			fi; \
		done; \
		exit 1; \
@@ -27,6 +27,9 @@ araknis,an-700-ap-i-ac|\
arduino,yun|\
buffalo,bhr-4grv2|\
devolo,magic-2-wifi|\
dlink,dir-859-a1|\
+dlink,dir-859-a3|\
dlink,dir-869-a1|\
engenius,eap1200h|\
engenius,eap1750h|\
engenius,eap300-v2|\
@@ -58,6 +58,10 @@ xiaomi,redmi-router-ax6000-ubootmod)
	ubootenv_add_uci_config "$envdev" "0x0" "0x1f000" "0x20000" "1"
	ubootenv_add_uci_config "$envdev2" "0x0" "0x1f000" "0x20000" "1"
	;;
+zyxel,ex5601-t0)
+	local envdev=/dev/mtd$(find_mtd_index "u-boot-env")
+	ubootenv_add_uci_config "$envdev" "0x0" "0x20000" "0x40000" "2"
+	;;
esac

config_load ubootenv
@@ -897,10 +897,12 @@ define KernelPackage/crypto-sha1/mpc85xx
  AUTOLOAD+=$(call AutoLoad,09,sha1-ppc-spe)
endef

+ifndef CONFIG_TARGET_uml
define KernelPackage/crypto-sha1/x86_64
  FILES+=$(LINUX_DIR)/arch/x86/crypto/sha1-ssse3.ko
  AUTOLOAD+=$(call AutoLoad,09,sha1-ssse3)
endef
+endif

ifdef KernelPackage/crypto-sha1/$(ARCH)
  KernelPackage/crypto-sha1/$(CRYPTO_TARGET)=\
@@ -935,10 +937,12 @@ define KernelPackage/crypto-sha256/mpc85xx
  AUTOLOAD+=$(call AutoLoad,09,sha256-ppc-spe)
endef

+ifndef CONFIG_TARGET_uml
define KernelPackage/crypto-sha256/x86_64
  FILES+=$(LINUX_DIR)/arch/x86/crypto/sha256-ssse3.ko
  AUTOLOAD+=$(call AutoLoad,09,sha256-ssse3)
endef
+endif

ifdef KernelPackage/crypto-sha256/$(ARCH)
  KernelPackage/crypto-sha256/$(CRYPTO_TARGET)=\
@@ -977,10 +981,12 @@ endef

KernelPackage/crypto-sha512/tegra=$(KernelPackage/crypto-sha512/arm)

+ifndef CONFIG_TARGET_uml
define KernelPackage/crypto-sha512/x86_64
  FILES+=$(LINUX_DIR)/arch/x86/crypto/sha512-ssse3.ko
  AUTOLOAD+=$(call AutoLoad,09,sha512-ssse3)
endef
+endif

ifdef KernelPackage/crypto-sha512/$(ARCH)
  KernelPackage/crypto-sha512/$(CRYPTO_TARGET)=\
@@ -10,7 +10,7 @@ FS_MENU:=Filesystems
define KernelPackage/fs-9p
  SUBMENU:=$(FS_MENU)
  TITLE:=Plan 9 Resource Sharing Support
-  DEPENDS:=+kmod-9pnet
+  DEPENDS:=+kmod-9pnet +LINUX_6_1:kmod-fs-netfs
  KCONFIG:=\
	CONFIG_9P_FS \
	CONFIG_9P_FS_POSIX_ACL=n \
@@ -269,7 +269,9 @@ define KernelPackage/fs-fscache
	CONFIG_FSCACHE_OBJECT_LIST=n \
	CONFIG_CACHEFILES \
	CONFIG_CACHEFILES_DEBUG=n \
-	CONFIG_CACHEFILES_HISTOGRAM=n
+	CONFIG_CACHEFILES_HISTOGRAM=n \
+	CONFIG_CACHEFILES_ERROR_INJECTION=n@ge5.17 \
+	CONFIG_CACHEFILES_ONDEMAND=n@ge5.19
  FILES:= \
	$(LINUX_DIR)/fs/fscache/fscache.ko \
	$(LINUX_DIR)/fs/cachefiles/cachefiles.ko
@@ -52,7 +52,7 @@ define KernelPackage/hwmon-adt7410
	$(LINUX_DIR)/drivers/hwmon/adt7x10.ko \
	$(LINUX_DIR)/drivers/hwmon/adt7410.ko
  AUTOLOAD:=$(call AutoLoad,60,adt7x10 adt7410)
-  $(call AddDepends/hwmon,+kmod-i2c-core)
+  $(call AddDepends/hwmon,+kmod-i2c-core +LINUX_6_1:kmod-regmap-core)
endef

define KernelPackage/hwmon-adt7410/description
@@ -84,6 +84,20 @@ endef
$(eval $(call KernelPackage,i2c-algo-pcf))


+I2C_CCGS_UCSI_MODULES:= \
+  CONFIG_I2C_CCGX_UCSI:drivers/i2c/busses/i2c-ccgx-ucsi
+
+define KernelPackage/i2c-ccgs-ucsi
+  $(call i2c_defaults,$(I2C_CCGS_UCSI_MODULES),58)
+  TITLE:=Cypress CCGx Type-C controller
+  DEPENDS:=+kmod-i2c-core +kmod-regmap-core
+  HIDDEN:=y
+endef
+
+$(eval $(call KernelPackage,i2c-ccgs-ucsi))
+
+
I2C_DWCORE_MODULES:= \
  CONFIG_I2C_DESIGNWARE_CORE:drivers/i2c/busses/i2c-designware-core

@@ -103,7 +117,7 @@ I2C_DWPCI_MODULES:= \
define KernelPackage/i2c-designware-pci
  $(call i2c_defaults,$(I2C_DWPCI_MODULES),59)
  TITLE:=Synopsys DesignWare PCI
-  DEPENDS:=@PCI_SUPPORT +kmod-i2c-designware-core
+  DEPENDS:=@PCI_SUPPORT +kmod-i2c-designware-core +kmod-i2c-ccgs-ucsi
endef

define KernelPackage/i2c-designware-pci/description
@@ -134,6 +134,7 @@ define KernelPackage/lib-zstd
  FILES:= \
	$(LINUX_DIR)/crypto/zstd.ko \
	$(LINUX_DIR)/lib/xxhash.ko \
+	$(LINUX_DIR)/lib/zstd/zstd_common.ko@ge6.1 \
	$(LINUX_DIR)/lib/zstd/zstd_compress.ko \
	$(LINUX_DIR)/lib/zstd/zstd_decompress.ko
  AUTOLOAD:=$(call AutoProbe,xxhash zstd zstd_compress zstd_decompress)
@@ -1468,13 +1468,15 @@ $(eval $(call KernelPackage,sfc-falcon))
define KernelPackage/wwan
  SUBMENU:=$(NETWORK_DEVICES_MENU)
  TITLE:=WWAN Driver Core
-  KCONFIG:=CONFIG_WWAN
+  KCONFIG:= \
+	CONFIG_WWAN \
+	CONFIG_WWAN_DEBUGFS=y@ge5.17
  FILES:=$(LINUX_DIR)/drivers/net/wwan/wwan.ko
  AUTOLOAD:=$(call AutoProbe,wwan)
endef

define KernelPackage/wwan/description
-  his driver provides a common framework for WWAN drivers.
+  This driver provides a common framework for WWAN drivers.
endef

$(eval $(call KernelPackage,wwan))
@@ -1164,15 +1164,15 @@ define KernelPackage/nft-offload
  DEPENDS:=@IPV6 +kmod-nf-flow +kmod-nft-nat
  KCONFIG:= \
	CONFIG_NF_FLOW_TABLE_INET \
-	CONFIG_NF_FLOW_TABLE_IPV4 \
-	CONFIG_NF_FLOW_TABLE_IPV6 \
+	CONFIG_NF_FLOW_TABLE_IPV4@lt5.17 \
+	CONFIG_NF_FLOW_TABLE_IPV6@lt5.17 \
	CONFIG_NFT_FLOW_OFFLOAD
  FILES:= \
	$(LINUX_DIR)/net/netfilter/nf_flow_table_inet.ko \
-	$(LINUX_DIR)/net/ipv4/netfilter/nf_flow_table_ipv4.ko \
-	$(LINUX_DIR)/net/ipv6/netfilter/nf_flow_table_ipv6.ko \
+	$(LINUX_DIR)/net/ipv4/netfilter/nf_flow_table_ipv4.ko@lt5.17 \
+	$(LINUX_DIR)/net/ipv6/netfilter/nf_flow_table_ipv6.ko@lt5.17 \
	$(LINUX_DIR)/net/netfilter/nft_flow_offload.ko
-  AUTOLOAD:=$(call AutoProbe,nf_flow_table_inet nf_flow_table_ipv4 nf_flow_table_ipv6 nft_flow_offload)
+  AUTOLOAD:=$(call AutoProbe,nf_flow_table_inet nf_flow_table_ipv4@lt5.17 nf_flow_table_ipv6@lt5.17 nft_flow_offload)
endef

$(eval $(call KernelPackage,nft-offload))
@@ -92,7 +92,9 @@ define KernelPackage/vxlan
	+kmod-udptunnel4 \
	+IPV6:kmod-udptunnel6
  KCONFIG:=CONFIG_VXLAN
-  FILES:=$(LINUX_DIR)/drivers/net/vxlan.ko
+  FILES:= \
+	$(LINUX_DIR)/drivers/net/vxlan.ko@lt5.18 \
+	$(LINUX_DIR)/drivers/net/vxlan/vxlan.ko@ge5.18
  AUTOLOAD:=$(call AutoLoad,13,vxlan)
endef
@@ -1314,7 +1316,8 @@ define KernelPackage/9pnet
	CONFIG_NET_9P \
	CONFIG_NET_9P_DEBUG=n \
	CONFIG_NET_9P_XEN=n \
-	CONFIG_NET_9P_VIRTIO
+	CONFIG_NET_9P_VIRTIO \
+	CONFIG_NET_9P_FD=n@ge5.17
  FILES:= \
	$(LINUX_DIR)/net/9p/9pnet.ko \
	$(LINUX_DIR)/net/9p/9pnet_virtio.ko
@@ -57,7 +57,8 @@ define KernelPackage/bluetooth
	$(LINUX_DIR)/drivers/bluetooth/hci_uart.ko \
	$(LINUX_DIR)/drivers/bluetooth/btusb.ko \
	$(LINUX_DIR)/drivers/bluetooth/btintel.ko \
-	$(LINUX_DIR)/drivers/bluetooth/btrtl.ko
+	$(LINUX_DIR)/drivers/bluetooth/btrtl.ko \
+	$(LINUX_DIR)/drivers/bluetooth/btmtk.ko@ge5.17
  AUTOLOAD:=$(call AutoProbe,bluetooth rfcomm bnep hidp hci_uart btusb)
endef

@@ -1156,7 +1156,9 @@ $(eval $(call KernelPackage,usb-net-aqc111))

define KernelPackage/usb-net-asix
  TITLE:=Kernel module for USB-to-Ethernet Asix convertors
-  DEPENDS:=+kmod-libphy +kmod-net-selftests +kmod-mdio-devres +kmod-phy-ax88796b
+  DEPENDS:= \
+	+kmod-libphy +kmod-net-selftests +kmod-mdio-devres +kmod-phy-ax88796b \
+	+LINUX_6_1:kmod-phylink
  KCONFIG:=CONFIG_USB_NET_AX8817X
  FILES:=$(LINUX_DIR)/drivers/$(USBNET_DIR)/asix.ko
  AUTOLOAD:=$(call AutoProbe,asix)
@@ -11,7 +11,7 @@ include $(INCLUDE_DIR)/kernel.mk
PKG_NAME:=mac80211

PKG_VERSION:=6.1.24
-PKG_RELEASE:=2
+PKG_RELEASE:=3
# PKG_SOURCE_URL:=@KERNEL/linux/kernel/projects/backports/stable/v5.15.58/
PKG_SOURCE_URL:=http://mirror2.openwrt.org/sources/
PKG_HASH:=5d39aca7e34c33cb9b3e366117b2e86841b7bdd37933679d6b1e61be6b150648
@@ -1,64 +0,0 @@
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com>
Date: Mon, 8 Jun 2015 16:11:40 +0200
Subject: [PATCH] brcmfmac: register wiphy(s) during module_init
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is needed by OpenWrt which expects all PHYs to be created after
module loads successfully.

Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
---

--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -459,6 +459,7 @@ struct brcmf_fw {
 	u32 curpos;
 	unsigned int board_index;
 	void (*done)(struct device *dev, int err, struct brcmf_fw_request *req);
+	struct completion *completion;
 };

 #ifdef CONFIG_EFI
@@ -686,6 +687,8 @@ static void brcmf_fw_request_done(const
 		fwctx->req = NULL;
 	}
 	fwctx->done(fwctx->dev, ret, fwctx->req);
+	if (fwctx->completion)
+		complete(fwctx->completion);
 	kfree(fwctx);
 }

@@ -751,6 +754,8 @@ int brcmf_fw_get_firmwares(struct device
 {
 	struct brcmf_fw_item *first = &req->items[0];
 	struct brcmf_fw *fwctx;
+	struct completion completion;
+	unsigned long time_left;
 	char *alt_path = NULL;
 	int ret;

@@ -768,6 +773,9 @@ int brcmf_fw_get_firmwares(struct device
 	fwctx->dev = dev;
 	fwctx->req = req;
 	fwctx->done = fw_cb;
+
+	init_completion(&completion);
+	fwctx->completion = &completion;

 	/* First try alternative board-specific path if any */
 	if (fwctx->req->board_types[0])
@@ -787,6 +795,12 @@ int brcmf_fw_get_firmwares(struct device
 	if (ret < 0)
 		brcmf_fw_request_done(NULL, fwctx);

+
+	time_left = wait_for_completion_timeout(&completion,
+						msecs_to_jiffies(5000));
+	if (!time_left && fwctx)
+		fwctx->completion = NULL;
+
 	return 0;
 }
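Note: the patch deleted above serialized the otherwise asynchronous firmware fetch by blocking on a completion for up to five seconds. A minimal sketch of that same wait/complete pattern in isolation (module and symbol names here are illustrative, not taken from brcmfmac):

	#include <linux/completion.h>
	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/kthread.h>
	#include <linux/module.h>

	static DECLARE_COMPLETION(demo_done);

	/* Stand-in for the async callback that would signal completion. */
	static int demo_worker(void *data)
	{
		msleep(100);		/* pretend to load firmware */
		complete(&demo_done);	/* wake up the waiter */
		return 0;
	}

	static int __init demo_init(void)
	{
		unsigned long time_left;

		kthread_run(demo_worker, NULL, "demo_worker");

		/* Block until the worker signals, or give up after 5 s. */
		time_left = wait_for_completion_timeout(&demo_done,
							msecs_to_jiffies(5000));
		return time_left ? 0 : -ETIMEDOUT;
	}

	static void __exit demo_exit(void)
	{
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");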
@@ -6,12 +6,12 @@
include $(TOPDIR)/rules.mk

PKG_NAME:=libcap
-PKG_VERSION:=2.68
+PKG_VERSION:=2.69
PKG_RELEASE:=1

PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=@KERNEL/linux/libs/security/linux-privs/libcap2
-PKG_HASH:=90be3b6d41be5f81ae4b03ec76012b0d27c829293684f6c05b65d5f9cce724b2
+PKG_HASH:=f311f8f3dad84699d0566d1d6f7ec943a9298b28f714cae3c931dfd57492d7eb

PKG_MAINTAINER:=Paul Wassi <p.wassi@gmx.at>
PKG_LICENSE:=GPL-2.0-only
@@ -1,12 +1,12 @@
include $(TOPDIR)/rules.mk

PKG_NAME:=libxml2
-PKG_VERSION:=2.11.3
+PKG_VERSION:=2.11.4
PKG_RELEASE:=1

PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=@GNOME/libxml2/$(basename $(PKG_VERSION))
-PKG_HASH:=f1acae1664bda006cd81bfc238238217043d586d06659d5c0e3d1bcebe040870
+PKG_HASH:=737e1d7f8ab3f139729ca13a2494fd17bf30ddb4b7a427cf336252cab57f57f7

PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=COPYING
@@ -8,12 +8,12 @@
include $(TOPDIR)/rules.mk

PKG_NAME:=lldpd
-PKG_VERSION:=1.0.16
+PKG_VERSION:=1.0.17
PKG_RELEASE:=1

PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://github.com/lldpd/lldpd/releases/download/$(PKG_VERSION)/
-PKG_HASH:=7753c6e31e938923185f4e10c4ab328929729e22ee4a9687d08881fb82c092ee
+PKG_HASH:=89ae691a4917ac9e0ec3b8b2c1e634cc402d43b804f98850c73bd1c7df380882

PKG_MAINTAINER:=Stijn Tintel <stijn@linux-ipv6.be>
PKG_LICENSE:=ISC
@@ -5,14 +5,14 @@
include $(TOPDIR)/rules.mk

PKG_NAME:=busybox
-PKG_VERSION:=1.36.0
+PKG_VERSION:=1.36.1
PKG_RELEASE:=1
PKG_FLAGS:=essential

PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
PKG_SOURCE_URL:=https://www.busybox.net/downloads \
		http://sources.buildroot.net
-PKG_HASH:=542750c8af7cb2630e201780b4f99f3dcceeb06f505b479ec68241c1e6af61a5
+PKG_HASH:=b8cc24c9574d809e7279c3be349795c5d5ceb6fdf19ca709f80cde50e47de314

PKG_BUILD_DEPENDS:=BUSYBOX_CONFIG_PAM:libpam
PKG_BUILD_PARALLEL:=1
@@ -312,7 +312,11 @@ mvswitch_config_init(struct phy_device *pdev)
	priv->orig_features = dev->features;

#ifdef HEADER_MODE
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6,1,0)
+	dev->priv_flags |= IFF_NO_IP_ALIGN;
+#else
	dev->extra_priv_flags |= IFF_NO_IP_ALIGN;
+#endif
	dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
#else
	dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
@@ -372,7 +376,11 @@ mvswitch_detach(struct phy_device *pdev)
	dev->eth_mangle_rx = NULL;
	dev->eth_mangle_tx = NULL;
	dev->features = priv->orig_features;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6,1,0)
+	dev->priv_flags &= ~IFF_NO_IP_ALIGN;
+#else
	dev->extra_priv_flags &= ~IFF_NO_IP_ALIGN;
+#endif
}

static void
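The hunks above use compile-time version gating so one driver source builds against both 5.15 and 6.1 kernels. A standalone sketch of the idiom, assuming an OpenWrt-patched tree (both IFF_NO_IP_ALIGN and the pre-6.1 extra_priv_flags field exist only with OpenWrt's kernel patches, not mainline):

	#include <linux/netdevice.h>
	#include <linux/version.h>

	/* Set the no-IP-align flag wherever the running kernel keeps it. */
	static inline void demo_set_no_ip_align(struct net_device *dev)
	{
	#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
		dev->priv_flags |= IFF_NO_IP_ALIGN;
	#else
		dev->extra_priv_flags |= IFF_NO_IP_ALIGN;
	#endif
	}

LINUX_VERSION_CODE and the KERNEL_VERSION() macro come from <linux/version.h>, so the branch is resolved entirely at build time.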
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT

#include "qca9563_dlink_dir-8x9-a1.dtsi"
-#include <dt-bindings/gpio/gpio.h>

/ {
	model = "D-Link DIR-859 A1";

target/linux/ath79/dts/qca9563_dlink_dir-859-a3.dts (new file, 64 lines)
@@ -0,0 +1,64 @@
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT

#include "qca9563_dlink_dir-8x9-a1.dtsi"

#include <dt-bindings/leds/common.h>

/ {
	compatible = "dlink,dir-859-a3", "qca,qca9563";
	model = "D-Link DIR-859 A3";

	aliases {
		label-mac-device = &wmac;
		led-boot = &led_power;
		led-failsafe = &led_power;
		led-running = &led_power;
		led-upgrade = &led_power;
	};

	leds {
		compatible = "gpio-leds";

		led_power: power {
			label = "green:power";
			color = <LED_COLOR_ID_GREEN>;
			function = LED_FUNCTION_POWER;
			gpios = <&gpio 15 GPIO_ACTIVE_LOW>;
		};

		wan {
			color = <LED_COLOR_ID_GREEN>;
			function = LED_FUNCTION_WAN;
			gpios = <&gpio 16 GPIO_ACTIVE_LOW>;
		};

		wlan {
			color = <LED_COLOR_ID_GREEN>;
			function = LED_FUNCTION_WLAN;
			gpios = <&gpio 19 GPIO_ACTIVE_LOW>;
			linux,default-trigger = "phy0tpt";
		};

		wps {
			color = <LED_COLOR_ID_GREEN>;
			function = LED_FUNCTION_WPS;
			gpios = <&gpio 8 GPIO_ACTIVE_LOW>;
		};
	};
};

&ath10k {
	nvmem-cells = <&calibration_ath10k>, <&macaddr_bdcfg_ethaddr 2>;
	nvmem-cell-names = "calibration", "mac-address";
};

&bdcfg {
	macaddr_bdcfg_ethaddr: ethaddr {
		#nvmem-cell-cells = <1>;
	};
};

&wmac {
	nvmem-cells = <&calibration_ath9k>, <&macaddr_bdcfg_ethaddr 0>;
	nvmem-cell-names = "calibration", "mac-address";
};
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT

#include "qca9563_dlink_dir-8x9-a1.dtsi"
-#include <dt-bindings/gpio/gpio.h>

/ {
	model = "D-Link DIR-869 A1";
@@ -28,4 +27,3 @@
		};
	};
};
-
@@ -6,17 +6,18 @@
#include <dt-bindings/input/input.h>

/ {

	keys {
		compatible = "gpio-keys";

		wps {
			label = "wps";
			linux,code = <KEY_WPS_BUTTON>;
			gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
			debounce-interval = <60>;
		};

		reset {
			label = "reset";
			linux,code = <KEY_RESTART>;
			gpios = <&gpio 2 GPIO_ACTIVE_LOW>;
			debounce-interval = <60>;
@@ -27,7 +28,7 @@
&pcie {
	status = "okay";

-	wifi@0,0 {
+	ath10k: wifi@0,0 {
		compatible = "qcom,ath10k";
		reg = <0x0000 0 0 0 0>;

@@ -42,7 +43,7 @@
	flash@0 {
		compatible = "jedec,spi-nor";
		reg = <0>;
-		spi-max-frequency = <50000000>;
+		spi-max-frequency = <25000000>;

		partitions {
			compatible = "fixed-partitions";
@@ -51,19 +52,19 @@

			partition@0 {
				label = "bootloader";
-				reg = <0x000000 0x40000>;
+				reg = <0x000000 0x040000>;
				read-only;
			};

-			partition@40000 {
+			bdcfg: partition@40000 {
+				compatible = "u-boot,env";
				label = "bdcfg";
-				reg = <0x040000 0x10000>;
-				read-only;
+				reg = <0x040000 0x010000>;
			};

			partition@50000 {
				label = "devdata";
-				reg = <0x050000 0x10000>;
+				reg = <0x050000 0x010000>;
				read-only;

				compatible = "nvmem-cells";
@@ -81,7 +82,7 @@

			partition@60000 {
				label = "devconf";
-				reg = <0x060000 0x10000>;
+				reg = <0x060000 0x010000>;
				read-only;
			};

@@ -234,6 +234,9 @@ dlink,dap-1365-a1)
dlink,dir-859-a1)
	ucidef_set_led_switch "internet" "WAN" "green:internet" "switch0" "0x20"
	;;
+dlink,dir-859-a3)
+	ucidef_set_led_switch "wan" "WAN" "green:wan" "switch0" "0x20"
+	;;
engenius,ens202ext-v1|\
engenius,enstationac-v1)
	ucidef_set_rssimon "wlan0" "200000" "1"
@@ -291,6 +291,7 @@ ath79_setup_interfaces()
	dlink,dir-842-c2|\
	dlink,dir-842-c3|\
	dlink,dir-859-a1|\
+	dlink,dir-859-a3|\
	dlink,dir-869-a1|\
	engenius,epg5000|\
	engenius,esr1200|\
@@ -661,6 +662,10 @@ ath79_setup_macs()
		lan_mac=$(mtd_get_mac_text "devdata" 0xc9)
		wan_mac=$(mtd_get_mac_text "devdata" 0x79)
		;;
+	dlink,dir-859-a3)
+		lan_mac=$(get_mac_label)
+		wan_mac=$(macaddr_add "$lan_mac" 3)
+		;;
	qihoo,c301|\
	wd,mynet-n600|\
	wd,mynet-n750)
@@ -22,6 +22,8 @@ dlink,dap-3662-a1)
	fixwrgg
	;;
dlink,dir-629-a1|\
dlink,dir-859-a1|\
+dlink,dir-859-a3|\
+dlink,dir-869-a1|\
qihoo,c301)
	fix_seama_header
@@ -15,6 +15,8 @@ CONFIG_MARVELL_PHY=y
CONFIG_MICREL_PHY=y
CONFIG_MTD_REDBOOT_PARTS=y
CONFIG_MTD_SPLIT_EVA_FW=y
+CONFIG_NVMEM_SYSFS=y
+CONFIG_NVMEM_U_BOOT_ENV=y
CONFIG_PHY_AR7100_USB=y
CONFIG_PHY_AR7200_USB=y
CONFIG_REALTEK_PHY=y
@@ -1144,18 +1144,28 @@ define Device/dlink_dir-842-c3
endef
TARGET_DEVICES += dlink_dir-842-c3

-define Device/dlink_dir-859-a1
+define Device/dlink_dir-859-ax
  $(Device/seama)
  SOC := qca9563
  DEVICE_VENDOR := D-Link
  DEVICE_MODEL := DIR-859
-  DEVICE_VARIANT := A1
  IMAGE_SIZE := 15872k
  DEVICE_PACKAGES := kmod-usb2 kmod-ath10k-ct-smallbuffers ath10k-firmware-qca988x-ct
  SEAMA_SIGNATURE := wrgac37_dlink.2013gui_dir859
endef
+
+define Device/dlink_dir-859-a1
+  $(Device/dlink_dir-859-ax)
+  DEVICE_VARIANT := A1
+endef
TARGET_DEVICES += dlink_dir-859-a1

+define Device/dlink_dir-859-a3
+  $(Device/dlink_dir-859-ax)
+  DEVICE_VARIANT := A3
+endef
+TARGET_DEVICES += dlink_dir-859-a3
+
define Device/dlink_dir-869-a1
  $(Device/seama)
  SOC := qca9563
@@ -0,0 +1,187 @@
From 6c7f552a48b49a8612786a28a2239fbc24fac289 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Fri, 30 Dec 2022 14:52:51 -0700
Subject: [PATCH 19/29] mm: add vma_has_recency()

Add vma_has_recency() to indicate whether a VMA may exhibit temporal
locality that the LRU algorithm relies on.

This function returns false for VMAs marked by VM_SEQ_READ or
VM_RAND_READ. While the former flag indicates linear access, i.e., a
special case of spatial locality, both flags indicate a lack of temporal
locality, i.e., the reuse of an area within a relatively small duration.

"Recency" is chosen over "locality" to avoid confusion between temporal
and spatial localities.

Before this patch, the active/inactive LRU only ignored the accessed bit
from VMAs marked by VM_SEQ_READ. After this patch, the active/inactive
LRU and MGLRU share the same logic: they both ignore the accessed bit if
vma_has_recency() returns false.

For the active/inactive LRU, the following fio test showed a [6, 8]%
increase in IOPS when randomly accessing mapped files under memory
pressure.

  kb=$(awk '/MemTotal/ { print $2 }' /proc/meminfo)
  kb=$((kb - 8*1024*1024))

  modprobe brd rd_nr=1 rd_size=$kb
  dd if=/dev/zero of=/dev/ram0 bs=1M

  mkfs.ext4 /dev/ram0
  mount /dev/ram0 /mnt/
  swapoff -a

  fio --name=test --directory=/mnt/ --ioengine=mmap --numjobs=8 \
      --size=8G --rw=randrw --time_based --runtime=10m \
      --group_reporting

The discussion that led to this patch is here [1]. Additional test
results are available in that thread.

[1] https://lore.kernel.org/r/Y31s%2FK8T85jh05wH@google.com/

Link: https://lkml.kernel.org/r/20221230215252.2628425-1-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andrea Righi <andrea.righi@canonical.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/mm_inline.h |  9 +++++++++
 mm/memory.c               |  8 ++++----
 mm/rmap.c                 | 42 +++++++++++++++++----------------------
 mm/vmscan.c               |  5 ++++-
 4 files changed, 35 insertions(+), 29 deletions(-)

--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -578,4 +578,12 @@ pte_install_uffd_wp_if_needed(struct vm_
 #endif
 }

+static inline bool vma_has_recency(struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
+		return false;
+
+	return true;
+}
+
 #endif
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1435,8 +1435,7 @@ again:
 				force_flush = 1;
 				set_page_dirty(page);
 			}
-			if (pte_young(ptent) &&
-			    likely(!(vma->vm_flags & VM_SEQ_READ)))
+			if (pte_young(ptent) && likely(vma_has_recency(vma)))
 				mark_page_accessed(page);
 		}
 		rss[mm_counter(page)]--;
@@ -5170,8 +5169,8 @@ static inline void mm_account_fault(stru
 #ifdef CONFIG_LRU_GEN
 static void lru_gen_enter_fault(struct vm_area_struct *vma)
 {
-	/* the LRU algorithm doesn't apply to sequential or random reads */
-	current->in_lru_fault = !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ));
+	/* the LRU algorithm only applies to accesses with recency */
+	current->in_lru_fault = vma_has_recency(vma);
 }

 static void lru_gen_exit_fault(void)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -823,25 +823,14 @@ static bool folio_referenced_one(struct
 		}

 		if (pvmw.pte) {
-			if (lru_gen_enabled() && pte_young(*pvmw.pte) &&
-			    !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))) {
+			if (lru_gen_enabled() && pte_young(*pvmw.pte)) {
 				lru_gen_look_around(&pvmw);
 				referenced++;
 			}

 			if (ptep_clear_flush_young_notify(vma, address,
-						pvmw.pte)) {
-				/*
-				 * Don't treat a reference through
-				 * a sequentially read mapping as such.
-				 * If the folio has been used in another mapping,
-				 * we will catch it; if this other mapping is
-				 * already gone, the unmap path will have set
-				 * the referenced flag or activated the folio.
-				 */
-				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
-					referenced++;
-			}
+						pvmw.pte))
+				referenced++;
 		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 			if (pmdp_clear_flush_young_notify(vma, address,
 						pvmw.pmd))
@@ -875,7 +864,20 @@ static bool invalid_folio_referenced_vma
 	struct folio_referenced_arg *pra = arg;
 	struct mem_cgroup *memcg = pra->memcg;

-	if (!mm_match_cgroup(vma->vm_mm, memcg))
+	/*
+	 * Ignore references from this mapping if it has no recency. If the
+	 * page has been used in another mapping, we will catch it; if this
+	 * other mapping is already gone, the unmap path will have set the
+	 * referenced flag or activated the page in zap_pte_range().
+	 */
+	if (!vma_has_recency(vma))
+		return true;
+
+	/*
+	 * If we are reclaiming on behalf of a cgroup, skip counting on behalf
+	 * of references from different cgroups.
+	 */
+	if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
 		return true;

 	return false;
@@ -906,6 +908,7 @@ int folio_referenced(struct folio *folio
 		.arg = (void *)&pra,
 		.anon_lock = folio_lock_anon_vma_read,
 		.try_lock = true,
+		.invalid_vma = invalid_folio_referenced_vma,
 	};

 	*vm_flags = 0;
@@ -921,15 +924,6 @@ int folio_referenced(struct folio *folio
 		return 1;
 	}

-	/*
-	 * If we are reclaiming on behalf of a cgroup, skip
-	 * counting on behalf of references from different
-	 * cgroups
-	 */
-	if (memcg) {
-		rwc.invalid_vma = invalid_folio_referenced_vma;
-	}
-
 	rmap_walk(folio, &rwc);
 	*vm_flags = pra.vm_flags;

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3766,7 +3766,10 @@ static int should_skip_vma(unsigned long
 	if (is_vm_hugetlb_page(vma))
 		return true;

-	if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL | VM_SEQ_READ | VM_RAND_READ))
+	if (!vma_has_recency(vma))
+		return true;
+
+	if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL))
 		return true;

 	if (vma == get_gate_vma(vma->vm_mm))
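For context: vma_has_recency() keys off VM_SEQ_READ and VM_RAND_READ, which userspace sets per mapping through madvise(). A small illustrative userspace program (file path and access pattern are arbitrary):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/tmp/data.bin", O_RDONLY);	/* arbitrary file */
		struct stat st;

		if (fd < 0 || fstat(fd, &st) < 0)
			return 1;

		char *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		/* Sets VM_SEQ_READ on the VMA, so vma_has_recency() returns
		 * false and reclaim ignores the accessed bit for these pages. */
		madvise(p, st.st_size, MADV_SEQUENTIAL);

		long sum = 0;
		for (off_t i = 0; i < st.st_size; i++)	/* one sequential pass */
			sum += p[i];

		printf("%ld\n", sum);
		munmap(p, st.st_size);
		close(fd);
		return 0;
	}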
@@ -0,0 +1,125 @@
From 686c3d4f71de9e0e7a27f03a5617a712385f90cd Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Fri, 30 Dec 2022 14:52:52 -0700
Subject: [PATCH 20/29] mm: support POSIX_FADV_NOREUSE

This patch adds POSIX_FADV_NOREUSE to vma_has_recency() so that the LRU
algorithm can ignore access to mapped files marked by this flag.

The advantages of POSIX_FADV_NOREUSE are:
1. Unlike MADV_SEQUENTIAL and MADV_RANDOM, it does not alter the
   default readahead behavior.
2. Unlike MADV_SEQUENTIAL and MADV_RANDOM, it does not split VMAs and
   therefore does not take mmap_lock.
3. Unlike MADV_COLD, setting it has a negligible cost, regardless of
   how many pages it affects.

Its limitations are:
1. Like POSIX_FADV_RANDOM and POSIX_FADV_SEQUENTIAL, it currently does
   not support range. IOW, its scope is the entire file.
2. It currently does not ignore access through file descriptors.
   Specifically, for the active/inactive LRU, given a file page shared
   by two users and one of them having set POSIX_FADV_NOREUSE on the
   file, this page will be activated upon the second user accessing
   it. This corner case can be covered by checking POSIX_FADV_NOREUSE
   before calling mark_page_accessed() on the read path. But it is
   considered not worth the effort.

There have been a few attempts to support POSIX_FADV_NOREUSE, e.g., [1].
This time the goal is to fill a niche: a few desktop applications, e.g.,
large file transferring and video encoding/decoding, want fast file
streaming with mmap() rather than direct IO. Among those applications, an
SVT-AV1 regression was reported when running with MGLRU [2]. The
following test can reproduce that regression.

  kb=$(awk '/MemTotal/ { print $2 }' /proc/meminfo)
  kb=$((kb - 8*1024*1024))

  modprobe brd rd_nr=1 rd_size=$kb
  dd if=/dev/zero of=/dev/ram0 bs=1M

  mkfs.ext4 /dev/ram0
  mount /dev/ram0 /mnt/
  swapoff -a

  fallocate -l 8G /mnt/swapfile
  mkswap /mnt/swapfile
  swapon /mnt/swapfile

  wget http://ultravideo.cs.tut.fi/video/Bosphorus_3840x2160_120fps_420_8bit_YUV_Y4M.7z
  7z e -o/mnt/ Bosphorus_3840x2160_120fps_420_8bit_YUV_Y4M.7z
  SvtAv1EncApp --preset 12 -w 3840 -h 2160 \
               -i /mnt/Bosphorus_3840x2160.y4m

For MGLRU, the following change showed a [9-11]% increase in FPS,
which makes it on par with the active/inactive LRU.

  patch Source/App/EncApp/EbAppMain.c <<EOF
  31a32
  > #include <fcntl.h>
  35d35
  < #include <fcntl.h> /* _O_BINARY */
  117a118
  > posix_fadvise(config->mmap.fd, 0, 0, POSIX_FADV_NOREUSE);
  EOF

[1] https://lore.kernel.org/r/1308923350-7932-1-git-send-email-andrea@betterlinux.com/
[2] https://openbenchmarking.org/result/2209259-PTS-MGLRU8GB57

Link: https://lkml.kernel.org/r/20221230215252.2628425-2-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andrea Righi <andrea.righi@canonical.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/fs.h        | 2 ++
 include/linux/mm_inline.h | 3 +++
 mm/fadvise.c              | 5 ++++-
 3 files changed, 9 insertions(+), 1 deletion(-)

--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -166,6 +166,8 @@ typedef int (dio_iodone_t)(struct kiocb
 /* File supports DIRECT IO */
 #define FMODE_CAN_ODIRECT	((__force fmode_t)0x400000)

+#define FMODE_NOREUSE		((__force fmode_t)0x800000)
+
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY		((__force fmode_t)0x4000000)

--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -583,6 +583,9 @@ static inline bool vma_has_recency(struc
 	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
 		return false;

+	if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
+		return false;
+
 	return true;
 }

--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -80,7 +80,7 @@ int generic_fadvise(struct file *file, l
 	case POSIX_FADV_NORMAL:
 		file->f_ra.ra_pages = bdi->ra_pages;
 		spin_lock(&file->f_lock);
-		file->f_mode &= ~FMODE_RANDOM;
+		file->f_mode &= ~(FMODE_RANDOM | FMODE_NOREUSE);
 		spin_unlock(&file->f_lock);
 		break;
 	case POSIX_FADV_RANDOM:
@@ -107,6 +107,9 @@ int generic_fadvise(struct file *file, l
 		force_page_cache_readahead(mapping, file, start_index, nrpages);
 		break;
 	case POSIX_FADV_NOREUSE:
+		spin_lock(&file->f_lock);
+		file->f_mode |= FMODE_NOREUSE;
+		spin_unlock(&file->f_lock);
 		break;
 	case POSIX_FADV_DONTNEED:
 		__filemap_fdatawrite_range(mapping, offset, endbyte,
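With this patch applied, an application opts in exactly the way the SvtAv1EncApp change quoted in the commit message does. A minimal standalone sketch of that usage (argument handling and the streaming loop are placeholders):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		if (argc != 2) {
			fprintf(stderr, "usage: %s <file>\n", argv[0]);
			return 1;
		}

		int fd = open(argv[1], O_RDONLY);
		struct stat st;
		if (fd < 0 || fstat(fd, &st) < 0)
			return 1;

		/* Whole-file scope (offset 0, len 0): tell the kernel that
		 * pages accessed through this file need not be kept around.
		 * posix_fadvise() returns an error number, not -1/errno. */
		int err = posix_fadvise(fd, 0, 0, POSIX_FADV_NOREUSE);
		if (err)
			fprintf(stderr, "posix_fadvise: error %d\n", err);

		char *p = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		/* ... stream through p once, e.g. feed it to an encoder ... */

		munmap(p, st.st_size);
		close(fd);
		return 0;
	}

Before this patch, POSIX_FADV_NOREUSE was accepted but was a no-op; afterwards it sets FMODE_NOREUSE so vma_has_recency() returns false for mappings of the file.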
@@ -0,0 +1,348 @@
From 348fdbada9fb3f0bf1a53651be46319105af187f Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:18:59 -0700
Subject: [PATCH 21/29] mm: multi-gen LRU: rename lru_gen_struct to
 lru_gen_folio

Patch series "mm: multi-gen LRU: memcg LRU", v3.

Overview
========

An memcg LRU is a per-node LRU of memcgs. It is also an LRU of LRUs,
since each node and memcg combination has an LRU of folios (see
mem_cgroup_lruvec()).

Its goal is to improve the scalability of global reclaim, which is
critical to system-wide memory overcommit in data centers. Note that
memcg reclaim is currently out of scope.

Its memory bloat is a pointer to each lruvec and negligible to each
pglist_data. In terms of traversing memcgs during global reclaim, it
improves the best-case complexity from O(n) to O(1) and does not affect
the worst-case complexity O(n). Therefore, on average, it has a sublinear
complexity in contrast to the current linear complexity.

The basic structure of an memcg LRU can be understood by an analogy to
the active/inactive LRU (of folios):
1. It has the young and the old (generations), i.e., the counterparts
   to the active and the inactive;
2. The increment of max_seq triggers promotion, i.e., the counterpart
   to activation;
3. Other events trigger similar operations, e.g., offlining an memcg
   triggers demotion, i.e., the counterpart to deactivation.

In terms of global reclaim, it has two distinct features:
1. Sharding, which allows each thread to start at a random memcg (in
   the old generation) and improves parallelism;
2. Eventual fairness, which allows direct reclaim to bail out at will
   and reduces latency without affecting fairness over some time.

The commit message in patch 6 details the workflow:
https://lore.kernel.org/r/20221222041905.2431096-7-yuzhao@google.com/

The following is a simple test to quickly verify its effectiveness.

  Test design:
  1. Create multiple memcgs.
  2. Each memcg contains a job (fio).
  3. All jobs access the same amount of memory randomly.
  4. The system does not experience global memory pressure.
  5. Periodically write to the root memory.reclaim.

  Desired outcome:
  1. All memcgs have similar pgsteal counts, i.e., stddev(pgsteal)
     over mean(pgsteal) is close to 0%.
  2. The total pgsteal is close to the total requested through
     memory.reclaim, i.e., sum(pgsteal) over sum(requested) is close
     to 100%.

  Actual outcome [1]:
                                     MGLRU off    MGLRU on
  stddev(pgsteal) / mean(pgsteal)    75%          20%
  sum(pgsteal) / sum(requested)      425%         95%

  ####################################################################
  MEMCGS=128

  for ((memcg = 0; memcg < $MEMCGS; memcg++)); do
      mkdir /sys/fs/cgroup/memcg$memcg
  done

  start() {
      echo $BASHPID > /sys/fs/cgroup/memcg$memcg/cgroup.procs

      fio -name=memcg$memcg --numjobs=1 --ioengine=mmap \
          --filename=/dev/zero --size=1920M --rw=randrw \
          --rate=64m,64m --random_distribution=random \
          --fadvise_hint=0 --time_based --runtime=10h \
          --group_reporting --minimal
  }

  for ((memcg = 0; memcg < $MEMCGS; memcg++)); do
      start &
  done

  sleep 600

  for ((i = 0; i < 600; i++)); do
      echo 256m >/sys/fs/cgroup/memory.reclaim
      sleep 6
  done

  for ((memcg = 0; memcg < $MEMCGS; memcg++)); do
      grep "pgsteal " /sys/fs/cgroup/memcg$memcg/memory.stat
  done
  ####################################################################

[1]: This was obtained from running the above script (touches less
     than 256GB memory) on an EPYC 7B13 with 512GB DRAM for over an
     hour.

This patch (of 8):

The new name lru_gen_folio will be more distinct from the coming
lru_gen_memcg.

Link: https://lkml.kernel.org/r/20221222041905.2431096-1-yuzhao@google.com
Link: https://lkml.kernel.org/r/20221222041905.2431096-2-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/mm_inline.h |  4 ++--
 include/linux/mmzone.h    |  6 +++---
 mm/vmscan.c               | 34 +++++++++++++++++-----------------
 mm/workingset.c           |  4 ++--
 4 files changed, 24 insertions(+), 24 deletions(-)

--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -178,7 +178,7 @@ static inline void lru_gen_update_size(s
 	int zone = folio_zonenum(folio);
 	int delta = folio_nr_pages(folio);
 	enum lru_list lru = type * LRU_INACTIVE_FILE;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
 	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
@@ -224,7 +224,7 @@ static inline bool lru_gen_add_folio(str
 	int gen = folio_lru_gen(folio);
 	int type = folio_is_file_lru(folio);
 	int zone = folio_zonenum(folio);
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -404,7 +404,7 @@ enum {
 * The number of pages in each generation is eventually consistent and therefore
 * can be transiently negative when reset_batch_size() is pending.
 */
-struct lru_gen_struct {
+struct lru_gen_folio {
 	/* the aging increments the youngest generation number */
 	unsigned long max_seq;
 	/* the eviction increments the oldest generation numbers */
@@ -461,7 +461,7 @@ struct lru_gen_mm_state {
 struct lru_gen_mm_walk {
 	/* the lruvec under reclaim */
 	struct lruvec *lruvec;
-	/* unstable max_seq from lru_gen_struct */
+	/* unstable max_seq from lru_gen_folio */
 	unsigned long max_seq;
 	/* the next address within an mm to scan */
 	unsigned long next_addr;
@@ -524,7 +524,7 @@ struct lruvec {
 	unsigned long flags;
 #ifdef CONFIG_LRU_GEN
 	/* evictable pages divided into generations */
-	struct lru_gen_struct lrugen;
+	struct lru_gen_folio lrugen;
 	/* to concurrently iterate lru_gen_mm_list */
 	struct lru_gen_mm_state mm_state;
 #endif
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3190,7 +3190,7 @@ static int get_nr_gens(struct lruvec *lr

 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
 {
-	/* see the comment on lru_gen_struct */
+	/* see the comment on lru_gen_folio */
 	return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
 	       get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
 	       get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
@@ -3596,7 +3596,7 @@ struct ctrl_pos {
 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
 			  struct ctrl_pos *pos)
 {
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	int hist = lru_hist_from_seq(lrugen->min_seq[type]);

 	pos->refaulted = lrugen->avg_refaulted[type][tier] +
@@ -3611,7 +3611,7 @@ static void read_ctrl_pos(struct lruvec
 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
 {
 	int hist, tier;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
 	unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;

@@ -3688,7 +3688,7 @@ static int folio_update_gen(struct folio
 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
 {
 	int type = folio_is_file_lru(folio);
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
 	unsigned long new_flags, old_flags = READ_ONCE(folio->flags);

@@ -3733,7 +3733,7 @@ static void update_batch_size(struct lru
 static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
 {
 	int gen, type, zone;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	walk->batched = 0;

@@ -4253,7 +4253,7 @@ static bool inc_min_seq(struct lruvec *l
 {
 	int zone;
 	int remaining = MAX_LRU_BATCH;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);

 	if (type == LRU_GEN_ANON && !can_swap)
@@ -4289,7 +4289,7 @@ static bool try_to_inc_min_seq(struct lr
 {
 	int gen, type, zone;
 	bool success = false;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	DEFINE_MIN_SEQ(lruvec);

 	VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
@@ -4310,7 +4310,7 @@ next:
 		;
 	}

-	/* see the comment on lru_gen_struct */
+	/* see the comment on lru_gen_folio */
 	if (can_swap) {
 		min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
 		min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
@@ -4332,7 +4332,7 @@ static void inc_max_seq(struct lruvec *l
 {
 	int prev, next;
 	int type, zone;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	spin_lock_irq(&lruvec->lru_lock);

@@ -4390,7 +4390,7 @@ static bool try_to_inc_max_seq(struct lr
 	bool success;
 	struct lru_gen_mm_walk *walk;
 	struct mm_struct *mm = NULL;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));

@@ -4455,7 +4455,7 @@ static bool should_run_aging(struct lruv
 	unsigned long old = 0;
 	unsigned long young = 0;
 	unsigned long total = 0;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);

 	for (type = !can_swap; type < ANON_AND_FILE; type++) {
@@ -4740,7 +4740,7 @@ static bool sort_folio(struct lruvec *lr
 	int delta = folio_nr_pages(folio);
 	int refs = folio_lru_refs(folio);
 	int tier = lru_tier_from_refs(refs);
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);

@@ -4840,7 +4840,7 @@ static int scan_folios(struct lruvec *lr
 	int scanned = 0;
 	int isolated = 0;
 	int remaining = MAX_LRU_BATCH;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);

 	VM_WARN_ON_ONCE(!list_empty(list));
@@ -5240,7 +5240,7 @@ done:

 static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
 {
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	if (lrugen->enabled) {
 		enum lru_list lru;
@@ -5522,7 +5522,7 @@ static void lru_gen_seq_show_full(struct
 	int i;
 	int type, tier;
 	int hist = lru_hist_from_seq(seq);
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	for (tier = 0; tier < MAX_NR_TIERS; tier++) {
 		seq_printf(m, " %10d", tier);
@@ -5572,7 +5572,7 @@ static int lru_gen_seq_show(struct seq_f
 	unsigned long seq;
 	bool full = !debugfs_real_fops(m->file)->write;
 	struct lruvec *lruvec = v;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	int nid = lruvec_pgdat(lruvec)->node_id;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 	DEFINE_MAX_SEQ(lruvec);
@@ -5826,7 +5826,7 @@ void lru_gen_init_lruvec(struct lruvec *
 {
 	int i;
 	int gen, type, zone;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	lrugen->max_seq = MIN_NR_GENS + 1;
 	lrugen->enabled = lru_gen_enabled();
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -223,7 +223,7 @@ static void *lru_gen_eviction(struct fol
 	unsigned long token;
 	unsigned long min_seq;
 	struct lruvec *lruvec;
-	struct lru_gen_struct *lrugen;
+	struct lru_gen_folio *lrugen;
 	int type = folio_is_file_lru(folio);
 	int delta = folio_nr_pages(folio);
 	int refs = folio_lru_refs(folio);
@@ -252,7 +252,7 @@ static void lru_gen_refault(struct folio
 	unsigned long token;
 	unsigned long min_seq;
 	struct lruvec *lruvec;
-	struct lru_gen_struct *lrugen;
+	struct lru_gen_folio *lrugen;
 	struct mem_cgroup *memcg;
 	struct pglist_data *pgdat;
 	int type = folio_is_file_lru(folio);
@@ -0,0 +1,162 @@
From afd37e73db04c7e6b47411120ac5f6a7eca51fec Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:00 -0700
Subject: [PATCH 22/29] mm: multi-gen LRU: rename lrugen->lists[] to
 lrugen->folios[]

lru_gen_folio will be chained into per-node lists by the coming
lrugen->list.

Link: https://lkml.kernel.org/r/20221222041905.2431096-3-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/mm_inline.h |  4 ++--
 include/linux/mmzone.h    |  8 ++++----
 mm/vmscan.c               | 20 ++++++++++----------
 3 files changed, 16 insertions(+), 16 deletions(-)

--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -256,9 +256,9 @@ static inline bool lru_gen_add_folio(str
 	lru_gen_update_size(lruvec, folio, -1, gen);
 	/* for folio_rotate_reclaimable() */
 	if (reclaiming)
-		list_add_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
+		list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
 	else
-		list_add(&folio->lru, &lrugen->lists[gen][type][zone]);
+		list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

 	return true;
 }
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -312,7 +312,7 @@ enum lruvec_flags {
 * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
 * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
 * corresponding generation. The gen counter in folio->flags stores gen+1 while
- * a page is on one of lrugen->lists[]. Otherwise it stores 0.
+ * a page is on one of lrugen->folios[]. Otherwise it stores 0.
 *
 * A page is added to the youngest generation on faulting. The aging needs to
 * check the accessed bit at least twice before handing this page over to the
@@ -324,8 +324,8 @@ enum lruvec_flags {
 * rest of generations, if they exist, are considered inactive. See
 * lru_gen_is_active().
 *
- * PG_active is always cleared while a page is on one of lrugen->lists[] so that
- * the aging needs not to worry about it. And it's set again when a page
+ * PG_active is always cleared while a page is on one of lrugen->folios[] so
+ * that the aging needs not to worry about it. And it's set again when a page
 * considered active is isolated for non-reclaiming purposes, e.g., migration.
 * See lru_gen_add_folio() and lru_gen_del_folio().
 *
@@ -412,7 +412,7 @@ struct lru_gen_folio {
 	/* the birth time of each generation in jiffies */
 	unsigned long timestamps[MAX_NR_GENS];
 	/* the multi-gen LRU lists, lazily sorted on eviction */
-	struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+	struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
 	/* the multi-gen LRU sizes, eventually consistent */
 	long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
 	/* the exponential moving average of refaulted */
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4261,7 +4261,7 @@ static bool inc_min_seq(struct lruvec *l

 	/* prevent cold/hot inversion if force_scan is true */
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
-		struct list_head *head = &lrugen->lists[old_gen][type][zone];
+		struct list_head *head = &lrugen->folios[old_gen][type][zone];

 		while (!list_empty(head)) {
 			struct folio *folio = lru_to_folio(head);
@@ -4272,7 +4272,7 @@ static bool inc_min_seq(struct lruvec *l
 			VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);

 			new_gen = folio_inc_gen(lruvec, folio, false);
-			list_move_tail(&folio->lru, &lrugen->lists[new_gen][type][zone]);
+			list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);

 			if (!--remaining)
 				return false;
@@ -4300,7 +4300,7 @@ static bool try_to_inc_min_seq(struct lr
 		gen = lru_gen_from_seq(min_seq[type]);

 		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
-			if (!list_empty(&lrugen->lists[gen][type][zone]))
+			if (!list_empty(&lrugen->folios[gen][type][zone]))
 				goto next;
 		}

@@ -4765,7 +4765,7 @@ static bool sort_folio(struct lruvec *lr

 	/* promoted */
 	if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
-		list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
+		list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
 		return true;
 	}

@@ -4774,7 +4774,7 @@ static bool sort_folio(struct lruvec *lr
 		int hist = lru_hist_from_seq(lrugen->min_seq[type]);

 		gen = folio_inc_gen(lruvec, folio, false);
-		list_move_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
+		list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);

 		WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
 			   lrugen->protected[hist][type][tier - 1] + delta);
@@ -4786,7 +4786,7 @@ static bool sort_folio(struct lruvec *lr
 	if (folio_test_locked(folio) || folio_test_writeback(folio) ||
 	    (type == LRU_GEN_FILE && folio_test_dirty(folio))) {
 		gen = folio_inc_gen(lruvec, folio, true);
-		list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
+		list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
 		return true;
 	}

@@ -4853,7 +4853,7 @@ static int scan_folios(struct lruvec *lr
 	for (zone = sc->reclaim_idx; zone >= 0; zone--) {
 		LIST_HEAD(moved);
 		int skipped = 0;
-		struct list_head *head = &lrugen->lists[gen][type][zone];
+		struct list_head *head = &lrugen->folios[gen][type][zone];

 		while (!list_empty(head)) {
 			struct folio *folio = lru_to_folio(head);
@@ -5253,7 +5253,7 @@ static bool __maybe_unused state_is_vali
 		int gen, type, zone;

 		for_each_gen_type_zone(gen, type, zone) {
-			if (!list_empty(&lrugen->lists[gen][type][zone]))
+			if (!list_empty(&lrugen->folios[gen][type][zone]))
 				return false;
 		}
 	}
@@ -5298,7 +5298,7 @@ static bool drain_evictable(struct lruve
 	int remaining = MAX_LRU_BATCH;

 	for_each_gen_type_zone(gen, type, zone) {
-		struct list_head *head = &lruvec->lrugen.lists[gen][type][zone];
+		struct list_head *head = &lruvec->lrugen.folios[gen][type][zone];

 		while (!list_empty(head)) {
 			bool success;
@@ -5835,7 +5835,7 @@ void lru_gen_init_lruvec(struct lruvec *
 		lrugen->timestamps[i] = jiffies;

 	for_each_gen_type_zone(gen, type, zone)
-		INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
+		INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);

 	lruvec->mm_state.seq = MIN_NR_GENS;
 	init_waitqueue_head(&lruvec->mm_state.wait);
@@ -0,0 +1,187 @@
From ce45f1c4b32cf69b166f56ef5bc6c761e06ed4e5 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:01 -0700
Subject: [PATCH 23/29] mm: multi-gen LRU: remove eviction fairness safeguard

Recall that the eviction consumes the oldest generation: first it
bucket-sorts folios whose gen counters were updated by the aging and
reclaims the rest; then it increments lrugen->min_seq.

The current eviction fairness safeguard for global reclaim has a
dilemma: when there are multiple eligible memcgs, should it continue
or stop upon meeting the reclaim goal? If it continues, it overshoots
and increases direct reclaim latency; if it stops, it loses fairness
between memcgs it has taken memory away from and those it has yet to.

With memcg LRU, the eviction, while ensuring eventual fairness, will
stop upon meeting its goal. Therefore the current eviction fairness
safeguard for global reclaim will not be needed.

Note that memcg LRU only applies to global reclaim. For memcg reclaim,
the eviction will continue, even if it is overshooting. This becomes
unconditional due to code simplification.

Link: https://lkml.kernel.org/r/20221222041905.2431096-4-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/vmscan.c | 82 +++++++++++++++--------------------------------------
1 file changed, 23 insertions(+), 59 deletions(-)
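The stop condition this patch swaps in is small enough to sketch in ordinary userspace C. A minimal sketch, assuming simplified stand-in types and eliding compact_gap() and the kswapd bookkeeping (not the kernel code): memcg reclaim never stops early because -1 wraps to ULONG_MAX, while global direct reclaim stops as soon as the goal is met.

#include <stdbool.h>
#include <stdio.h>

/* hypothetical, simplified stand-in for the kernel's scan_control */
struct scan_control {
	unsigned long nr_to_reclaim;
	unsigned long nr_reclaimed;
	unsigned long last_reclaimed;
	bool global;  /* global vs memcg (cgroup) reclaim */
	bool kswapd;
};

/* mirrors the shape of the patch's get_nr_to_reclaim() */
static unsigned long get_nr_to_reclaim(const struct scan_control *sc)
{
	/* memcg reclaim never aborts early: -1 converts to ULONG_MAX */
	if (!sc->global)
		return -1;
	/* kswapd discounts the progress made before this pass */
	if (sc->kswapd)
		return sc->nr_to_reclaim + sc->last_reclaimed;
	return sc->nr_to_reclaim;  /* compact_gap(sc->order) omitted here */
}

int main(void)
{
	struct scan_control sc = { .nr_to_reclaim = 32, .nr_reclaimed = 40, .global = true };

	/* direct global reclaim stops once the goal is met, even if overshooting */
	printf("stop: %d\n", sc.nr_reclaimed >= get_nr_to_reclaim(&sc));
	sc.global = false;
	/* memcg reclaim keeps going for fairness */
	printf("stop: %d\n", sc.nr_reclaimed >= get_nr_to_reclaim(&sc));
	return 0;
}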
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -448,6 +448,11 @@ static bool cgroup_reclaim(struct scan_c
return sc->target_mem_cgroup;
}

+static bool global_reclaim(struct scan_control *sc)
+{
+ return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
+}
+
/**
* writeback_throttling_sane - is the usual dirty throttling mechanism available?
* @sc: scan_control in question
@@ -498,6 +503,11 @@ static bool cgroup_reclaim(struct scan_c
return false;
}

+static bool global_reclaim(struct scan_control *sc)
+{
+ return true;
+}
+
static bool writeback_throttling_sane(struct scan_control *sc)
{
return true;
@@ -4996,8 +5006,7 @@ static int isolate_folios(struct lruvec
return scanned;
}

-static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
- bool *need_swapping)
+static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
{
int type;
int scanned;
@@ -5086,9 +5095,6 @@ retry:
goto retry;
}

- if (need_swapping && type == LRU_GEN_ANON)
- *need_swapping = true;
-
return scanned;
}

@@ -5127,67 +5133,26 @@ done:
return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
}

-static bool should_abort_scan(struct lruvec *lruvec, unsigned long seq,
- struct scan_control *sc, bool need_swapping)
+static unsigned long get_nr_to_reclaim(struct scan_control *sc)
{
- int i;
- DEFINE_MAX_SEQ(lruvec);
-
- if (!current_is_kswapd()) {
- /* age each memcg at most once to ensure fairness */
- if (max_seq - seq > 1)
- return true;
-
- /* over-swapping can increase allocation latency */
- if (sc->nr_reclaimed >= sc->nr_to_reclaim && need_swapping)
- return true;
-
- /* give this thread a chance to exit and free its memory */
- if (fatal_signal_pending(current)) {
- sc->nr_reclaimed += MIN_LRU_BATCH;
- return true;
- }
-
- if (cgroup_reclaim(sc))
- return false;
- } else if (sc->nr_reclaimed - sc->last_reclaimed < sc->nr_to_reclaim)
- return false;
-
- /* keep scanning at low priorities to ensure fairness */
- if (sc->priority > DEF_PRIORITY - 2)
- return false;
-
- /*
- * A minimum amount of work was done under global memory pressure. For
- * kswapd, it may be overshooting. For direct reclaim, the allocation
- * may succeed if all suitable zones are somewhat safe. In either case,
- * it's better to stop now, and restart later if necessary.
- */
- for (i = 0; i <= sc->reclaim_idx; i++) {
- unsigned long wmark;
- struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
-
- if (!managed_zone(zone))
- continue;
-
- wmark = current_is_kswapd() ? high_wmark_pages(zone) : low_wmark_pages(zone);
- if (wmark > zone_page_state(zone, NR_FREE_PAGES))
- return false;
- }
+ /* don't abort memcg reclaim to ensure fairness */
+ if (!global_reclaim(sc))
+ return -1;

- sc->nr_reclaimed += MIN_LRU_BATCH;
+ /* discount the previous progress for kswapd */
+ if (current_is_kswapd())
+ return sc->nr_to_reclaim + sc->last_reclaimed;

- return true;
+ return max(sc->nr_to_reclaim, compact_gap(sc->order));
}

static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
struct blk_plug plug;
bool need_aging = false;
- bool need_swapping = false;
unsigned long scanned = 0;
unsigned long reclaimed = sc->nr_reclaimed;
- DEFINE_MAX_SEQ(lruvec);
+ unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);

lru_add_drain();

@@ -5211,7 +5176,7 @@ static void lru_gen_shrink_lruvec(struct
if (!nr_to_scan)
goto done;

- delta = evict_folios(lruvec, sc, swappiness, &need_swapping);
+ delta = evict_folios(lruvec, sc, swappiness);
if (!delta)
goto done;

@@ -5219,7 +5184,7 @@ static void lru_gen_shrink_lruvec(struct
if (scanned >= nr_to_scan)
break;

- if (should_abort_scan(lruvec, max_seq, sc, need_swapping))
+ if (sc->nr_reclaimed >= nr_to_reclaim)
break;

cond_resched();
@@ -5669,7 +5634,7 @@ static int run_eviction(struct lruvec *l
if (sc->nr_reclaimed >= nr_to_reclaim)
return 0;

- if (!evict_folios(lruvec, sc, swappiness, NULL))
+ if (!evict_folios(lruvec, sc, swappiness))
return 0;

cond_resched();
@@ -0,0 +1,287 @@
From e20b7386fccc18c791796eb1dc1a91eee3ccf801 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:02 -0700
Subject: [PATCH 24/29] mm: multi-gen LRU: remove aging fairness safeguard

Recall that the aging produces the youngest generation: first it scans
for accessed folios and updates their gen counters; then it increments
lrugen->max_seq.

The current aging fairness safeguard for kswapd uses two passes to
ensure the fairness to multiple eligible memcgs. On the first pass,
which is shared with the eviction, it checks whether all eligible
memcgs are low on cold folios. If so, it requires a second pass, on
which it ages all those memcgs at the same time.

With memcg LRU, the aging, while ensuring eventual fairness, will run
when necessary. Therefore the current aging fairness safeguard for
kswapd will not be needed.

Note that memcg LRU only applies to global reclaim. For memcg reclaim,
the aging can be unfair to different memcgs, i.e., their
lrugen->max_seq can be incremented at different paces.

Link: https://lkml.kernel.org/r/20221222041905.2431096-5-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/vmscan.c | 126 ++++++++++++++++++++++++----------------------------
1 file changed, 59 insertions(+), 67 deletions(-)
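The "low on cold folios" test that this patch moves to the front of should_run_aging() reduces to one comparison on the generation counters. A minimal userspace sketch, assuming MIN_NR_GENS = 2 as in the kernel:

#include <stdbool.h>
#include <stdio.h>

#define MIN_NR_GENS 2

/*
 * A lruvec is completely out of cold folios once fewer than MIN_NR_GENS
 * generations separate min_seq from max_seq; the aging must then run
 * before the eviction can make progress.
 */
static bool out_of_cold_folios(unsigned long min_seq, unsigned long max_seq)
{
	return min_seq + MIN_NR_GENS > max_seq;
}

int main(void)
{
	printf("%d\n", out_of_cold_folios(3, 4));  /* 1: only one old generation left */
	printf("%d\n", out_of_cold_folios(3, 6));  /* 0: cold generations remain */
	return 0;
}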
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -136,7 +136,6 @@ struct scan_control {

#ifdef CONFIG_LRU_GEN
/* help kswapd make better choices among multiple memcgs */
- unsigned int memcgs_need_aging:1;
unsigned long last_reclaimed;
#endif

@@ -4458,7 +4457,7 @@ done:
return true;
}

-static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq,
+static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
{
int gen, type, zone;
@@ -4467,6 +4466,13 @@ static bool should_run_aging(struct lruv
unsigned long total = 0;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ DEFINE_MIN_SEQ(lruvec);
+
+ /* whether this lruvec is completely out of cold folios */
+ if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
+ *nr_to_scan = 0;
+ return true;
+ }

for (type = !can_swap; type < ANON_AND_FILE; type++) {
unsigned long seq;
@@ -4495,8 +4501,6 @@ static bool should_run_aging(struct lruv
* stalls when the number of generations reaches MIN_NR_GENS. Hence, the
* ideal number of generations is MIN_NR_GENS+1.
*/
- if (min_seq[!can_swap] + MIN_NR_GENS > max_seq)
- return true;
if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
return false;

@@ -4515,40 +4519,54 @@ static bool should_run_aging(struct lruv
return false;
}

-static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned long min_ttl)
+static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
{
- bool need_aging;
- unsigned long nr_to_scan;
- int swappiness = get_swappiness(lruvec, sc);
+ int gen, type, zone;
+ unsigned long total = 0;
+ bool can_swap = get_swappiness(lruvec, sc);
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
DEFINE_MIN_SEQ(lruvec);

- VM_WARN_ON_ONCE(sc->memcg_low_reclaim);
+ for (type = !can_swap; type < ANON_AND_FILE; type++) {
+ unsigned long seq;

- mem_cgroup_calculate_protection(NULL, memcg);
+ for (seq = min_seq[type]; seq <= max_seq; seq++) {
+ gen = lru_gen_from_seq(seq);

- if (mem_cgroup_below_min(memcg))
- return false;
+ for (zone = 0; zone < MAX_NR_ZONES; zone++)
+ total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
+ }
+ }

- need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan);
+ /* whether the size is big enough to be helpful */
+ return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
+}

- if (min_ttl) {
- int gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
- unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
+static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc,
+ unsigned long min_ttl)
+{
+ int gen;
+ unsigned long birth;
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ DEFINE_MIN_SEQ(lruvec);

- if (time_is_after_jiffies(birth + min_ttl))
- return false;
+ VM_WARN_ON_ONCE(sc->memcg_low_reclaim);

- /* the size is likely too small to be helpful */
- if (!nr_to_scan && sc->priority != DEF_PRIORITY)
- return false;
- }
+ /* see the comment on lru_gen_folio */
+ gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
+ birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);

- if (need_aging)
- try_to_inc_max_seq(lruvec, max_seq, sc, swappiness, false);
+ if (time_is_after_jiffies(birth + min_ttl))
+ return false;

- return true;
+ if (!lruvec_is_sizable(lruvec, sc))
+ return false;
+
+ mem_cgroup_calculate_protection(NULL, memcg);
+
+ return !mem_cgroup_below_min(memcg);
}

/* to protect the working set of the last N jiffies */
@@ -4557,46 +4575,32 @@ static unsigned long lru_gen_min_ttl __r
static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
{
struct mem_cgroup *memcg;
- bool success = false;
unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);

VM_WARN_ON_ONCE(!current_is_kswapd());

sc->last_reclaimed = sc->nr_reclaimed;

- /*
- * To reduce the chance of going into the aging path, which can be
- * costly, optimistically skip it if the flag below was cleared in the
- * eviction path. This improves the overall performance when multiple
- * memcgs are available.
- */
- if (!sc->memcgs_need_aging) {
- sc->memcgs_need_aging = true;
+ /* check the order to exclude compaction-induced reclaim */
+ if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY)
return;
- }
-
- set_mm_walk(pgdat);

memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);

- if (age_lruvec(lruvec, sc, min_ttl))
- success = true;
+ if (lruvec_is_reclaimable(lruvec, sc, min_ttl)) {
+ mem_cgroup_iter_break(NULL, memcg);
+ return;
+ }

cond_resched();
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));

- clear_mm_walk();
-
- /* check the order to exclude compaction-induced reclaim */
- if (success || !min_ttl || sc->order)
- return;
-
/*
* The main goal is to OOM kill if every generation from all memcgs is
* younger than min_ttl. However, another possibility is all memcgs are
- * either below min or empty.
+ * either too small or below min.
*/
if (mutex_trylock(&oom_lock)) {
struct oom_control oc = {
@@ -5104,33 +5108,27 @@ retry:
* reclaim.
*/
static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
- bool can_swap, bool *need_aging)
+ bool can_swap)
{
unsigned long nr_to_scan;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
- DEFINE_MIN_SEQ(lruvec);

if (mem_cgroup_below_min(memcg) ||
(mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
return 0;

- *need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, can_swap, &nr_to_scan);
- if (!*need_aging)
+ if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
return nr_to_scan;

/* skip the aging path at the default priority */
if (sc->priority == DEF_PRIORITY)
- goto done;
+ return nr_to_scan;

- /* leave the work to lru_gen_age_node() */
- if (current_is_kswapd())
- return 0;
+ try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false);

- if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false))
- return nr_to_scan;
-done:
- return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
+ /* skip this lruvec as it's low on cold folios */
+ return 0;
}

static unsigned long get_nr_to_reclaim(struct scan_control *sc)
@@ -5149,9 +5147,7 @@ static unsigned long get_nr_to_reclaim(s
static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
struct blk_plug plug;
- bool need_aging = false;
unsigned long scanned = 0;
- unsigned long reclaimed = sc->nr_reclaimed;
unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);

lru_add_drain();
@@ -5172,13 +5168,13 @@ static void lru_gen_shrink_lruvec(struct
else
swappiness = 0;

- nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness, &need_aging);
+ nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
if (!nr_to_scan)
- goto done;
+ break;

delta = evict_folios(lruvec, sc, swappiness);
if (!delta)
- goto done;
+ break;

scanned += delta;
if (scanned >= nr_to_scan)
@@ -5190,10 +5186,6 @@ static void lru_gen_shrink_lruvec(struct
cond_resched();
}

- /* see the comment in lru_gen_age_node() */
- if (sc->nr_reclaimed - reclaimed >= MIN_LRU_BATCH && !need_aging)
- sc->memcgs_need_aging = false;
-done:
clear_mm_walk();

blk_finish_plug(&plug);
@@ -0,0 +1,161 @@
From 107d54931df3c28d81648122e219bf0034ef4e99 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:03 -0700
Subject: [PATCH 25/29] mm: multi-gen LRU: shuffle should_run_aging()

Move should_run_aging() next to its only caller left.

Link: https://lkml.kernel.org/r/20221222041905.2431096-6-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/vmscan.c | 124 ++++++++++++++++++++++++++--------------------------
1 file changed, 62 insertions(+), 62 deletions(-)

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4457,68 +4457,6 @@ done:
return true;
}

-static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
- struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
-{
- int gen, type, zone;
- unsigned long old = 0;
- unsigned long young = 0;
- unsigned long total = 0;
- struct lru_gen_folio *lrugen = &lruvec->lrugen;
- struct mem_cgroup *memcg = lruvec_memcg(lruvec);
- DEFINE_MIN_SEQ(lruvec);
-
- /* whether this lruvec is completely out of cold folios */
- if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
- *nr_to_scan = 0;
- return true;
- }
-
- for (type = !can_swap; type < ANON_AND_FILE; type++) {
- unsigned long seq;
-
- for (seq = min_seq[type]; seq <= max_seq; seq++) {
- unsigned long size = 0;
-
- gen = lru_gen_from_seq(seq);
-
- for (zone = 0; zone < MAX_NR_ZONES; zone++)
- size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
-
- total += size;
- if (seq == max_seq)
- young += size;
- else if (seq + MIN_NR_GENS == max_seq)
- old += size;
- }
- }
-
- /* try to scrape all its memory if this memcg was deleted */
- *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
-
- /*
- * The aging tries to be lazy to reduce the overhead, while the eviction
- * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
- * ideal number of generations is MIN_NR_GENS+1.
- */
- if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
- return false;
-
- /*
- * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
- * of the total number of pages for each generation. A reasonable range
- * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
- * aging cares about the upper bound of hot pages, while the eviction
- * cares about the lower bound of cold pages.
- */
- if (young * MIN_NR_GENS > total)
- return true;
- if (old * (MIN_NR_GENS + 2) < total)
- return true;
-
- return false;
-}
-
static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
{
int gen, type, zone;
@@ -5102,6 +5040,68 @@ retry:
return scanned;
}

+static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
+ struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
+{
+ int gen, type, zone;
+ unsigned long old = 0;
+ unsigned long young = 0;
+ unsigned long total = 0;
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ DEFINE_MIN_SEQ(lruvec);
+
+ /* whether this lruvec is completely out of cold folios */
+ if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
+ *nr_to_scan = 0;
+ return true;
+ }
+
+ for (type = !can_swap; type < ANON_AND_FILE; type++) {
+ unsigned long seq;
+
+ for (seq = min_seq[type]; seq <= max_seq; seq++) {
+ unsigned long size = 0;
+
+ gen = lru_gen_from_seq(seq);
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++)
+ size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
+
+ total += size;
+ if (seq == max_seq)
+ young += size;
+ else if (seq + MIN_NR_GENS == max_seq)
+ old += size;
+ }
+ }
+
+ /* try to scrape all its memory if this memcg was deleted */
+ *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
+
+ /*
+ * The aging tries to be lazy to reduce the overhead, while the eviction
+ * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
+ * ideal number of generations is MIN_NR_GENS+1.
+ */
+ if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
+ return false;
+
+ /*
+ * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
+ * of the total number of pages for each generation. A reasonable range
+ * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
+ * aging cares about the upper bound of hot pages, while the eviction
+ * cares about the lower bound of cold pages.
+ */
+ if (young * MIN_NR_GENS > total)
+ return true;
+ if (old * (MIN_NR_GENS + 2) < total)
+ return true;
+
+ return false;
+}
+
/*
* For future optimizations:
* 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
@@ -0,0 +1,868 @@
From fa6363828d314e837c5f79e97ea5e8c0d2f7f062 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:04 -0700
Subject: [PATCH 26/29] mm: multi-gen LRU: per-node lru_gen_folio lists

For each node, memcgs are divided into two generations: the old and
the young. For each generation, memcgs are randomly sharded into
multiple bins to improve scalability. For each bin, an RCU hlist_nulls
is virtually divided into three segments: the head, the tail and the
default.

An onlining memcg is added to the tail of a random bin in the old
generation. The eviction starts at the head of a random bin in the old
generation. The per-node memcg generation counter, whose remainder (mod
2) indexes the old generation, is incremented when all its bins become
empty.

There are four operations:
1. MEMCG_LRU_HEAD, which moves an memcg to the head of a random bin in
its current generation (old or young) and updates its "seg" to
"head";
2. MEMCG_LRU_TAIL, which moves an memcg to the tail of a random bin in
its current generation (old or young) and updates its "seg" to
"tail";
3. MEMCG_LRU_OLD, which moves an memcg to the head of a random bin in
the old generation, updates its "gen" to "old" and resets its "seg"
to "default";
4. MEMCG_LRU_YOUNG, which moves an memcg to the tail of a random bin
in the young generation, updates its "gen" to "young" and resets
its "seg" to "default".

The events that trigger the above operations are:
1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
2. The first attempt to reclaim an memcg below low, which triggers
MEMCG_LRU_TAIL;
3. The first attempt to reclaim an memcg below reclaimable size
threshold, which triggers MEMCG_LRU_TAIL;
4. The second attempt to reclaim an memcg below reclaimable size
threshold, which triggers MEMCG_LRU_YOUNG;
5. Attempting to reclaim an memcg below min, which triggers
MEMCG_LRU_YOUNG;
6. Finishing the aging on the eviction path, which triggers
MEMCG_LRU_YOUNG;
7. Offlining an memcg, which triggers MEMCG_LRU_OLD.

Note that memcg LRU only applies to global reclaim, and the
round-robin incrementing of their max_seq counters ensures the
eventual fairness to all eligible memcgs. For memcg reclaim, it still
relies on mem_cgroup_iter().

Link: https://lkml.kernel.org/r/20221222041905.2431096-7-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
include/linux/memcontrol.h | 10 +
include/linux/mm_inline.h | 17 ++
include/linux/mmzone.h | 117 +++++++++++-
mm/memcontrol.c | 16 ++
mm/page_alloc.c | 1 +
mm/vmscan.c | 373 +++++++++++++++++++++++++++++++++----
6 files changed, 499 insertions(+), 35 deletions(-)
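The gen/bin indexing described above is plain modular arithmetic. A minimal userspace sketch, assuming the same MEMCG_NR_GENS/MEMCG_NR_BINS values the patch defines, with rand() standing in for the kernel's prandom_u32_max():

#include <stdio.h>
#include <stdlib.h>

#define MEMCG_NR_GENS 2
#define MEMCG_NR_BINS 8

/* mirrors the helpers this patch adds to mm/vmscan.c */
#define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS)
#define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS)

int main(void)
{
	unsigned long seq = 5;            /* per-node memcg generation counter */
	int old = get_memcg_gen(seq);     /* remainder mod 2 indexes the old gen */
	int young = get_memcg_gen(seq + 1);
	int bin = rand() % MEMCG_NR_BINS; /* memcgs are sharded into random bins */

	printf("old gen %d, young gen %d, start bin %d\n", old, young, bin);
	return 0;
}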
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -790,6 +790,11 @@ static inline void obj_cgroup_put(struct
percpu_ref_put(&objcg->refcnt);
}

+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
+{
+ return !memcg || css_tryget(&memcg->css);
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
if (memcg)
@@ -1290,6 +1295,11 @@ static inline void obj_cgroup_put(struct
{
}

+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
+{
+ return true;
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -122,6 +122,18 @@ static inline bool lru_gen_in_fault(void
return current->in_lru_fault;
}

+#ifdef CONFIG_MEMCG
+static inline int lru_gen_memcg_seg(struct lruvec *lruvec)
+{
+ return READ_ONCE(lruvec->lrugen.seg);
+}
+#else
+static inline int lru_gen_memcg_seg(struct lruvec *lruvec)
+{
+ return 0;
+}
+#endif
+
static inline int lru_gen_from_seq(unsigned long seq)
{
return seq % MAX_NR_GENS;
@@ -297,6 +309,11 @@ static inline bool lru_gen_in_fault(void
return false;
}

+static inline int lru_gen_memcg_seg(struct lruvec *lruvec)
+{
+ return 0;
+}
+
static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
return false;
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -7,6 +7,7 @@

#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/list_nulls.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
@@ -367,6 +368,15 @@ struct page_vma_mapped_walk;
#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)

+/* see the comment on MEMCG_NR_GENS */
+enum {
+ MEMCG_LRU_NOP,
+ MEMCG_LRU_HEAD,
+ MEMCG_LRU_TAIL,
+ MEMCG_LRU_OLD,
+ MEMCG_LRU_YOUNG,
+};
+
#ifdef CONFIG_LRU_GEN

enum {
@@ -426,6 +436,14 @@ struct lru_gen_folio {
atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
/* whether the multi-gen LRU is enabled */
bool enabled;
+#ifdef CONFIG_MEMCG
+ /* the memcg generation this lru_gen_folio belongs to */
+ u8 gen;
+ /* the list segment this lru_gen_folio belongs to */
+ u8 seg;
+ /* per-node lru_gen_folio list for global reclaim */
+ struct hlist_nulls_node list;
+#endif
};

enum {
@@ -479,12 +497,87 @@ void lru_gen_init_lruvec(struct lruvec *
void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);

#ifdef CONFIG_MEMCG
+
+/*
+ * For each node, memcgs are divided into two generations: the old and the
+ * young. For each generation, memcgs are randomly sharded into multiple bins
+ * to improve scalability. For each bin, the hlist_nulls is virtually divided
+ * into three segments: the head, the tail and the default.
+ *
+ * An onlining memcg is added to the tail of a random bin in the old generation.
+ * The eviction starts at the head of a random bin in the old generation. The
+ * per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS) indexes
+ * the old generation, is incremented when all its bins become empty.
+ *
+ * There are four operations:
+ * 1. MEMCG_LRU_HEAD, which moves an memcg to the head of a random bin in its
+ * current generation (old or young) and updates its "seg" to "head";
+ * 2. MEMCG_LRU_TAIL, which moves an memcg to the tail of a random bin in its
+ * current generation (old or young) and updates its "seg" to "tail";
+ * 3. MEMCG_LRU_OLD, which moves an memcg to the head of a random bin in the old
+ * generation, updates its "gen" to "old" and resets its "seg" to "default";
+ * 4. MEMCG_LRU_YOUNG, which moves an memcg to the tail of a random bin in the
+ * young generation, updates its "gen" to "young" and resets its "seg" to
+ * "default".
+ *
+ * The events that trigger the above operations are:
+ * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
+ * 2. The first attempt to reclaim an memcg below low, which triggers
+ * MEMCG_LRU_TAIL;
+ * 3. The first attempt to reclaim an memcg below reclaimable size threshold,
+ * which triggers MEMCG_LRU_TAIL;
+ * 4. The second attempt to reclaim an memcg below reclaimable size threshold,
+ * which triggers MEMCG_LRU_YOUNG;
+ * 5. Attempting to reclaim an memcg below min, which triggers MEMCG_LRU_YOUNG;
+ * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
+ * 7. Offlining an memcg, which triggers MEMCG_LRU_OLD.
+ *
+ * Note that memcg LRU only applies to global reclaim, and the round-robin
+ * incrementing of their max_seq counters ensures the eventual fairness to all
+ * eligible memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
+ */
+#define MEMCG_NR_GENS 2
+#define MEMCG_NR_BINS 8
+
+struct lru_gen_memcg {
+ /* the per-node memcg generation counter */
+ unsigned long seq;
+ /* each memcg has one lru_gen_folio per node */
+ unsigned long nr_memcgs[MEMCG_NR_GENS];
+ /* per-node lru_gen_folio list for global reclaim */
+ struct hlist_nulls_head fifo[MEMCG_NR_GENS][MEMCG_NR_BINS];
+ /* protects the above */
+ spinlock_t lock;
+};
+
+void lru_gen_init_pgdat(struct pglist_data *pgdat);
+
void lru_gen_init_memcg(struct mem_cgroup *memcg);
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
-#endif
+void lru_gen_online_memcg(struct mem_cgroup *memcg);
+void lru_gen_offline_memcg(struct mem_cgroup *memcg);
+void lru_gen_release_memcg(struct mem_cgroup *memcg);
+void lru_gen_rotate_memcg(struct lruvec *lruvec, int op);
+
+#else /* !CONFIG_MEMCG */
+
+#define MEMCG_NR_GENS 1
+
+struct lru_gen_memcg {
+};
+
+static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
+{
+}
+
+#endif /* CONFIG_MEMCG */

#else /* !CONFIG_LRU_GEN */

+static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
+{
+}
+
static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
{
}
@@ -494,6 +587,7 @@ static inline void lru_gen_look_around(s
}

#ifdef CONFIG_MEMCG
+
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
}
@@ -501,7 +595,24 @@ static inline void lru_gen_init_memcg(st
static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
{
}
-#endif
+
+static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
+{
+}
+
+#endif /* CONFIG_MEMCG */

#endif /* CONFIG_LRU_GEN */

@@ -1219,6 +1330,8 @@ typedef struct pglist_data {
#ifdef CONFIG_LRU_GEN
/* kswap mm walk data */
struct lru_gen_mm_walk mm_walk;
+ /* lru_gen_folio list */
+ struct lru_gen_memcg memcg_lru;
#endif

CACHELINE_PADDING(_pad2_);
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -477,6 +477,16 @@ static void mem_cgroup_update_tree(struc
struct mem_cgroup_per_node *mz;
struct mem_cgroup_tree_per_node *mctz;

+ if (lru_gen_enabled()) {
+ struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
+
+ /* see the comment on MEMCG_NR_GENS */
+ if (soft_limit_excess(memcg) && lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD)
+ lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
+
+ return;
+ }
+
mctz = soft_limit_tree.rb_tree_per_node[nid];
if (!mctz)
return;
@@ -3522,6 +3532,9 @@ unsigned long mem_cgroup_soft_limit_recl
struct mem_cgroup_tree_per_node *mctz;
unsigned long excess;

+ if (lru_gen_enabled())
+ return 0;
+
if (order > 0)
return 0;

@@ -5382,6 +5395,7 @@ static int mem_cgroup_css_online(struct
if (unlikely(mem_cgroup_is_root(memcg)))
queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
2UL*HZ);
+ lru_gen_online_memcg(memcg);
return 0;
offline_kmem:
memcg_offline_kmem(memcg);
@@ -5413,6 +5427,7 @@ static void mem_cgroup_css_offline(struc
memcg_offline_kmem(memcg);
reparent_shrinker_deferred(memcg);
wb_memcg_offline(memcg);
+ lru_gen_offline_memcg(memcg);

drain_all_stock(memcg);

@@ -5424,6 +5439,7 @@ static void mem_cgroup_css_released(stru
struct mem_cgroup *memcg = mem_cgroup_from_css(css);

invalidate_reclaim_iterators(memcg);
+ lru_gen_release_memcg(memcg);
}

static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7957,6 +7957,7 @@ static void __init free_area_init_node(i
pgdat_set_deferred_range(pgdat);

free_area_init_core(pgdat);
+ lru_gen_init_pgdat(pgdat);
}

static void __init free_area_init_memoryless_node(int nid)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -54,6 +54,8 @@
#include <linux/shmem_fs.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
+#include <linux/rculist_nulls.h>
+#include <linux/random.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -134,11 +136,6 @@ struct scan_control {
/* Always discard instead of demoting to lower tier memory */
unsigned int no_demotion:1;

-#ifdef CONFIG_LRU_GEN
- /* help kswapd make better choices among multiple memcgs */
- unsigned long last_reclaimed;
-#endif
-
/* Allocation order */
s8 order;

@@ -3160,6 +3157,9 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_ca
for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)

+#define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS)
+#define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS)
+
static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
{
struct pglist_data *pgdat = NODE_DATA(nid);
@@ -4443,8 +4443,7 @@ done:
if (sc->priority <= DEF_PRIORITY - 2)
wait_event_killable(lruvec->mm_state.wait,
max_seq < READ_ONCE(lrugen->max_seq));
-
- return max_seq < READ_ONCE(lrugen->max_seq);
+ return false;
}

VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq));
@@ -4517,8 +4516,6 @@ static void lru_gen_age_node(struct pgli

VM_WARN_ON_ONCE(!current_is_kswapd());

- sc->last_reclaimed = sc->nr_reclaimed;
-
/* check the order to exclude compaction-induced reclaim */
if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY)
return;
@@ -5107,8 +5104,7 @@ static bool should_run_aging(struct lruv
* 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
* reclaim.
*/
-static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
- bool can_swap)
+static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool can_swap)
{
unsigned long nr_to_scan;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
@@ -5125,10 +5121,8 @@ static unsigned long get_nr_to_scan(stru
if (sc->priority == DEF_PRIORITY)
return nr_to_scan;

- try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false);
-
/* skip this lruvec as it's low on cold folios */
- return 0;
+ return try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false) ? -1 : 0;
}

static unsigned long get_nr_to_reclaim(struct scan_control *sc)
@@ -5137,29 +5131,18 @@ static unsigned long get_nr_to_reclaim(s
if (!global_reclaim(sc))
return -1;

- /* discount the previous progress for kswapd */
- if (current_is_kswapd())
- return sc->nr_to_reclaim + sc->last_reclaimed;
-
return max(sc->nr_to_reclaim, compact_gap(sc->order));
}

-static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
- struct blk_plug plug;
+ long nr_to_scan;
unsigned long scanned = 0;
unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);

- lru_add_drain();
-
- blk_start_plug(&plug);
-
- set_mm_walk(lruvec_pgdat(lruvec));
-
while (true) {
int delta;
int swappiness;
- unsigned long nr_to_scan;

if (sc->may_swap)
swappiness = get_swappiness(lruvec, sc);
@@ -5169,7 +5152,7 @@ static void lru_gen_shrink_lruvec(struct
swappiness = 0;

nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
- if (!nr_to_scan)
+ if (nr_to_scan <= 0)
break;

delta = evict_folios(lruvec, sc, swappiness);
@@ -5186,10 +5169,250 @@ static void lru_gen_shrink_lruvec(struct
cond_resched();
}

+ /* whether try_to_inc_max_seq() was successful */
+ return nr_to_scan < 0;
+}
+
+static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
+{
+ bool success;
+ unsigned long scanned = sc->nr_scanned;
+ unsigned long reclaimed = sc->nr_reclaimed;
+ int seg = lru_gen_memcg_seg(lruvec);
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+
+ /* see the comment on MEMCG_NR_GENS */
+ if (!lruvec_is_sizable(lruvec, sc))
+ return seg != MEMCG_LRU_TAIL ? MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
+
+ mem_cgroup_calculate_protection(NULL, memcg);
+
+ if (mem_cgroup_below_min(memcg))
+ return MEMCG_LRU_YOUNG;
+
+ if (mem_cgroup_below_low(memcg)) {
+ /* see the comment on MEMCG_NR_GENS */
+ if (seg != MEMCG_LRU_TAIL)
+ return MEMCG_LRU_TAIL;
+
+ memcg_memory_event(memcg, MEMCG_LOW);
+ }
+
+ success = try_to_shrink_lruvec(lruvec, sc);
+
+ shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
+
+ vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
+ sc->nr_reclaimed - reclaimed);
+
+ sc->nr_reclaimed += current->reclaim_state->reclaimed_slab;
+ current->reclaim_state->reclaimed_slab = 0;
+
+ return success ? MEMCG_LRU_YOUNG : 0;
+}
+
+#ifdef CONFIG_MEMCG
+
+static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
+{
+ int gen;
+ int bin;
+ int first_bin;
+ struct lruvec *lruvec;
+ struct lru_gen_folio *lrugen;
+ const struct hlist_nulls_node *pos;
+ int op = 0;
+ struct mem_cgroup *memcg = NULL;
+ unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
+
+ bin = first_bin = prandom_u32_max(MEMCG_NR_BINS);
+restart:
+ gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
+
+ rcu_read_lock();
+
+ hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) {
+ if (op)
+ lru_gen_rotate_memcg(lruvec, op);
+
+ mem_cgroup_put(memcg);
+
+ lruvec = container_of(lrugen, struct lruvec, lrugen);
+ memcg = lruvec_memcg(lruvec);
+
+ if (!mem_cgroup_tryget(memcg)) {
+ op = 0;
+ memcg = NULL;
+ continue;
+ }
+
+ rcu_read_unlock();
+
+ op = shrink_one(lruvec, sc);
+
+ if (sc->nr_reclaimed >= nr_to_reclaim)
+ goto success;
+
+ rcu_read_lock();
+ }
+
+ rcu_read_unlock();
+
+ /* restart if raced with lru_gen_rotate_memcg() */
+ if (gen != get_nulls_value(pos))
+ goto restart;
+
+ /* try the rest of the bins of the current generation */
+ bin = get_memcg_bin(bin + 1);
+ if (bin != first_bin)
+ goto restart;
+success:
+ if (op)
+ lru_gen_rotate_memcg(lruvec, op);
+
+ mem_cgroup_put(memcg);
+}
+
+static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+{
+ struct blk_plug plug;
+
+ VM_WARN_ON_ONCE(global_reclaim(sc));
+
+ lru_add_drain();
+
+ blk_start_plug(&plug);
+
+ set_mm_walk(lruvec_pgdat(lruvec));
+
+ if (try_to_shrink_lruvec(lruvec, sc))
+ lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG);
+
+ clear_mm_walk();
+
+ blk_finish_plug(&plug);
+}
+
+#else /* !CONFIG_MEMCG */
+
+static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
+{
+ BUILD_BUG();
+}
+
+static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+{
+ BUILD_BUG();
+}
+
+#endif
+
+static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc)
+{
+ int priority;
+ unsigned long reclaimable;
+ struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
+
+ if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH)
+ return;
+ /*
+ * Determine the initial priority based on ((total / MEMCG_NR_GENS) >>
+ * priority) * reclaimed_to_scanned_ratio = nr_to_reclaim, where the
+ * estimated reclaimed_to_scanned_ratio = inactive / total.
+ */
+ reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE);
+ if (get_swappiness(lruvec, sc))
+ reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON);
+
+ reclaimable /= MEMCG_NR_GENS;
+
+ /* round down reclaimable and round up sc->nr_to_reclaim */
+ priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1);
+
+ sc->priority = clamp(priority, 0, DEF_PRIORITY);
+}
+
+static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
+{
+ struct blk_plug plug;
+ unsigned long reclaimed = sc->nr_reclaimed;
+
+ VM_WARN_ON_ONCE(!global_reclaim(sc));
+
+ lru_add_drain();
+
+ blk_start_plug(&plug);
+
+ set_mm_walk(pgdat);
+
+ set_initial_priority(pgdat, sc);
+
+ if (current_is_kswapd())
+ sc->nr_reclaimed = 0;
+
+ if (mem_cgroup_disabled())
+ shrink_one(&pgdat->__lruvec, sc);
+ else
+ shrink_many(pgdat, sc);
+
+ if (current_is_kswapd())
+ sc->nr_reclaimed += reclaimed;
+
clear_mm_walk();

blk_finish_plug(&plug);
+
+ /* kswapd should never fail */
+ pgdat->kswapd_failures = 0;
+}
+
+#ifdef CONFIG_MEMCG
+void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
+{
+ int seg;
+ int old, new;
+ int bin = prandom_u32_max(MEMCG_NR_BINS);
+ struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+
+ spin_lock(&pgdat->memcg_lru.lock);
+
+ VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
+
+ seg = 0;
+ new = old = lruvec->lrugen.gen;
+
+ /* see the comment on MEMCG_NR_GENS */
+ if (op == MEMCG_LRU_HEAD)
+ seg = MEMCG_LRU_HEAD;
+ else if (op == MEMCG_LRU_TAIL)
+ seg = MEMCG_LRU_TAIL;
+ else if (op == MEMCG_LRU_OLD)
+ new = get_memcg_gen(pgdat->memcg_lru.seq);
+ else if (op == MEMCG_LRU_YOUNG)
+ new = get_memcg_gen(pgdat->memcg_lru.seq + 1);
+ else
+ VM_WARN_ON_ONCE(true);
+
+ hlist_nulls_del_rcu(&lruvec->lrugen.list);
+
+ if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD)
+ hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
+ else
+ hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
+
+ pgdat->memcg_lru.nr_memcgs[old]--;
+ pgdat->memcg_lru.nr_memcgs[new]++;
+
+ lruvec->lrugen.gen = new;
+ WRITE_ONCE(lruvec->lrugen.seg, seg);
+
+ if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
+ WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
+
+ spin_unlock(&pgdat->memcg_lru.lock);
}
+#endif

/******************************************************************************
* state change
@@ -5647,11 +5870,11 @@ static int run_cmd(char cmd, int memcg_i

if (!mem_cgroup_disabled()) {
rcu_read_lock();
+
memcg = mem_cgroup_from_id(memcg_id);
-#ifdef CONFIG_MEMCG
- if (memcg && !css_tryget(&memcg->css))
+ if (!mem_cgroup_tryget(memcg))
memcg = NULL;
-#endif
+
rcu_read_unlock();

if (!memcg)
@@ -5799,6 +6022,19 @@ void lru_gen_init_lruvec(struct lruvec *
}

#ifdef CONFIG_MEMCG
+
+void lru_gen_init_pgdat(struct pglist_data *pgdat)
+{
+ int i, j;
+
+ spin_lock_init(&pgdat->memcg_lru.lock);
+
+ for (i = 0; i < MEMCG_NR_GENS; i++) {
+ for (j = 0; j < MEMCG_NR_BINS; j++)
+ INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
+ }
+}
+
void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
INIT_LIST_HEAD(&memcg->mm_list.fifo);
@@ -5822,7 +6058,69 @@ void lru_gen_exit_memcg(struct mem_cgrou
}
}
}
-#endif
+
+void lru_gen_online_memcg(struct mem_cgroup *memcg)
+{
+ int gen;
+ int nid;
+ int bin = prandom_u32_max(MEMCG_NR_BINS);
+
+ for_each_node(nid) {
+ struct pglist_data *pgdat = NODE_DATA(nid);
+ struct lruvec *lruvec = get_lruvec(memcg, nid);
+
+ spin_lock(&pgdat->memcg_lru.lock);
+
+ VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));
+
+ gen = get_memcg_gen(pgdat->memcg_lru.seq);
+
+ hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]);
+ pgdat->memcg_lru.nr_memcgs[gen]++;
+
+ lruvec->lrugen.gen = gen;
+
+ spin_unlock(&pgdat->memcg_lru.lock);
+ }
+}
+
+void lru_gen_offline_memcg(struct mem_cgroup *memcg)
+{
+ int nid;
+
+ for_each_node(nid) {
+ struct lruvec *lruvec = get_lruvec(memcg, nid);
+
+ lru_gen_rotate_memcg(lruvec, MEMCG_LRU_OLD);
+ }
+}
+
+void lru_gen_release_memcg(struct mem_cgroup *memcg)
+{
+ int gen;
+ int nid;
+
+ for_each_node(nid) {
+ struct pglist_data *pgdat = NODE_DATA(nid);
+ struct lruvec *lruvec = get_lruvec(memcg, nid);
+
+ spin_lock(&pgdat->memcg_lru.lock);
+
+ VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
+
+ gen = lruvec->lrugen.gen;
+
+ hlist_nulls_del_rcu(&lruvec->lrugen.list);
+ pgdat->memcg_lru.nr_memcgs[gen]--;
+
+ if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
+ WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
+
+ spin_unlock(&pgdat->memcg_lru.lock);
+ }
+}
+
+#endif /* CONFIG_MEMCG */

static int __init init_lru_gen(void)
{
@@ -5849,6 +6147,10 @@ static void lru_gen_shrink_lruvec(struct
{
}

+static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
+{
+}
+
#endif /* CONFIG_LRU_GEN */

static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
@@ -5862,7 +6164,7 @@ static void shrink_lruvec(struct lruvec
bool proportional_reclaim;
struct blk_plug plug;

- if (lru_gen_enabled()) {
+ if (lru_gen_enabled() && !global_reclaim(sc)) {
lru_gen_shrink_lruvec(lruvec, sc);
return;
}
@@ -6105,6 +6407,11 @@ static void shrink_node(pg_data_t *pgdat
struct lruvec *target_lruvec;
bool reclaimable = false;

+ if (lru_gen_enabled() && global_reclaim(sc)) {
+ lru_gen_shrink_node(pgdat, sc);
+ return;
+ }
+
target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);

again:
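The set_initial_priority() formula introduced above can be checked with a worked example. A minimal sketch with hypothetical numbers, using a portable stand-in for the kernel's fls_long():

#include <stdio.h>

/* stand-in for the kernel's fls_long(): 1-based index of the highest set bit */
static int fls_long(unsigned long x)
{
	int i = 0;
	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	/* hypothetical values: 2^18 reclaimable pages per memcg gen, 256-page goal */
	unsigned long reclaimable = 1UL << 18;
	unsigned long nr_to_reclaim = 256;

	/* round down reclaimable and round up nr_to_reclaim, as in the patch */
	int priority = fls_long(reclaimable) - 1 - fls_long(nr_to_reclaim - 1);

	/* fls(2^18) - 1 = 18, fls(255) = 8 -> priority 10, within [0, DEF_PRIORITY=12] */
	printf("initial priority %d\n", priority);
	return 0;
}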
@@ -0,0 +1,196 @@
From 93147736b5b3a21bea24313bfc7a696829932009 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:05 -0700
Subject: [PATCH 27/29] mm: multi-gen LRU: clarify scan_control flags

Among the flags in scan_control:
1. sc->may_swap, which indicates swap constraint due to memsw.max, is
supported as usual.
2. sc->proactive, which indicates reclaim by memory.reclaim, may not
opportunistically skip the aging path, since it is considered less
latency sensitive.
3. !(sc->gfp_mask & __GFP_IO), which indicates IO constraint, lowers
swappiness to prioritize file LRU, since clean file folios are more
likely to exist.
4. sc->may_writepage and sc->may_unmap, which indicate opportunistic
reclaim, are rejected, since unmapped clean folios are already
prioritized. Scanning for more of them is likely futile and can
cause high reclaim latency when there is a large number of memcgs.

The rest are handled by the existing code.

Link: https://lkml.kernel.org/r/20221222041905.2431096-8-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/vmscan.c | 55 +++++++++++++++++++++++++++--------------------------
1 file changed, 28 insertions(+), 27 deletions(-)
--- a/mm/vmscan.c
|
||||
+++ b/mm/vmscan.c
|
||||
@@ -3185,6 +3185,9 @@ static int get_swappiness(struct lruvec
|
||||
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
|
||||
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
|
||||
|
||||
+ if (!sc->may_swap)
|
||||
+ return 0;
|
||||
+
|
||||
if (!can_demote(pgdat->node_id, sc) &&
|
||||
mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
|
||||
return 0;
|
||||
@@ -4226,7 +4229,7 @@ static void walk_mm(struct lruvec *lruve
|
||||
} while (err == -EAGAIN);
|
||||
}
|
||||
|
||||
-static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat)
|
||||
+static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc)
|
||||
{
|
||||
struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
|
||||
|
||||
@@ -4234,7 +4237,7 @@ static struct lru_gen_mm_walk *set_mm_wa
|
||||
VM_WARN_ON_ONCE(walk);
|
||||
|
||||
walk = &pgdat->mm_walk;
|
||||
- } else if (!pgdat && !walk) {
|
||||
+ } else if (!walk && force_alloc) {
|
||||
VM_WARN_ON_ONCE(current_is_kswapd());
|
||||
|
||||
walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
|
||||
@@ -4420,7 +4423,7 @@ static bool try_to_inc_max_seq(struct lr
|
||||
goto done;
|
||||
}
|
||||
|
||||
- walk = set_mm_walk(NULL);
|
||||
+ walk = set_mm_walk(NULL, true);
|
||||
if (!walk) {
|
||||
success = iterate_mm_list_nowalk(lruvec, max_seq);
|
||||
goto done;
|
||||
@@ -4489,8 +4492,6 @@ static bool lruvec_is_reclaimable(struct
|
||||
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
|
||||
DEFINE_MIN_SEQ(lruvec);
|
||||
|
||||
- VM_WARN_ON_ONCE(sc->memcg_low_reclaim);
|
||||
-
|
||||
/* see the comment on lru_gen_folio */
|
||||
gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
|
||||
birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
|
||||
@@ -4746,12 +4747,8 @@ static bool isolate_folio(struct lruvec
|
||||
{
|
||||
bool success;
|
||||
|
||||
- /* unmapping inhibited */
|
||||
- if (!sc->may_unmap && folio_mapped(folio))
|
||||
- return false;
|
||||
-
|
||||
/* swapping inhibited */
|
||||
- if (!(sc->may_writepage && (sc->gfp_mask & __GFP_IO)) &&
|
||||
+ if (!(sc->gfp_mask & __GFP_IO) &&
|
||||
(folio_test_dirty(folio) ||
|
||||
(folio_test_anon(folio) && !folio_test_swapcache(folio))))
|
||||
return false;
|
||||
@@ -4848,9 +4845,8 @@ static int scan_folios(struct lruvec *lr
|
||||
__count_vm_events(PGSCAN_ANON + type, isolated);
|
||||
|
||||
/*
|
||||
- * There might not be eligible pages due to reclaim_idx, may_unmap and
|
||||
- * may_writepage. Check the remaining to prevent livelock if it's not
|
||||
- * making progress.
|
||||
+ * There might not be eligible pages due to reclaim_idx. Check the
|
||||
+ * remaining to prevent livelock if it's not making progress.
|
||||
*/
|
||||
return isolated || !remaining ? scanned : 0;
|
||||
}
|
||||
@@ -5110,8 +5106,7 @@ static long get_nr_to_scan(struct lruvec
|
||||
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
|
||||
DEFINE_MAX_SEQ(lruvec);
|
||||
|
||||
- if (mem_cgroup_below_min(memcg) ||
|
||||
- (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
|
||||
+ if (mem_cgroup_below_min(memcg))
|
||||
return 0;
|
||||
|
||||
if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
|
||||
@@ -5139,17 +5134,14 @@ static bool try_to_shrink_lruvec(struct
|
||||
long nr_to_scan;
|
||||
unsigned long scanned = 0;
|
||||
unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
|
||||
+ int swappiness = get_swappiness(lruvec, sc);
|
||||
+
|
||||
+ /* clean file folios are more likely to exist */
|
||||
+ if (swappiness && !(sc->gfp_mask & __GFP_IO))
|
||||
+ swappiness = 1;
|
||||
|
||||
while (true) {
|
||||
int delta;
|
||||
- int swappiness;
|
||||
-
|
||||
- if (sc->may_swap)
|
||||
- swappiness = get_swappiness(lruvec, sc);
|
||||
- else if (!cgroup_reclaim(sc) && get_swappiness(lruvec, sc))
|
||||
- swappiness = 1;
|
||||
- else
|
||||
- swappiness = 0;
|
||||
|
||||
nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
|
||||
if (nr_to_scan <= 0)
|
||||
@@ -5279,12 +5271,13 @@ static void lru_gen_shrink_lruvec(struct
|
||||
struct blk_plug plug;
|
||||
|
||||
VM_WARN_ON_ONCE(global_reclaim(sc));
|
||||
+ VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap);
|
||||
|
||||
lru_add_drain();
|
||||
|
||||
blk_start_plug(&plug);
|
||||
|
||||
- set_mm_walk(lruvec_pgdat(lruvec));
|
||||
+ set_mm_walk(NULL, sc->proactive);
|
||||
|
||||
if (try_to_shrink_lruvec(lruvec, sc))
|
||||
lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG);
|
||||
@@ -5340,11 +5333,19 @@ static void lru_gen_shrink_node(struct p
|
||||
|
||||
VM_WARN_ON_ONCE(!global_reclaim(sc));
|
||||
|
||||
+ /*
|
||||
+ * Unmapped clean folios are already prioritized. Scanning for more of
|
||||
+ * them is likely futile and can cause high reclaim latency when there
|
||||
+ * is a large number of memcgs.
|
||||
+ */
|
||||
+ if (!sc->may_writepage || !sc->may_unmap)
|
||||
+ goto done;
|
||||
+
|
||||
lru_add_drain();
|
||||
|
||||
blk_start_plug(&plug);
|
||||
|
||||
- set_mm_walk(pgdat);
|
||||
+ set_mm_walk(NULL, sc->proactive);
|
||||
|
||||
set_initial_priority(pgdat, sc);
|
||||
|
||||
@@ -5362,7 +5363,7 @@ static void lru_gen_shrink_node(struct p
|
||||
clear_mm_walk();
|
||||
|
||||
blk_finish_plug(&plug);
|
||||
-
|
||||
+done:
|
||||
/* kswapd should never fail */
|
||||
pgdat->kswapd_failures = 0;
|
||||
}
|
||||
@@ -5934,7 +5935,7 @@ static ssize_t lru_gen_seq_write(struct
|
||||
set_task_reclaim_state(current, &sc.reclaim_state);
|
||||
flags = memalloc_noreclaim_save();
|
||||
blk_start_plug(&plug);
|
||||
- if (!set_mm_walk(NULL)) {
|
||||
+ if (!set_mm_walk(NULL, true)) {
|
||||
err = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
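The swappiness policy this patch settles on (points 1 and 3 of the commit message) can be modelled standalone. A minimal sketch with a stubbed-out scan_control and an illustrative __GFP_IO bit value (neither is the kernel's definition); only the two if-checks mirror the patch:

#include <stdio.h>
#include <stdbool.h>

#define __GFP_IO 0x40u	/* illustrative bit value, not the kernel's */

struct scan_control {	/* stub with just the two fields used here */
	bool may_swap;
	unsigned int gfp_mask;
};

static int get_swappiness(const struct scan_control *sc, int memcg_swappiness)
{
	if (!sc->may_swap)	/* point 1: swap constraint (memsw.max) */
		return 0;
	return memcg_swappiness;
}

static int effective_swappiness(const struct scan_control *sc, int memcg_swappiness)
{
	int swappiness = get_swappiness(sc, memcg_swappiness);

	/* point 3: IO constraint - clean file folios are more likely to exist */
	if (swappiness && !(sc->gfp_mask & __GFP_IO))
		swappiness = 1;
	return swappiness;
}

int main(void)
{
	struct scan_control no_io = { .may_swap = true, .gfp_mask = 0 };
	struct scan_control full = { .may_swap = true, .gfp_mask = __GFP_IO };
	struct scan_control no_swap = { .may_swap = false, .gfp_mask = __GFP_IO };

	printf("%d %d %d\n",
	       effective_swappiness(&no_io, 60),	/* 1 */
	       effective_swappiness(&full, 60),		/* 60 */
	       effective_swappiness(&no_swap, 60));	/* 0 */
	return 0;
}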
@@ -0,0 +1,34 @@
From cf3297e4c7a928da8b2b2f0baff2f9c69ea57952 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:06 -0700
Subject: [PATCH 28/29] mm: multi-gen LRU: simplify arch_has_hw_pte_young()
 check

Scanning page tables when hardware does not set the accessed bit has
no real use cases.

Link: https://lkml.kernel.org/r/20221222041905.2431096-9-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 mm/vmscan.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4418,7 +4418,7 @@ static bool try_to_inc_max_seq(struct lr
 	 * handful of PTEs. Spreading the work out over a period of time usually
 	 * is less efficient, but it avoids bursty page faults.
 	 */
-	if (!force_scan && !(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) {
+	if (!arch_has_hw_pte_young() || !get_cap(LRU_GEN_MM_WALK)) {
 		success = iterate_mm_list_nowalk(lruvec, max_seq);
 		goto done;
 	}
@@ -0,0 +1,88 @@
From cc67f962cc53f6e1dfa92eb85b7b26fe83a3c66f Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Mon, 13 Feb 2023 00:53:22 -0700
Subject: [PATCH 29/29] mm: multi-gen LRU: avoid futile retries

Recall that the per-node memcg LRU has two generations and they alternate
when the last memcg (of a given node) is moved from one to the other.
Each generation is also sharded into multiple bins to improve scalability.
A reclaimer starts with a random bin (in the old generation) and, if it
fails, it will retry, i.e., to try the rest of the bins.

If a reclaimer fails with the last memcg, it should move this memcg to the
young generation first, which causes the generations to alternate, and
then retry. Otherwise, the retries will be futile because all other bins
are empty.

Link: https://lkml.kernel.org/r/20230213075322.1416966-1-yuzhao@google.com
Fixes: e4dde56cd208 ("mm: multi-gen LRU: per-node lru_gen_folio lists")
Signed-off-by: Yu Zhao <yuzhao@google.com>
Reported-by: T.J. Mercier <tjmercier@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 mm/vmscan.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -5208,18 +5208,20 @@ static int shrink_one(struct lru

 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
 {
+	int op;
 	int gen;
 	int bin;
 	int first_bin;
 	struct lruvec *lruvec;
 	struct lru_gen_folio *lrugen;
+	struct mem_cgroup *memcg;
 	const struct hlist_nulls_node *pos;
-	int op = 0;
-	struct mem_cgroup *memcg = NULL;
 	unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);

 	bin = first_bin = prandom_u32_max(MEMCG_NR_BINS);
 restart:
+	op = 0;
+	memcg = NULL;
 	gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));

 	rcu_read_lock();
@@ -5243,14 +5245,22 @@ restart:

 		op = shrink_one(lruvec, sc);

-		if (sc->nr_reclaimed >= nr_to_reclaim)
-			goto success;
-
 		rcu_read_lock();
+
+		if (sc->nr_reclaimed >= nr_to_reclaim)
+			break;
 	}

 	rcu_read_unlock();

+	if (op)
+		lru_gen_rotate_memcg(lruvec, op);
+
+	mem_cgroup_put(memcg);
+
+	if (sc->nr_reclaimed >= nr_to_reclaim)
+		return;
+
 	/* restart if raced with lru_gen_rotate_memcg() */
 	if (gen != get_nulls_value(pos))
 		goto restart;
@@ -5259,11 +5269,6 @@ restart:
 	bin = get_memcg_bin(bin + 1);
 	if (bin != first_bin)
 		goto restart;
-success:
-	if (op)
-		lru_gen_rotate_memcg(lruvec, op);
-
-	mem_cgroup_put(memcg);
 }

 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
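The bin walk whose retries this patch fixes is easiest to see with the wrap-around helper in isolation. A small userspace sketch, assuming get_memcg_bin() is a plain modulo over MEMCG_NR_BINS (the constant value and starting bin below are made up):

#include <stdio.h>

#define MEMCG_NR_BINS 8	/* assumed value for illustration */

static int get_memcg_bin(int bin)
{
	return bin % MEMCG_NR_BINS;	/* assumed plain modulo */
}

int main(void)
{
	int first_bin = 5;	/* stand-in for prandom_u32_max(MEMCG_NR_BINS) */
	int bin = first_bin;

	/* visit every bin exactly once, wrapping past the last one; this is
	 * the goto-restart loop from shrink_many() flattened into a do/while */
	do {
		printf("trying bin %d\n", bin);
		bin = get_memcg_bin(bin + 1);
	} while (bin != first_bin);
	return 0;
}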
@@ -0,0 +1,65 @@
From 63db0cb35e1cb3b3c134906d1062f65513fdda2d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
Date: Tue, 4 Oct 2022 10:37:09 +0200
Subject: [PATCH] mtd: core: simplify (a bit) code find partition-matching
 dynamic OF node
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

1. Don't hardcode "partition-" string twice
2. Use simpler logic & use ->name to avoid of_property_read_string()
3. Use mtd_get_of_node() helper

Cc: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20221004083710.27704-1-zajec5@gmail.com
---
 drivers/mtd/mtdcore.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -551,18 +551,16 @@ static void mtd_check_of_node(struct mtd
 	struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
 	const char *pname, *prefix = "partition-";
 	int plen, mtd_name_len, offset, prefix_len;
-	struct mtd_info *parent;
 	bool found = false;

 	/* Check if MTD already has a device node */
-	if (dev_of_node(&mtd->dev))
+	if (mtd_get_of_node(mtd))
 		return;

 	/* Check if a partitions node exist */
 	if (!mtd_is_partition(mtd))
 		return;
-	parent = mtd->parent;
-	parent_dn = of_node_get(dev_of_node(&parent->dev));
+	parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
 	if (!parent_dn)
 		return;

@@ -575,15 +573,15 @@ static void mtd_check_of_node(struct mtd

 	/* Search if a partition is defined with the same name */
 	for_each_child_of_node(partitions, mtd_dn) {
-		offset = 0;
-
 		/* Skip partition with no/wrong prefix */
-		if (!of_node_name_prefix(mtd_dn, "partition-"))
+		if (!of_node_name_prefix(mtd_dn, prefix))
 			continue;

 		/* Label have priority. Check that first */
-		if (of_property_read_string(mtd_dn, "label", &pname)) {
-			of_property_read_string(mtd_dn, "name", &pname);
+		if (!of_property_read_string(mtd_dn, "label", &pname)) {
+			offset = 0;
+		} else {
+			pname = mtd_dn->name;
 			offset = prefix_len;
 		}
@@ -0,0 +1,84 @@
From ddb8cefb7af288950447ca6eeeafb09977dab56f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
Date: Tue, 4 Oct 2022 10:37:10 +0200
Subject: [PATCH] mtd: core: try to find OF node for every MTD partition
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

So far this feature was limited to the top-level "nvmem-cells" node.
There are multiple parsers creating partitions and subpartitions
dynamically. Extend that code to handle them too.

This allows finding partition-* node for every MTD (sub)partition.

Random example:

partitions {
	compatible = "brcm,bcm947xx-cfe-partitions";

	partition-firmware {
		compatible = "brcm,trx";

		partition-loader {
		};
	};
};

Cc: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20221004083710.27704-2-zajec5@gmail.com
---
 drivers/mtd/mtdcore.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -551,20 +551,22 @@ static void mtd_check_of_node(struct mtd
 	struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
 	const char *pname, *prefix = "partition-";
 	int plen, mtd_name_len, offset, prefix_len;
-	bool found = false;

 	/* Check if MTD already has a device node */
 	if (mtd_get_of_node(mtd))
 		return;

-	/* Check if a partitions node exist */
 	if (!mtd_is_partition(mtd))
 		return;
+
 	parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
 	if (!parent_dn)
 		return;

-	partitions = of_get_child_by_name(parent_dn, "partitions");
+	if (mtd_is_partition(mtd->parent))
+		partitions = of_node_get(parent_dn);
+	else
+		partitions = of_get_child_by_name(parent_dn, "partitions");
 	if (!partitions)
 		goto exit_parent;

@@ -588,19 +590,11 @@ static void mtd_check_of_node(struct mtd
 		plen = strlen(pname) - offset;
 		if (plen == mtd_name_len &&
 		    !strncmp(mtd->name, pname + offset, plen)) {
-			found = true;
+			mtd_set_of_node(mtd, mtd_dn);
 			break;
 		}
 	}

-	if (!found)
-		goto exit_partitions;
-
-	/* Set of_node only for nvmem */
-	if (of_device_is_compatible(mtd_dn, "nvmem-cells"))
-		mtd_set_of_node(mtd, mtd_dn);
-
-exit_partitions:
 	of_node_put(partitions);
 exit_parent:
 	of_node_put(parent_dn);
@@ -0,0 +1,47 @@
From 26422ac78e9d8767bd4aabfbae616b15edbf6a1b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
Date: Sat, 22 Oct 2022 23:13:18 +0200
Subject: [PATCH] mtd: core: set ROOT_DEV for partitions marked as rootfs in DT
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This adds support for "linux,rootfs" binding that is used to mark flash
partition containing rootfs. It's useful for devices using device tree
that don't have bootloader passing root info in cmdline.

Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20221022211318.32009-2-zajec5@gmail.com
---
 drivers/mtd/mtdcore.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -28,6 +28,7 @@
 #include <linux/leds.h>
 #include <linux/debugfs.h>
 #include <linux/nvmem-provider.h>
+#include <linux/root_dev.h>

 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -737,6 +738,17 @@ int add_mtd_device(struct mtd_info *mtd)
 		not->add(mtd);

 	mutex_unlock(&mtd_table_mutex);
+
+	if (of_find_property(mtd_get_of_node(mtd), "linux,rootfs", NULL)) {
+		if (IS_BUILTIN(CONFIG_MTD)) {
+			pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
+			ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+		} else {
+			pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
+				mtd->index, mtd->name);
+		}
+	}
+
 	/* We _know_ we aren't being removed, because
 	   our caller is still holding us here. So none
 	   of this try_ nonsense, and no bitching about it
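For devices that want this behaviour, the binding is a bare property on the partition node. A minimal device tree sketch in the same style as the example in the previous patch (labels, offsets and sizes here are made up for illustration):

partitions {
	compatible = "fixed-partitions";
	#address-cells = <1>;
	#size-cells = <1>;

	partition@440000 {
		label = "rootfs";
		reg = <0x440000 0xa00000>;
		/* boot from this partition without root= on the cmdline */
		linux,rootfs;
	};
};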
@@ -0,0 +1,229 @@
From aec4d5f5ffd0f0092bd9dc21ea90e0bc237d4b74 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
Date: Sat, 15 Oct 2022 11:29:50 +0200
Subject: [PATCH] mtd: parsers: add TP-Link SafeLoader partitions table parser
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This parser deals with most TP-Link home routers. It reads info about
partitions and registers them in the MTD subsystem.

Example from TP-Link Archer C5 V2:

spi-nor spi0.0: s25fl128s1 (16384 Kbytes)
15 tplink-safeloader partitions found on MTD device spi0.0
Creating 15 MTD partitions on "spi0.0":
0x000000000000-0x000000040000 : "fs-uboot"
0x000000040000-0x000000440000 : "os-image"
0x000000440000-0x000000e40000 : "rootfs"
0x000000e40000-0x000000e40200 : "default-mac"
0x000000e40200-0x000000e40400 : "pin"
0x000000e40400-0x000000e40600 : "product-info"
0x000000e50000-0x000000e60000 : "partition-table"
0x000000e60000-0x000000e60200 : "soft-version"
0x000000e61000-0x000000e70000 : "support-list"
0x000000e70000-0x000000e80000 : "profile"
0x000000e80000-0x000000e90000 : "default-config"
0x000000e90000-0x000000ee0000 : "user-config"
0x000000ee0000-0x000000fe0000 : "log"
0x000000fe0000-0x000000ff0000 : "radio_bk"
0x000000ff0000-0x000001000000 : "radio"

Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20221015092950.27467-2-zajec5@gmail.com
---
 drivers/mtd/parsers/Kconfig             |  15 +++
 drivers/mtd/parsers/Makefile            |   1 +
 drivers/mtd/parsers/tplink_safeloader.c | 150 ++++++++++++++++++++++++
 3 files changed, 166 insertions(+)
 create mode 100644 drivers/mtd/parsers/tplink_safeloader.c

--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -123,6 +123,21 @@ config MTD_AFS_PARTS
 	  for your particular device. It won't happen automatically. The
 	  'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example.

+config MTD_PARSER_TPLINK_SAFELOADER
+	tristate "TP-Link Safeloader partitions parser"
+	depends on MTD && (ARCH_BCM_5301X || ATH79 || SOC_MT7620 || SOC_MT7621 || COMPILE_TEST)
+	help
+	  TP-Link home routers use flash partitions to store various data. Info
+	  about flash space layout is stored in a partitions table using a
+	  custom ASCII-based format.
+
+	  That format was first found in devices with SafeLoader bootloader and
+	  was named after it. Later it was adapted to CFE and U-Boot
+	  bootloaders.
+
+	  This driver reads partitions table, parses it and creates MTD
+	  partitions.
+
 config MTD_PARSER_TRX
 	tristate "Parser for TRX format partitions"
 	depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || RALINK || COMPILE_TEST)
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -10,6 +10,7 @@ ofpart-$(CONFIG_MTD_OF_PARTS_BCM4908) +=
 ofpart-$(CONFIG_MTD_OF_PARTS_LINKSYS_NS)+= ofpart_linksys_ns.o
 obj-$(CONFIG_MTD_PARSER_IMAGETAG)	+= parser_imagetag.o
 obj-$(CONFIG_MTD_AFS_PARTS)		+= afs.o
+obj-$(CONFIG_MTD_PARSER_TPLINK_SAFELOADER) += tplink_safeloader.o
 obj-$(CONFIG_MTD_PARSER_TRX)		+= parser_trx.o
 obj-$(CONFIG_MTD_SERCOMM_PARTS)		+= scpart.o
 obj-$(CONFIG_MTD_SHARPSL_PARTS)		+= sharpslpart.o
--- /dev/null
+++ b/drivers/mtd/parsers/tplink_safeloader.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2022 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#define TPLINK_SAFELOADER_DATA_OFFSET		4
+#define TPLINK_SAFELOADER_MAX_PARTS		32
+
+struct safeloader_cmn_header {
+	__be32 size;
+	uint32_t unused;
+} __packed;
+
+static void *mtd_parser_tplink_safeloader_read_table(struct mtd_info *mtd)
+{
+	struct safeloader_cmn_header hdr;
+	struct device_node *np;
+	size_t bytes_read;
+	size_t offset;
+	size_t size;
+	char *buf;
+	int err;
+
+	np = mtd_get_of_node(mtd);
+	if (mtd_is_partition(mtd))
+		of_node_get(np);
+	else
+		np = of_get_child_by_name(np, "partitions");
+
+	if (of_property_read_u32(np, "partitions-table-offset", (u32 *)&offset)) {
+		pr_err("Failed to get partitions table offset\n");
+		goto err_put;
+	}
+
+	err = mtd_read(mtd, offset, sizeof(hdr), &bytes_read, (uint8_t *)&hdr);
+	if (err && !mtd_is_bitflip(err)) {
+		pr_err("Failed to read from %s at 0x%zx\n", mtd->name, offset);
+		goto err_put;
+	}
+
+	size = be32_to_cpu(hdr.size);
+
+	buf = kmalloc(size + 1, GFP_KERNEL);
+	if (!buf)
+		goto err_put;
+
+	err = mtd_read(mtd, offset + sizeof(hdr), size, &bytes_read, buf);
+	if (err && !mtd_is_bitflip(err)) {
+		pr_err("Failed to read from %s at 0x%zx\n", mtd->name, offset + sizeof(hdr));
+		goto err_kfree;
+	}
+
+	buf[size] = '\0';
+
+	of_node_put(np);
+
+	return buf;
+
+err_kfree:
+	kfree(buf);
+err_put:
+	of_node_put(np);
+	return NULL;
+}
+
+static int mtd_parser_tplink_safeloader_parse(struct mtd_info *mtd,
+					      const struct mtd_partition **pparts,
+					      struct mtd_part_parser_data *data)
+{
+	struct mtd_partition *parts;
+	char name[65];
+	size_t offset;
+	size_t bytes;
+	char *buf;
+	int idx;
+	int err;
+
+	parts = kcalloc(TPLINK_SAFELOADER_MAX_PARTS, sizeof(*parts), GFP_KERNEL);
+	if (!parts) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	buf = mtd_parser_tplink_safeloader_read_table(mtd);
+	if (!buf) {
+		err = -ENOENT;
+		goto err_out;
+	}
+
+	for (idx = 0, offset = TPLINK_SAFELOADER_DATA_OFFSET;
+	     idx < TPLINK_SAFELOADER_MAX_PARTS &&
+	     sscanf(buf + offset, "partition %64s base 0x%llx size 0x%llx%zn\n",
+		    name, &parts[idx].offset, &parts[idx].size, &bytes) == 3;
+	     idx++, offset += bytes + 1) {
+		parts[idx].name = kstrdup(name, GFP_KERNEL);
+		if (!parts[idx].name) {
+			err = -ENOMEM;
+			goto err_free;
+		}
+	}
+
+	if (idx == TPLINK_SAFELOADER_MAX_PARTS)
+		pr_warn("Reached maximum number of partitions!\n");
+
+	kfree(buf);
+
+	*pparts = parts;
+
+	return idx;
+
+err_free:
+	for (idx -= 1; idx >= 0; idx--)
+		kfree(parts[idx].name);
+err_out:
+	return err;
+};
+
+static void mtd_parser_tplink_safeloader_cleanup(const struct mtd_partition *pparts,
+						 int nr_parts)
+{
+	int i;
+
+	for (i = 0; i < nr_parts; i++)
+		kfree(pparts[i].name);
+
+	kfree(pparts);
+}
+
+static const struct of_device_id mtd_parser_tplink_safeloader_of_match_table[] = {
+	{ .compatible = "tplink,safeloader-partitions" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mtd_parser_tplink_safeloader_of_match_table);
+
+static struct mtd_part_parser mtd_parser_tplink_safeloader = {
+	.parse_fn = mtd_parser_tplink_safeloader_parse,
+	.cleanup = mtd_parser_tplink_safeloader_cleanup,
+	.name = "tplink-safeloader",
+	.of_match_table = mtd_parser_tplink_safeloader_of_match_table,
+};
+module_mtd_part_parser(mtd_parser_tplink_safeloader);
+
+MODULE_LICENSE("GPL");
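The partitions table this parser consumes is plain ASCII, one "partition <name> base <addr> size <len>" entry per line. A standalone userspace demo using the same sscanf() format string as the driver (the sample table contents below are made up):

#include <stdio.h>

int main(void)
{
	/* made-up table entries in the SafeLoader ASCII format */
	const char *buf =
		"partition fs-uboot base 0x00000 size 0x40000\n"
		"partition os-image base 0x40000 size 0x400000\n";
	char name[65];
	unsigned long long base, size;
	size_t offset = 0, bytes;

	/* %zn records how many bytes each entry consumed, just like the
	 * driver's loop, so offset + bytes + 1 steps past the newline */
	while (sscanf(buf + offset, "partition %64s base 0x%llx size 0x%llx%zn\n",
		      name, &base, &size, &bytes) == 3) {
		printf("%s: base 0x%llx, size 0x%llx\n", name, base, size);
		offset += bytes + 1;
	}
	return 0;
}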
@@ -0,0 +1,35 @@
From ebed787a0becb9354f0a23620a5130cccd6c730c Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 19 Jan 2023 03:45:43 +0000
Subject: [PATCH] mtd: spinand: macronix: use scratch buffer for DMA operation

The mx35lf1ge4ab_get_eccsr() function uses an SPI DMA operation to
read the eccsr, hence the buffer should not be on stack. Since commit
380583227c0c7f ("spi: spi-mem: Add extra sanity checks on the op param")
the kernel emits a warning and blocks such operations.

Use the scratch buffer to get eccsr instead of trying to directly read
into a stack-allocated variable.

Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Reviewed-by: Dhruva Gole <d-gole@ti.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/Y8i85zM0u4XdM46z@makrotopia.org
---
 drivers/mtd/nand/spi/macronix.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -83,9 +83,10 @@ static int mx35lf1ge4ab_ecc_get_status(s
 	 * in order to avoid forcing the wear-leveling layer to move
 	 * data around if it's not necessary.
 	 */
-	if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr))
+	if (mx35lf1ge4ab_get_eccsr(spinand, spinand->scratchbuf))
 		return nanddev_get_ecc_conf(nand)->strength;

+	eccsr = *spinand->scratchbuf;
 	if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength ||
 		    !eccsr))
 		return nanddev_get_ecc_conf(nand)->strength;
@@ -0,0 +1,47 @@
From 281f7a6c1a33fffcde32001bacbb4f672140fbf9 Mon Sep 17 00:00:00 2001
From: Michael Walle <michael@walle.cc>
Date: Wed, 8 Mar 2023 09:20:21 +0100
Subject: [PATCH] mtd: core: prepare mtd_otp_nvmem_add() to handle
 -EPROBE_DEFER

NVMEM soon will get the ability for nvmem layouts and these might
not be ready when nvmem_register() is called and thus it might
return -EPROBE_DEFER. Don't print the error message in this case.

Signed-off-by: Michael Walle <michael@walle.cc>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20230308082021.870459-4-michael@walle.cc
---
 drivers/mtd/mtdcore.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -953,8 +953,8 @@ static int mtd_otp_nvmem_add(struct mtd_
 		nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
 					       mtd_nvmem_user_otp_reg_read);
 		if (IS_ERR(nvmem)) {
-			dev_err(dev, "Failed to register OTP NVMEM device\n");
-			return PTR_ERR(nvmem);
+			err = PTR_ERR(nvmem);
+			goto err;
 		}
 		mtd->otp_user_nvmem = nvmem;
 	}
@@ -971,7 +971,6 @@ static int mtd_otp_nvmem_add(struct mtd_
 			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
 						       mtd_nvmem_fact_otp_reg_read);
 			if (IS_ERR(nvmem)) {
-				dev_err(dev, "Failed to register OTP NVMEM device\n");
 				err = PTR_ERR(nvmem);
 				goto err;
 			}
@@ -983,7 +982,7 @@ static int mtd_otp_nvmem_add(struct mtd_

 err:
 	nvmem_unregister(mtd->otp_user_nvmem);
-	return err;
+	return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
 }

 /**
@@ -0,0 +1,56 @@
From: Qingfang DENG <qingfang.deng@siflower.com.cn>
Date: Fri, 3 Feb 2023 09:16:11 +0800
Subject: [PATCH] net: page_pool: use in_softirq() instead

We use BH context only for synchronization, so we don't care if it's
actually serving softirq or not.

As a side note, in case of threaded NAPI, in_serving_softirq() will
return false because it's in process context with BH off, making
page_pool_recycle_in_cache() unreachable.

Signed-off-by: Qingfang DENG <qingfang.deng@siflower.com.cn>
---

--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -386,7 +386,7 @@ static inline void page_pool_nid_changed
 static inline void page_pool_ring_lock(struct page_pool *pool)
 	__acquires(&pool->ring.producer_lock)
 {
-	if (in_serving_softirq())
+	if (in_softirq())
 		spin_lock(&pool->ring.producer_lock);
 	else
 		spin_lock_bh(&pool->ring.producer_lock);
@@ -395,7 +395,7 @@ static inline void page_pool_ring_lock(s
 static inline void page_pool_ring_unlock(struct page_pool *pool)
 	__releases(&pool->ring.producer_lock)
 {
-	if (in_serving_softirq())
+	if (in_softirq())
 		spin_unlock(&pool->ring.producer_lock);
 	else
 		spin_unlock_bh(&pool->ring.producer_lock);
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -511,8 +511,8 @@ static void page_pool_return_page(struct
 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
 {
 	int ret;
-	/* BH protection not needed if current is serving softirq */
-	if (in_serving_softirq())
+	/* BH protection not needed if current is softirq */
+	if (in_softirq())
 		ret = ptr_ring_produce(&pool->ring, page);
 	else
 		ret = ptr_ring_produce_bh(&pool->ring, page);
@@ -570,7 +570,7 @@ __page_pool_put_page(struct page_pool *p
 		page_pool_dma_sync_for_device(pool, page,
 					      dma_sync_size);

-	if (allow_direct && in_serving_softirq() &&
+	if (allow_direct && in_softirq() &&
 	    page_pool_recycle_in_cache(page, pool))
 		return NULL;
@@ -0,0 +1,41 @@
From 7390609b0121a1b982c5ecdfcd72dc328e5784ee Mon Sep 17 00:00:00 2001
From: Michael Walle <michael@walle.cc>
Date: Mon, 6 Feb 2023 13:43:42 +0000
Subject: [PATCH] net: add helper eth_addr_add()

Add a helper to add an offset to an ethernet address. This comes in handy
if you have a base ethernet address for multiple interfaces.

Signed-off-by: Michael Walle <michael@walle.cc>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Link: https://lore.kernel.org/r/20230206134356.839737-9-srinivas.kandagatla@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 include/linux/etherdevice.h | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -508,6 +508,20 @@ static inline void eth_addr_inc(u8 *addr
 }

 /**
+ * eth_addr_add() - Add (or subtract) an offset to/from the given MAC address.
+ *
+ * @offset: Offset to add.
+ * @addr: Pointer to a six-byte array containing Ethernet address to increment.
+ */
+static inline void eth_addr_add(u8 *addr, long offset)
+{
+	u64 u = ether_addr_to_u64(addr);
+
+	u += offset;
+	u64_to_ether_addr(u, addr);
+}
+
+/**
 * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
 * @dev: Pointer to a device structure
 * @addr: Pointer to a six-byte array containing the Ethernet address
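Since eth_addr_add() treats the six address bytes as one big-endian 64-bit integer, a carry in the low byte ripples upward. A userspace re-creation of the helper and the two conversion routines it relies on (reimplemented here to make the example self-contained, not taken from the kernel headers) shows that:

#include <stdio.h>
#include <stdint.h>

static uint64_t ether_addr_to_u64(const uint8_t *addr)
{
	uint64_t u = 0;

	for (int i = 0; i < 6; i++)
		u = u << 8 | addr[i];	/* big-endian packing */
	return u;
}

static void u64_to_ether_addr(uint64_t u, uint8_t *addr)
{
	for (int i = 5; i >= 0; i--) {
		addr[i] = u & 0xff;
		u >>= 8;
	}
}

static void eth_addr_add(uint8_t *addr, long offset)
{
	uint64_t u = ether_addr_to_u64(addr);

	u += offset;
	u64_to_ether_addr(u, addr);
}

int main(void)
{
	uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0xfe };

	eth_addr_add(addr, 3);	/* e.g. derive the 4th interface's address */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	/* prints 00:11:22:33:45:01 - the carry ripples into the next byte */
	return 0;
}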
@@ -0,0 +1,394 @@
From 4765a9722e09765866e131ec31f7b9cf4c1f4854 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Sun, 19 Mar 2023 12:57:50 +0000
Subject: [PATCH] net: pcs: add driver for MediaTek SGMII PCS

The SGMII core found in several MediaTek SoCs is identical to what can
also be found in MediaTek's MT7531 Ethernet switch IC.
As this has not always been clear, both drivers developed different
implementations to deal with the PCS.
Recently Alexander Couzens pointed out this fact which led to the
development of this shared driver.

Add a dedicated driver, mostly by copying the code now found in the
Ethernet driver. The now redundant code will be removed by a follow-up
commit.

Suggested-by: Alexander Couzens <lynxis@fe80.eu>
Suggested-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Tested-by: Frank Wunderlich <frank-w@public-files.de>
Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 MAINTAINERS                       |   8 +
 drivers/net/pcs/Kconfig           |   7 +
 drivers/net/pcs/Makefile          |   1 +
 drivers/net/pcs/pcs-mtk-lynxi.c   | 305 ++++++++++++++++++++++++++++++
 include/linux/pcs/pcs-mtk-lynxi.h |  13 ++
 5 files changed, 334 insertions(+)
 create mode 100644 drivers/net/pcs/pcs-mtk-lynxi.c
 create mode 100644 include/linux/pcs/pcs-mtk-lynxi.h

--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12926,6 +12926,14 @@ L: netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/mediatek/

+MEDIATEK ETHERNET PCS DRIVER
+M:	Alexander Couzens <lynxis@fe80.eu>
+M:	Daniel Golle <daniel@makrotopia.org>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/pcs/pcs-mtk-lynxi.c
+F:	include/linux/pcs/pcs-mtk-lynxi.h
+
 MEDIATEK I2C CONTROLLER DRIVER
 M:	Qii Wang <qii.wang@mediatek.com>
 L:	linux-i2c@vger.kernel.org
--- a/drivers/net/pcs/Kconfig
+++ b/drivers/net/pcs/Kconfig
@@ -32,4 +32,11 @@ config PCS_ALTERA_TSE
 	  This module provides helper functions for the Altera Triple Speed
 	  Ethernet SGMII PCS, that can be found on the Intel Socfpga family.

+config PCS_MTK_LYNXI
+	tristate
+	select REGMAP
+	help
+	  This module provides helpers to phylink for managing the LynxI PCS
+	  which is part of MediaTek's SoC and Ethernet switch ICs.
+
 endmenu
--- a/drivers/net/pcs/Makefile
+++ b/drivers/net/pcs/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o
 obj-$(CONFIG_PCS_LYNX)		+= pcs-lynx.o
 obj-$(CONFIG_PCS_RZN1_MIIC)	+= pcs-rzn1-miic.o
 obj-$(CONFIG_PCS_ALTERA_TSE)	+= pcs-altera-tse.o
+obj-$(CONFIG_PCS_MTK_LYNXI)	+= pcs-mtk-lynxi.o
--- /dev/null
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 MediaTek Inc.
+/* A library for MediaTek SGMII circuit
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ * Author: Alexander Couzens <lynxis@fe80.eu>
+ * Author: Daniel Golle <daniel@makrotopia.org>
+ *
+ */
+
+#include <linux/mdio.h>
+#include <linux/of.h>
+#include <linux/pcs/pcs-mtk-lynxi.h>
+#include <linux/phylink.h>
+#include <linux/regmap.h>
+
+/* SGMII subsystem config registers */
+/* BMCR (low 16) BMSR (high 16) */
+#define SGMSYS_PCS_CONTROL_1		0x0
+#define SGMII_BMCR			GENMASK(15, 0)
+#define SGMII_BMSR			GENMASK(31, 16)
+
+#define SGMSYS_PCS_DEVICE_ID		0x4
+#define SGMII_LYNXI_DEV_ID		0x4d544950
+
+#define SGMSYS_PCS_ADVERTISE		0x8
+#define SGMII_ADVERTISE			GENMASK(15, 0)
+#define SGMII_LPA			GENMASK(31, 16)
+
+#define SGMSYS_PCS_SCRATCH		0x14
+#define SGMII_DEV_VERSION		GENMASK(31, 16)
+
+/* Register to programmable link timer, the unit in 2 * 8ns */
+#define SGMSYS_PCS_LINK_TIMER		0x18
+#define SGMII_LINK_TIMER_MASK		GENMASK(19, 0)
+#define SGMII_LINK_TIMER_VAL(ns)	FIELD_PREP(SGMII_LINK_TIMER_MASK, \
+						   ((ns) / 2 / 8))
+
+/* Register to control remote fault */
+#define SGMSYS_SGMII_MODE		0x20
+#define SGMII_IF_MODE_SGMII		BIT(0)
+#define SGMII_SPEED_DUPLEX_AN		BIT(1)
+#define SGMII_SPEED_MASK		GENMASK(3, 2)
+#define SGMII_SPEED_10			FIELD_PREP(SGMII_SPEED_MASK, 0)
+#define SGMII_SPEED_100			FIELD_PREP(SGMII_SPEED_MASK, 1)
+#define SGMII_SPEED_1000		FIELD_PREP(SGMII_SPEED_MASK, 2)
+#define SGMII_DUPLEX_HALF		BIT(4)
+#define SGMII_REMOTE_FAULT_DIS		BIT(8)
+
+/* Register to reset SGMII design */
+#define SGMSYS_RESERVED_0		0x34
+#define SGMII_SW_RESET			BIT(0)
+
+/* Register to set SGMII speed, ANA RG_ Control Signals III */
+#define SGMII_PHY_SPEED_MASK		GENMASK(3, 2)
+#define SGMII_PHY_SPEED_1_25G		FIELD_PREP(SGMII_PHY_SPEED_MASK, 0)
+#define SGMII_PHY_SPEED_3_125G		FIELD_PREP(SGMII_PHY_SPEED_MASK, 1)
+
+/* Register to power up QPHY */
+#define SGMSYS_QPHY_PWR_STATE_CTRL	0xe8
+#define SGMII_PHYA_PWD			BIT(4)
+
+/* Register to QPHY wrapper control */
+#define SGMSYS_QPHY_WRAP_CTRL		0xec
+#define SGMII_PN_SWAP_MASK		GENMASK(1, 0)
+#define SGMII_PN_SWAP_TX_RX		(BIT(0) | BIT(1))
+
+/* struct mtk_pcs_lynxi - This structure holds each sgmii regmap and associated
+ *                        data
+ * @regmap:    The register map pointing at the range used to setup
+ *             SGMII modes
+ * @dev:       Pointer to device owning the PCS
+ * @ana_rgc3:  The offset of register ANA_RGC3 relative to regmap
+ * @interface: Currently configured interface mode
+ * @pcs:       Phylink PCS structure
+ * @flags:     Flags indicating hardware properties
+ */
+struct mtk_pcs_lynxi {
+	struct regmap		*regmap;
+	u32			ana_rgc3;
+	phy_interface_t		interface;
+	struct phylink_pcs	pcs;
+	u32			flags;
+};
+
+static struct mtk_pcs_lynxi *pcs_to_mtk_pcs_lynxi(struct phylink_pcs *pcs)
+{
+	return container_of(pcs, struct mtk_pcs_lynxi, pcs);
+}
+
+static void mtk_pcs_lynxi_get_state(struct phylink_pcs *pcs,
+				    struct phylink_link_state *state)
+{
+	struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs);
+	unsigned int bm, adv;
+
+	/* Read the BMSR and LPA */
+	regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &bm);
+	regmap_read(mpcs->regmap, SGMSYS_PCS_ADVERTISE, &adv);
+
+	phylink_mii_c22_pcs_decode_state(state, FIELD_GET(SGMII_BMSR, bm),
+					 FIELD_GET(SGMII_LPA, adv));
+}
+
+static int mtk_pcs_lynxi_config(struct phylink_pcs *pcs, unsigned int mode,
+				phy_interface_t interface,
+				const unsigned long *advertising,
+				bool permit_pause_to_mac)
+{
+	struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs);
+	bool mode_changed = false, changed, use_an;
+	unsigned int rgc3, sgm_mode, bmcr;
+	int advertise, link_timer;
+
+	advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
+							     advertising);
+	if (advertise < 0)
+		return advertise;
+
+	/* Clearing IF_MODE_BIT0 switches the PCS to BASE-X mode, and
+	 * we assume that fixes it's speed at bitrate = line rate (in
+	 * other words, 1000Mbps or 2500Mbps).
+	 */
+	if (interface == PHY_INTERFACE_MODE_SGMII) {
+		sgm_mode = SGMII_IF_MODE_SGMII;
+		if (phylink_autoneg_inband(mode)) {
+			sgm_mode |= SGMII_REMOTE_FAULT_DIS |
+				    SGMII_SPEED_DUPLEX_AN;
+			use_an = true;
+		} else {
+			use_an = false;
+		}
+	} else if (phylink_autoneg_inband(mode)) {
+		/* 1000base-X or 2500base-X autoneg */
+		sgm_mode = SGMII_REMOTE_FAULT_DIS;
+		use_an = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+					   advertising);
+	} else {
+		/* 1000base-X or 2500base-X without autoneg */
+		sgm_mode = 0;
+		use_an = false;
+	}
+
+	if (use_an)
+		bmcr = BMCR_ANENABLE;
+	else
+		bmcr = 0;
+
+	if (mpcs->interface != interface) {
+		link_timer = phylink_get_link_timer_ns(interface);
+		if (link_timer < 0)
+			return link_timer;
+
+		/* PHYA power down */
+		regmap_set_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
+				SGMII_PHYA_PWD);
+
+		/* Reset SGMII PCS state */
+		regmap_set_bits(mpcs->regmap, SGMSYS_RESERVED_0,
+				SGMII_SW_RESET);
+
+		if (mpcs->flags & MTK_SGMII_FLAG_PN_SWAP)
+			regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_WRAP_CTRL,
+					   SGMII_PN_SWAP_MASK,
+					   SGMII_PN_SWAP_TX_RX);
+
+		if (interface == PHY_INTERFACE_MODE_2500BASEX)
+			rgc3 = SGMII_PHY_SPEED_3_125G;
+		else
+			rgc3 = SGMII_PHY_SPEED_1_25G;
+
+		/* Configure the underlying interface speed */
+		regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
+				   SGMII_PHY_SPEED_MASK, rgc3);
+
+		/* Setup the link timer */
+		regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER,
+			     SGMII_LINK_TIMER_VAL(link_timer));
+
+		mpcs->interface = interface;
+		mode_changed = true;
+	}
+
+	/* Update the advertisement, noting whether it has changed */
+	regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
+				 SGMII_ADVERTISE, advertise, &changed);
+
+	/* Update the sgmsys mode register */
+	regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
+			   SGMII_REMOTE_FAULT_DIS | SGMII_SPEED_DUPLEX_AN |
+			   SGMII_IF_MODE_SGMII, sgm_mode);
+
+	/* Update the BMCR */
+	regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
+			   BMCR_ANENABLE, bmcr);
+
+	/* Release PHYA power down state
+	 * Only removing bit SGMII_PHYA_PWD isn't enough.
+	 * There are cases when the SGMII_PHYA_PWD register contains 0x9 which
+	 * prevents SGMII from working. The SGMII still shows link but no traffic
+	 * can flow. Writing 0x0 to the PHYA_PWD register fix the issue. 0x0 was
+	 * taken from a good working state of the SGMII interface.
+	 * Unknown how much the QPHY needs but it is racy without a sleep.
+	 * Tested on mt7622 & mt7986.
+	 */
+	usleep_range(50, 100);
+	regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0);
+
+	return changed || mode_changed;
+}
+
+static void mtk_pcs_lynxi_restart_an(struct phylink_pcs *pcs)
+{
+	struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs);
+
+	regmap_set_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1, BMCR_ANRESTART);
+}
+
+static void mtk_pcs_lynxi_link_up(struct phylink_pcs *pcs, unsigned int mode,
+				  phy_interface_t interface, int speed,
+				  int duplex)
+{
+	struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs);
+	unsigned int sgm_mode;
+
+	if (!phylink_autoneg_inband(mode)) {
+		/* Force the speed and duplex setting */
+		if (speed == SPEED_10)
+			sgm_mode = SGMII_SPEED_10;
+		else if (speed == SPEED_100)
+			sgm_mode = SGMII_SPEED_100;
+		else
+			sgm_mode = SGMII_SPEED_1000;
+
+		if (duplex != DUPLEX_FULL)
+			sgm_mode |= SGMII_DUPLEX_HALF;
+
+		regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
+				   SGMII_DUPLEX_HALF | SGMII_SPEED_MASK,
+				   sgm_mode);
+	}
+}
+
+static const struct phylink_pcs_ops mtk_pcs_lynxi_ops = {
+	.pcs_get_state = mtk_pcs_lynxi_get_state,
+	.pcs_config = mtk_pcs_lynxi_config,
+	.pcs_an_restart = mtk_pcs_lynxi_restart_an,
+	.pcs_link_up = mtk_pcs_lynxi_link_up,
+};
+
+struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
+					 struct regmap *regmap, u32 ana_rgc3,
+					 u32 flags)
+{
+	struct mtk_pcs_lynxi *mpcs;
+	u32 id, ver;
+	int ret;
+
+	ret = regmap_read(regmap, SGMSYS_PCS_DEVICE_ID, &id);
+	if (ret < 0)
+		return NULL;
+
+	if (id != SGMII_LYNXI_DEV_ID) {
+		dev_err(dev, "unknown PCS device id %08x\n", id);
+		return NULL;
+	}
+
+	ret = regmap_read(regmap, SGMSYS_PCS_SCRATCH, &ver);
+	if (ret < 0)
+		return NULL;
+
+	ver = FIELD_GET(SGMII_DEV_VERSION, ver);
+	if (ver != 0x1) {
+		dev_err(dev, "unknown PCS device version %04x\n", ver);
+		return NULL;
+	}
+
+	dev_dbg(dev, "MediaTek LynxI SGMII PCS (id 0x%08x, ver 0x%04x)\n", id,
+		ver);
+
+	mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL);
+	if (!mpcs)
+		return NULL;
+
+	mpcs->ana_rgc3 = ana_rgc3;
+	mpcs->regmap = regmap;
+	mpcs->flags = flags;
+	mpcs->pcs.ops = &mtk_pcs_lynxi_ops;
+	mpcs->pcs.poll = true;
+	mpcs->interface = PHY_INTERFACE_MODE_NA;
+
+	return &mpcs->pcs;
+}
+EXPORT_SYMBOL(mtk_pcs_lynxi_create);
+
+void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs)
+{
+	if (!pcs)
+		return;
+
+	kfree(pcs_to_mtk_pcs_lynxi(pcs));
+}
+EXPORT_SYMBOL(mtk_pcs_lynxi_destroy);
+
+MODULE_LICENSE("GPL");
--- /dev/null
+++ b/include/linux/pcs/pcs-mtk-lynxi.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_PCS_MTK_LYNXI_H
+#define __LINUX_PCS_MTK_LYNXI_H
+
+#include <linux/phylink.h>
+#include <linux/regmap.h>
+
+#define MTK_SGMII_FLAG_PN_SWAP		BIT(0)
+struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
+					 struct regmap *regmap,
+					 u32 ana_rgc3, u32 flags);
+void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs);
+#endif
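One detail worth pulling out of the PCS driver above is the link timer arithmetic: SGMSYS_PCS_LINK_TIMER counts in units of 2 * 8 ns, so SGMII_LINK_TIMER_VAL() divides a nanosecond value by 16. A quick standalone check (FIELD_PREP is dropped here since the mask starts at bit 0; the 1.6 ms figure is phylink's standard SGMII link timer):

#include <stdio.h>

/* the timer ticks every 2 * 8 ns, hence the divide by 16 */
#define SGMII_LINK_TIMER_VAL(ns)	((unsigned int)((ns) / 2 / 8))

int main(void)
{
	unsigned long link_timer_ns = 1600000;	/* 1.6 ms SGMII link timer */

	printf("register value: 0x%x\n", SGMII_LINK_TIMER_VAL(link_timer_ns));
	/* 1600000 / 16 = 100000 = 0x186a0, which fits in GENMASK(19, 0) */
	return 0;
}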
@@ -0,0 +1,591 @@
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Sat, 5 Nov 2022 23:36:18 +0100
Subject: [PATCH] net: ethernet: mtk_wed: introduce wed mcu support

Introduce WED mcu support used to configure WED WO chip.
This is a preliminary patch in order to add RX Wireless
Ethernet Dispatch available on MT7986 SoC.

Tested-by: Daniel Golle <daniel@makrotopia.org>
Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.h

--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,7 +5,7 @@

 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
-mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o
 ifdef CONFIG_DEBUG_FS
 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
 endif
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ *	   Sujuan Chen <sujuan.chen@mediatek.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/mfd/syscon.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+
+#include "mtk_wed_regs.h"
+#include "mtk_wed_wo.h"
+#include "mtk_wed.h"
+
+static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
+{
+	return readl(wo->boot.addr + reg);
+}
+
+static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+{
+	writel(val, wo->boot.addr + reg);
+}
+
+static struct sk_buff *
+mtk_wed_mcu_msg_alloc(const void *data, int data_len)
+{
+	int length = sizeof(struct mtk_wed_mcu_hdr) + data_len;
+	struct sk_buff *skb;
+
+	skb = alloc_skb(length, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+
+	memset(skb->head, 0, length);
+	skb_reserve(skb, sizeof(struct mtk_wed_mcu_hdr));
+	if (data && data_len)
+		skb_put_data(skb, data, data_len);
+
+	return skb;
+}
+
+static struct sk_buff *
+mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires)
+{
+	if (!time_is_after_jiffies(expires))
+		return NULL;
+
+	wait_event_timeout(wo->mcu.wait, !skb_queue_empty(&wo->mcu.res_q),
+			   expires - jiffies);
+	return skb_dequeue(&wo->mcu.res_q);
+}
+
+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb)
+{
+	skb_queue_tail(&wo->mcu.res_q, skb);
+	wake_up(&wo->mcu.wait);
+}
+
+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
+				      struct sk_buff *skb)
+{
+	struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+
+	switch (hdr->cmd) {
+	case MTK_WED_WO_EVT_LOG_DUMP: {
+		const char *msg = (const char *)(skb->data + sizeof(*hdr));
+
+		dev_notice(wo->hw->dev, "%s\n", msg);
+		break;
+	}
+	case MTK_WED_WO_EVT_PROFILING: {
+		struct mtk_wed_wo_log_info *info;
+		u32 count = (skb->len - sizeof(*hdr)) / sizeof(*info);
+		int i;
+
+		info = (struct mtk_wed_wo_log_info *)(skb->data + sizeof(*hdr));
+		for (i = 0 ; i < count ; i++)
+			dev_notice(wo->hw->dev,
+				   "SN:%u latency: total=%u, rro:%u, mod:%u\n",
+				   le32_to_cpu(info[i].sn),
+				   le32_to_cpu(info[i].total),
+				   le32_to_cpu(info[i].rro),
+				   le32_to_cpu(info[i].mod));
+		break;
+	}
+	case MTK_WED_WO_EVT_RXCNT_INFO:
+		break;
+	default:
+		break;
+	}
+
+	dev_kfree_skb(skb);
+}
+
+static int
+mtk_wed_mcu_skb_send_msg(struct mtk_wed_wo *wo, struct sk_buff *skb,
+			 int id, int cmd, u16 *wait_seq, bool wait_resp)
+{
+	struct mtk_wed_mcu_hdr *hdr;
+
+	/* TODO: make it dynamic based on cmd */
+	wo->mcu.timeout = 20 * HZ;
+
+	hdr = (struct mtk_wed_mcu_hdr *)skb_push(skb, sizeof(*hdr));
+	hdr->cmd = cmd;
+	hdr->length = cpu_to_le16(skb->len);
+
+	if (wait_resp && wait_seq) {
+		u16 seq = ++wo->mcu.seq;
+
+		if (!seq)
+			seq = ++wo->mcu.seq;
+		*wait_seq = seq;
+
+		hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_NEED_RSP);
+		hdr->seq = cpu_to_le16(seq);
+	}
+	if (id == MTK_WED_MODULE_ID_WO)
+		hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO);
+
+	dev_kfree_skb(skb);
+	return 0;
+}
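The sequence-number handling in mtk_wed_mcu_skb_send_msg() above deliberately skips 0 when the 16-bit counter wraps, so a pending request can always be matched against a non-zero seq in the response. A userspace sketch of the same idiom (the starting value is chosen to force a wrap):

#include <stdio.h>
#include <stdint.h>

static uint16_t mcu_seq = 0xfffe;	/* start near the wrap on purpose */

static uint16_t next_seq(void)
{
	uint16_t seq = ++mcu_seq;

	if (!seq)		/* 0 is never handed out */
		seq = ++mcu_seq;
	return seq;
}

int main(void)
{
	printf("%u %u %u\n", next_seq(), next_seq(), next_seq());
	/* prints 65535 1 2 - the counter skips 0 across the wrap */
	return 0;
}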
+
+static int
+mtk_wed_mcu_parse_response(struct mtk_wed_wo *wo, struct sk_buff *skb,
+			   int cmd, int seq)
+{
+	struct mtk_wed_mcu_hdr *hdr;
+
+	if (!skb) {
+		dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n",
+			cmd, seq);
+		return -ETIMEDOUT;
+	}
+
+	hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+	if (le16_to_cpu(hdr->seq) != seq)
+		return -EAGAIN;
+
+	skb_pull(skb, sizeof(*hdr));
+	switch (cmd) {
+	case MTK_WED_WO_CMD_RXCNT_INFO:
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
+			 const void *data, int len, bool wait_resp)
+{
+	unsigned long expires;
+	struct sk_buff *skb;
+	u16 seq;
+	int ret;
+
+	skb = mtk_wed_mcu_msg_alloc(data, len);
+	if (!skb)
+		return -ENOMEM;
+
+	mutex_lock(&wo->mcu.mutex);
+
+	ret = mtk_wed_mcu_skb_send_msg(wo, skb, id, cmd, &seq, wait_resp);
+	if (ret || !wait_resp)
+		goto unlock;
+
+	expires = jiffies + wo->mcu.timeout;
+	do {
+		skb = mtk_wed_mcu_get_response(wo, expires);
+		ret = mtk_wed_mcu_parse_response(wo, skb, cmd, seq);
+		dev_kfree_skb(skb);
+	} while (ret == -EAGAIN);
+
+unlock:
+	mutex_unlock(&wo->mcu.mutex);
+
+	return ret;
+}
+
+static int
+mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
+			  struct mtk_wed_wo_memory_region *region)
+{
+	struct reserved_mem *rmem;
+	struct device_node *np;
+	int index;
+
+	index = of_property_match_string(wo->hw->node, "memory-region-names",
+					 region->name);
+	if (index < 0)
+		return index;
+
+	np = of_parse_phandle(wo->hw->node, "memory-region", index);
+	if (!np)
+		return -ENODEV;
+
+	rmem = of_reserved_mem_lookup(np);
+	of_node_put(np);
+
+	if (!rmem)
+		return -ENODEV;
+
+	region->phy_addr = rmem->base;
+	region->size = rmem->size;
+	region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size);
+
+	return !region->addr ? -EINVAL : 0;
+}
+
+static int
+mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
+			 struct mtk_wed_wo_memory_region *region)
+{
+	const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data;
+	const struct mtk_wed_fw_trailer *trailer;
+	const struct mtk_wed_fw_region *fw_region;
+
+	trailer_ptr = fw->data + fw->size - sizeof(*trailer);
+	trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
+	region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
+	first_region_ptr = region_ptr;
+
+	while (region_ptr < trailer_ptr) {
+		u32 length;
+
+		fw_region = (const struct mtk_wed_fw_region *)region_ptr;
+		length = le32_to_cpu(fw_region->len);
+
+		if (region->phy_addr != le32_to_cpu(fw_region->addr))
+			goto next;
+
+		if (region->size < length)
+			goto next;
+
+		if (first_region_ptr < ptr + length)
+			goto next;
+
+		if (region->shared && region->consumed)
+			return 0;
+
+		if (!region->shared || !region->consumed) {
+			memcpy_toio(region->addr, ptr, length);
+			region->consumed = true;
+			return 0;
+		}
+next:
+		region_ptr += sizeof(*fw_region);
+		ptr += length;
+	}
+
+	return -EINVAL;
+}
+
+static int
+mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
+{
+	static struct mtk_wed_wo_memory_region mem_region[] = {
+		[MTK_WED_WO_REGION_EMI] = {
+			.name = "wo-emi",
+		},
+		[MTK_WED_WO_REGION_ILM] = {
+			.name = "wo-ilm",
+		},
+		[MTK_WED_WO_REGION_DATA] = {
+			.name = "wo-data",
+			.shared = true,
+		},
+	};
+	const struct mtk_wed_fw_trailer *trailer;
+	const struct firmware *fw;
+	const char *fw_name;
+	u32 val, boot_cr;
+	int ret, i;
+
+	/* load firmware region metadata */
+	for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+		ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
+		if (ret)
+			return ret;
+	}
+
+	wo->boot.name = "wo-boot";
+	ret = mtk_wed_get_memory_region(wo, &wo->boot);
+	if (ret)
+		return ret;
+
+	/* set dummy cr */
+	wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
+		wo->hw->index + 1);
+
+	/* load firmware */
+	fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0;
+	ret = request_firmware(&fw, fw_name, wo->hw->dev);
+	if (ret)
+		return ret;
+
+	trailer = (void *)(fw->data + fw->size -
+			   sizeof(struct mtk_wed_fw_trailer));
+	dev_info(wo->hw->dev,
+		 "MTK WED WO Firmware Version: %.10s, Build Time: %.15s\n",
|
||||
+ trailer->fw_ver, trailer->build_date);
|
||||
+ dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n",
|
||||
+ trailer->chip_id, trailer->num_region);
|
||||
+
|
||||
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
|
||||
+ ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]);
|
||||
+ if (ret)
|
||||
+ goto out;
|
||||
+ }
|
||||
+
|
||||
+ /* set the start address */
|
||||
+ boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
|
||||
+ : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
|
||||
+ wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
|
||||
+ /* wo firmware reset */
|
||||
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
|
||||
+
|
||||
+ val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
|
||||
+ val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK
|
||||
+ : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
|
||||
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
|
||||
+out:
|
||||
+ release_firmware(fw);
|
||||
+
|
||||
+ return ret;
|
||||
+}
|
||||
+
|
||||
+static u32
|
||||
+mtk_wed_mcu_read_fw_dl(struct mtk_wed_wo *wo)
|
||||
+{
|
||||
+ return wed_r32(wo->hw->wed_dev,
|
||||
+ MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL);
|
||||
+}
|
||||
+
|
||||
+int mtk_wed_mcu_init(struct mtk_wed_wo *wo)
|
||||
+{
|
||||
+ u32 val;
|
||||
+ int ret;
|
||||
+
|
||||
+ skb_queue_head_init(&wo->mcu.res_q);
|
||||
+ init_waitqueue_head(&wo->mcu.wait);
|
||||
+ mutex_init(&wo->mcu.mutex);
|
||||
+
|
||||
+ ret = mtk_wed_mcu_load_firmware(wo);
|
||||
+ if (ret)
|
||||
+ return ret;
|
||||
+
|
||||
+ return readx_poll_timeout(mtk_wed_mcu_read_fw_dl, wo, val, !val,
|
||||
+ 100, MTK_FW_DL_TIMEOUT);
|
||||
+}
|
||||
+
|
||||
+MODULE_FIRMWARE(MT7986_FIRMWARE_WO0);
|
||||
+MODULE_FIRMWARE(MT7986_FIRMWARE_WO1);
|
||||
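Note: the request/response flow above pairs each command with a 16-bit sequence number (skipping zero on wrap) and blocks on the response queue until the matching seq arrives or the 20 * HZ timeout fires. A minimal caller sketch, not part of the patch; the one-byte payload layout is an assumption for illustration:

	/* Sketch: query RX counters from the WO MCU and wait for the reply.
	 * A NULL response is turned into -ETIMEDOUT by
	 * mtk_wed_mcu_parse_response().
	 */
	static int wo_query_rxcnt_example(struct mtk_wed_wo *wo, u16 wlan_idx)
	{
		__le16 req = cpu_to_le16(wlan_idx);	/* payload is illustrative */

		return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
					    MTK_WED_WO_CMD_RXCNT_INFO,
					    &req, sizeof(req), true);
	}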
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -152,6 +152,7 @@ struct mtk_wdma_desc {

#define MTK_WED_RING_RX(_n)				(0x400 + (_n) * 0x10)

+#define MTK_WED_SCR0					0x3c0
#define MTK_WED_WPDMA_INT_TRIGGER			0x504
#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE		BIT(1)
#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE		GENMASK(5, 4)
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2022 Lorenzo Bianconi <lorenzo@kernel.org> */
+
+#ifndef __MTK_WED_WO_H
+#define __MTK_WED_WO_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+struct mtk_wed_hw;
+
+struct mtk_wed_mcu_hdr {
+	/* DW0 */
+	u8 version;
+	u8 cmd;
+	__le16 length;
+
+	/* DW1 */
+	__le16 seq;
+	__le16 flag;
+
+	/* DW2 */
+	__le32 status;
+
+	/* DW3 */
+	u8 rsv[20];
+};
+
+struct mtk_wed_wo_log_info {
+	__le32 sn;
+	__le32 total;
+	__le32 rro;
+	__le32 mod;
+};
+
+enum mtk_wed_wo_event {
+	MTK_WED_WO_EVT_LOG_DUMP = 0x1,
+	MTK_WED_WO_EVT_PROFILING = 0x2,
+	MTK_WED_WO_EVT_RXCNT_INFO = 0x3,
+};
+
+#define MTK_WED_MODULE_ID_WO		1
+#define MTK_FW_DL_TIMEOUT		4000000 /* us */
+#define MTK_WOCPU_TIMEOUT		2000000 /* us */
+
+enum {
+	MTK_WED_WARP_CMD_FLAG_RSP		= BIT(0),
+	MTK_WED_WARP_CMD_FLAG_NEED_RSP		= BIT(1),
+	MTK_WED_WARP_CMD_FLAG_FROM_TO_WO	= BIT(2),
+};
+
+enum {
+	MTK_WED_WO_REGION_EMI,
+	MTK_WED_WO_REGION_ILM,
+	MTK_WED_WO_REGION_DATA,
+	MTK_WED_WO_REGION_BOOT,
+	__MTK_WED_WO_REGION_MAX,
+};
+
+enum mtk_wed_dummy_cr_idx {
+	MTK_WED_DUMMY_CR_FWDL,
+	MTK_WED_DUMMY_CR_WO_STATUS,
+};
+
+#define MT7986_FIRMWARE_WO0	"mediatek/mt7986_wo_0.bin"
+#define MT7986_FIRMWARE_WO1	"mediatek/mt7986_wo_1.bin"
+
+#define MTK_WO_MCU_CFG_LS_BASE				0
+#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR			(MTK_WO_MCU_CFG_LS_BASE + 0x000)
+#define MTK_WO_MCU_CFG_LS_FW_VER_ADDR			(MTK_WO_MCU_CFG_LS_BASE + 0x004)
+#define MTK_WO_MCU_CFG_LS_CFG_DBG1_ADDR			(MTK_WO_MCU_CFG_LS_BASE + 0x00c)
+#define MTK_WO_MCU_CFG_LS_CFG_DBG2_ADDR			(MTK_WO_MCU_CFG_LS_BASE + 0x010)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_ADDR			(MTK_WO_MCU_CFG_LS_BASE + 0x014)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_SET_ADDR		(MTK_WO_MCU_CFG_LS_BASE + 0x018)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR		(MTK_WO_MCU_CFG_LS_BASE + 0x01c)
+#define MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR		(MTK_WO_MCU_CFG_LS_BASE + 0x050)
+#define MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR		(MTK_WO_MCU_CFG_LS_BASE + 0x060)
+#define MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR		(MTK_WO_MCU_CFG_LS_BASE + 0x064)
+
+#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK	BIT(5)
+#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK	BIT(0)
+
+struct mtk_wed_wo_memory_region {
+	const char *name;
+	void __iomem *addr;
+	phys_addr_t phy_addr;
+	u32 size;
+	bool shared:1;
+	bool consumed:1;
+};
+
+struct mtk_wed_fw_region {
+	__le32 decomp_crc;
+	__le32 decomp_len;
+	__le32 decomp_blk_sz;
+	u8 rsv0[4];
+	__le32 addr;
+	__le32 len;
+	u8 feature_set;
+	u8 rsv1[15];
+} __packed;
+
+struct mtk_wed_fw_trailer {
+	u8 chip_id;
+	u8 eco_code;
+	u8 num_region;
+	u8 format_ver;
+	u8 format_flag;
+	u8 rsv[2];
+	char fw_ver[10];
+	char build_date[15];
+	u32 crc;
+};
+
+struct mtk_wed_wo {
+	struct mtk_wed_hw *hw;
+	struct mtk_wed_wo_memory_region boot;
+
+	struct {
+		struct mutex mutex;
+		int timeout;
+		u16 seq;
+
+		struct sk_buff_head res_q;
+		wait_queue_head_t wait;
+	} mcu;
+};
+
+static inline int
+mtk_wed_mcu_check_msg(struct mtk_wed_wo *wo, struct sk_buff *skb)
+{
+	struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+
+	if (hdr->version)
+		return -EINVAL;
+
+	if (skb->len < sizeof(*hdr) || skb->len != le16_to_cpu(hdr->length))
+		return -EINVAL;
+
+	return 0;
+}
+
+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
+				      struct sk_buff *skb);
+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
+			 const void *data, int len, bool wait_resp);
+int mtk_wed_mcu_init(struct mtk_wed_wo *wo);
+
+#endif /* __MTK_WED_WO_H */
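Note: the DW0-DW3 groups in mtk_wed_mcu_hdr pin the header at 32 bytes (1+1+2, 2+2, 4, and 20 reserved), which is what mtk_wed_mcu_check_msg() relies on when comparing skb->len against hdr->length. A compile-time guard makes that layout assumption explicit; a sketch, not part of the patch:

	/* Sketch: guard the wire format against accidental padding. */
	static_assert(sizeof(struct mtk_wed_mcu_hdr) == 32,
		      "WO MCU header must stay 32 bytes on the wire");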
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -11,6 +11,35 @@
struct mtk_wed_hw;
struct mtk_wdma_desc;

+enum mtk_wed_wo_cmd {
+	MTK_WED_WO_CMD_WED_CFG,
+	MTK_WED_WO_CMD_WED_RX_STAT,
+	MTK_WED_WO_CMD_RRO_SER,
+	MTK_WED_WO_CMD_DBG_INFO,
+	MTK_WED_WO_CMD_DEV_INFO,
+	MTK_WED_WO_CMD_BSS_INFO,
+	MTK_WED_WO_CMD_STA_REC,
+	MTK_WED_WO_CMD_DEV_INFO_DUMP,
+	MTK_WED_WO_CMD_BSS_INFO_DUMP,
+	MTK_WED_WO_CMD_STA_REC_DUMP,
+	MTK_WED_WO_CMD_BA_INFO_DUMP,
+	MTK_WED_WO_CMD_FBCMD_Q_DUMP,
+	MTK_WED_WO_CMD_FW_LOG_CTRL,
+	MTK_WED_WO_CMD_LOG_FLUSH,
+	MTK_WED_WO_CMD_CHANGE_STATE,
+	MTK_WED_WO_CMD_CPU_STATS_ENABLE,
+	MTK_WED_WO_CMD_CPU_STATS_DUMP,
+	MTK_WED_WO_CMD_EXCEPTION_INIT,
+	MTK_WED_WO_CMD_PROF_CTRL,
+	MTK_WED_WO_CMD_STA_BA_DUMP,
+	MTK_WED_WO_CMD_BA_CTRL_DUMP,
+	MTK_WED_WO_CMD_RXCNT_CTRL,
+	MTK_WED_WO_CMD_RXCNT_INFO,
+	MTK_WED_WO_CMD_SET_CAP,
+	MTK_WED_WO_CMD_CCIF_RING_DUMP,
+	MTK_WED_WO_CMD_WED_END
+};
+
enum mtk_wed_bus_tye {
	MTK_WED_BUS_PCIE,
	MTK_WED_BUS_AXI,
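Note: these command IDs are shared with the wlan driver; the reset path at the end of this series, for instance, pushes a one-byte state change through the same mailbox. A condensed sketch mirroring mtk_wed_rx_reset() further below; MTK_WED_WO_STATE_SER_RESET is defined outside this excerpt:

	/* Sketch: ask the WO firmware to enter SER reset state and wait
	 * for the acknowledgement.
	 */
	static int wo_enter_ser_reset(struct mtk_wed_wo *wo)
	{
		u8 state = MTK_WED_WO_STATE_SER_RESET;

		return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
					    MTK_WED_WO_CMD_CHANGE_STATE,
					    &state, sizeof(state), true);
	}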
@ -0,0 +1,737 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 5 Nov 2022 23:36:19 +0100
Subject: [PATCH] net: ethernet: mtk_wed: introduce wed wo support

Introduce WO chip support to mtk wed driver. MTK WED WO is used to
implement RX Wireless Ethernet Dispatch and offload traffic received by
wlan nic to the wired interface.

Tested-by: Daniel Golle <daniel@makrotopia.org>
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c

--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,7 +5,7 @@

obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
-mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o
ifdef CONFIG_DEBUG_FS
mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -16,6 +16,7 @@
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"
+#include "mtk_wed_wo.h"

#define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)

@@ -355,6 +356,8 @@ mtk_wed_detach(struct mtk_wed_device *de

	mtk_wed_free_buffer(dev);
	mtk_wed_free_tx_rings(dev);
+	if (hw->version != 1)
+		mtk_wed_wo_deinit(hw);

	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
		struct device_node *wlan_node;
@@ -878,9 +881,11 @@ mtk_wed_attach(struct mtk_wed_device *de
	}

	mtk_wed_hw_init_early(dev);
-	if (hw->hifsys)
+	if (hw->version == 1)
		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
				   BIT(hw->index), 0);
+	else
+		ret = mtk_wed_wo_init(hw);

out:
	mutex_unlock(&hw_lock);
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>

struct mtk_eth;
+struct mtk_wed_wo;

struct mtk_wed_hw {
	struct device_node *node;
@@ -22,6 +23,7 @@ struct mtk_wed_hw {
	struct regmap *mirror;
	struct dentry *debugfs_dir;
	struct mtk_wed_device *wed_dev;
+	struct mtk_wed_wo *wed_wo;
	u32 debugfs_reg;
	u32 num_flows;
	u8 version;
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -122,8 +122,7 @@ mtk_wed_mcu_skb_send_msg(struct mtk_wed_
	if (id == MTK_WED_MODULE_ID_WO)
		hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO);

-	dev_kfree_skb(skb);
-	return 0;
+	return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
}

static int
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ *	   Sujuan Chen <sujuan.chen@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_irq.h>
+#include <linux/bitfield.h>
+
+#include "mtk_wed.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed_wo.h"
+
+static u32
+mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
+{
+	u32 val;
+
+	if (regmap_read(wo->mmio.regs, reg, &val))
+		val = ~0;
+
+	return val;
+}
+
+static void
+mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+{
+	regmap_write(wo->mmio.regs, reg, val);
+}
+
+static u32
+mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
+{
+	u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
+
+	return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
+}
+
+static void
+mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
+}
+
+static void
+mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
+}
+
+static void
+mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wo->mmio.lock, flags);
+	wo->mmio.irq_mask &= ~mask;
+	wo->mmio.irq_mask |= val;
+	if (set)
+		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+	spin_unlock_irqrestore(&wo->mmio.lock, flags);
+}
+
+static void
+mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
+	tasklet_schedule(&wo->mmio.irq_tasklet);
+}
+
+static void
+mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
+}
+
+static void
+mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
+{
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
+}
+
+static void
+mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+		      u32 val)
+{
+	wmb();
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
+}
+
+static void *
+mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
+		   bool flush)
+{
+	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+	int index = (q->tail + 1) % q->n_desc;
+	struct mtk_wed_wo_queue_entry *entry;
+	struct mtk_wed_wo_queue_desc *desc;
+	void *buf;
+
+	if (!q->queued)
+		return NULL;
+
+	if (flush)
+		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
+	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
+		return NULL;
+
+	q->tail = index;
+	q->queued--;
+
+	desc = &q->desc[index];
+	entry = &q->entry[index];
+	buf = entry->buf;
+	if (len)
+		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
+				 le32_to_cpu(READ_ONCE(desc->ctrl)));
+	if (buf)
+		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
+				 DMA_FROM_DEVICE);
+	entry->buf = NULL;
+
+	return buf;
+}
+
+static int
+mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+			gfp_t gfp, bool rx)
+{
+	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	int n_buf = 0;
+
+	spin_lock_bh(&q->lock);
+	while (q->queued < q->n_desc) {
+		void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp);
+		struct mtk_wed_wo_queue_entry *entry;
+		dma_addr_t addr;
+
+		if (!buf)
+			break;
+
+		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
+		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
+			skb_free_frag(buf);
+			break;
+		}
+
+		q->head = (q->head + 1) % q->n_desc;
+		entry = &q->entry[q->head];
+		entry->addr = addr;
+		entry->len = q->buf_size;
+		q->entry[q->head].buf = buf;
+
+		if (rx) {
+			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
+			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
+				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
+					      entry->len);
+
+			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
+			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+		}
+		q->queued++;
+		n_buf++;
+	}
+	spin_unlock_bh(&q->lock);
+
+	return n_buf;
+}
+
+static void
+mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
+{
+	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
+	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
+}
+
+static void
+mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	for (;;) {
+		struct mtk_wed_mcu_hdr *hdr;
+		struct sk_buff *skb;
+		void *data;
+		u32 len;
+
+		data = mtk_wed_wo_dequeue(wo, q, &len, false);
+		if (!data)
+			break;
+
+		skb = build_skb(data, q->buf_size);
+		if (!skb) {
+			skb_free_frag(data);
+			continue;
+		}
+
+		__skb_put(skb, len);
+		if (mtk_wed_mcu_check_msg(wo, skb)) {
+			dev_kfree_skb(skb);
+			continue;
+		}
+
+		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
+			mtk_wed_mcu_rx_event(wo, skb);
+		else
+			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
+	}
+
+	if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) {
+		u32 index = (q->head - 1) % q->n_desc;
+
+		mtk_wed_wo_queue_kick(wo, q, index);
+	}
+}
+
+static irqreturn_t
+mtk_wed_wo_irq_handler(int irq, void *data)
+{
+	struct mtk_wed_wo *wo = data;
+
+	mtk_wed_wo_set_isr(wo, 0);
+	tasklet_schedule(&wo->mmio.irq_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
+{
+	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
+	u32 intr, mask;
+
+	/* disable interrupts */
+	mtk_wed_wo_set_isr(wo, 0);
+
+	intr = mtk_wed_wo_get_isr(wo);
+	intr &= wo->mmio.irq_mask;
+	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
+	mtk_wed_wo_irq_disable(wo, mask);
+
+	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
+		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
+		mtk_wed_wo_rx_complete(wo);
+	}
+}
+
+/* mtk wed wo hw queues */
+
+static int
+mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+		       int n_desc, int buf_size, int index,
+		       struct mtk_wed_wo_queue_regs *regs)
+{
+	spin_lock_init(&q->lock);
+	q->regs = *regs;
+	q->n_desc = n_desc;
+	q->buf_size = buf_size;
+
+	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
+				      &q->desc_dma, GFP_KERNEL);
+	if (!q->desc)
+		return -ENOMEM;
+
+	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
+				GFP_KERNEL);
+	if (!q->entry)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void
+mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
+			  q->desc_dma);
+}
+
+static void
+mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	struct page *page;
+	int i;
+
+	spin_lock_bh(&q->lock);
+	for (i = 0; i < q->n_desc; i++) {
+		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
+
+		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
+				 DMA_TO_DEVICE);
+		skb_free_frag(entry->buf);
+		entry->buf = NULL;
+	}
+	spin_unlock_bh(&q->lock);
+
+	if (!q->cache.va)
+		return;
+
+	page = virt_to_page(q->cache.va);
+	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
+	memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	struct page *page;
+
+	spin_lock_bh(&q->lock);
+	for (;;) {
+		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
+
+		if (!buf)
+			break;
+
+		skb_free_frag(buf);
+	}
+	spin_unlock_bh(&q->lock);
+
+	if (!q->cache.va)
+		return;
+
+	page = virt_to_page(q->cache.va);
+	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
+	memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
+	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
+}
+
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+			    struct sk_buff *skb)
+{
+	struct mtk_wed_wo_queue_entry *entry;
+	struct mtk_wed_wo_queue_desc *desc;
+	int ret = 0, index;
+	u32 ctrl;
+
+	spin_lock_bh(&q->lock);
+
+	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
+	index = (q->head + 1) % q->n_desc;
+	if (q->tail == index) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	entry = &q->entry[index];
+	if (skb->len > entry->len) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	desc = &q->desc[index];
+	q->head = index;
+
+	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
+				DMA_TO_DEVICE);
+	memcpy(entry->buf, skb->data, skb->len);
+	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
+				   DMA_TO_DEVICE);
+
+	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
+	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
+	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
+	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+	mtk_wed_wo_queue_kick(wo, q, q->head);
+	mtk_wed_wo_kickout(wo);
+out:
+	spin_unlock_bh(&q->lock);
+
+	dev_kfree_skb(skb);
+
+	return ret;
+}
+
+static int
+mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
+{
+	return 0;
+}
+
+static int
+mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
+{
+	struct mtk_wed_wo_queue_regs regs;
+	struct device_node *np;
+	int ret;
+
+	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
+	if (!np)
+		return -ENODEV;
+
+	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
+	if (IS_ERR_OR_NULL(wo->mmio.regs))
+		return PTR_ERR(wo->mmio.regs);
+
+	wo->mmio.irq = irq_of_parse_and_map(np, 0);
+	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
+	spin_lock_init(&wo->mmio.lock);
+	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
+
+	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
+			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
+			       KBUILD_MODNAME, wo);
+	if (ret)
+		goto error;
+
+	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
+	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
+	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
+	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
+
+	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
+				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
+				     &regs);
+	if (ret)
+		goto error;
+
+	mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false);
+	mtk_wed_wo_queue_reset(wo, &wo->q_tx);
+
+	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
+	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
+	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
+	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
+
+	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
+				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
+				     &regs);
+	if (ret)
+		goto error;
+
+	mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true);
+	mtk_wed_wo_queue_reset(wo, &wo->q_rx);
+
+	/* rx queue irqmask */
+	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+
+	return 0;
+
+error:
+	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+
+	return ret;
+}
+
+static void
+mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
+{
+	/* disable interrupts */
+	mtk_wed_wo_set_isr(wo, 0);
+
+	tasklet_disable(&wo->mmio.irq_tasklet);
+
+	disable_irq(wo->mmio.irq);
+	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+
+	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
+	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
+	mtk_wed_wo_queue_free(wo, &wo->q_tx);
+	mtk_wed_wo_queue_free(wo, &wo->q_rx);
+}
+
+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
+{
+	struct mtk_wed_wo *wo;
+	int ret;
+
+	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
+	if (!wo)
+		return -ENOMEM;
+
+	hw->wed_wo = wo;
+	wo->hw = hw;
+
+	ret = mtk_wed_wo_hardware_init(wo);
+	if (ret)
+		return ret;
+
+	ret = mtk_wed_mcu_init(wo);
+	if (ret)
+		return ret;
+
+	return mtk_wed_wo_exception_init(wo);
+}
+
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
+{
+	struct mtk_wed_wo *wo = hw->wed_wo;
+
+	mtk_wed_wo_hw_deinit(wo);
+}
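Note: the tx path above treats the descriptor array as a classic circular buffer: head is the producer index, the consumer index is re-read from the dma_idx shadow register, and the ring counts as full when advancing head would collide with tail. A standalone sketch of that invariant (the ring_full() helper is hypothetical, not in the patch):

	/* Sketch: the occupancy rule used by mtk_wed_wo_queue_tx_skb().
	 * One slot always stays empty so that head == tail can only mean
	 * "empty", never "full".
	 */
	static bool ring_full(u16 head, u16 tail, int n_desc)
	{
		return (head + 1) % n_desc == tail;
	}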
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -80,6 +80,54 @@ enum mtk_wed_dummy_cr_idx {
#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK	BIT(5)
#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK	BIT(0)

+#define MTK_WED_WO_RING_SIZE	256
+#define MTK_WED_WO_CMD_LEN	1504
+
+#define MTK_WED_WO_TXCH_NUM		0
+#define MTK_WED_WO_RXCH_NUM		1
+#define MTK_WED_WO_RXCH_WO_EXCEPTION	7
+
+#define MTK_WED_WO_TXCH_INT_MASK	BIT(0)
+#define MTK_WED_WO_RXCH_INT_MASK	BIT(1)
+#define MTK_WED_WO_EXCEPTION_INT_MASK	BIT(7)
+#define MTK_WED_WO_ALL_INT_MASK		(MTK_WED_WO_RXCH_INT_MASK | \
+					 MTK_WED_WO_EXCEPTION_INT_MASK)
+
+#define MTK_WED_WO_CCIF_BUSY		0x004
+#define MTK_WED_WO_CCIF_START		0x008
+#define MTK_WED_WO_CCIF_TCHNUM		0x00c
+#define MTK_WED_WO_CCIF_RCHNUM		0x010
+#define MTK_WED_WO_CCIF_RCHNUM_MASK	GENMASK(7, 0)
+
+#define MTK_WED_WO_CCIF_ACK		0x014
+#define MTK_WED_WO_CCIF_IRQ0_MASK	0x018
+#define MTK_WED_WO_CCIF_IRQ1_MASK	0x01c
+#define MTK_WED_WO_CCIF_DUMMY1		0x020
+#define MTK_WED_WO_CCIF_DUMMY2		0x024
+#define MTK_WED_WO_CCIF_DUMMY3		0x028
+#define MTK_WED_WO_CCIF_DUMMY4		0x02c
+#define MTK_WED_WO_CCIF_SHADOW1		0x030
+#define MTK_WED_WO_CCIF_SHADOW2		0x034
+#define MTK_WED_WO_CCIF_SHADOW3		0x038
+#define MTK_WED_WO_CCIF_SHADOW4		0x03c
+#define MTK_WED_WO_CCIF_DUMMY5		0x050
+#define MTK_WED_WO_CCIF_DUMMY6		0x054
+#define MTK_WED_WO_CCIF_DUMMY7		0x058
+#define MTK_WED_WO_CCIF_DUMMY8		0x05c
+#define MTK_WED_WO_CCIF_SHADOW5		0x060
+#define MTK_WED_WO_CCIF_SHADOW6		0x064
+#define MTK_WED_WO_CCIF_SHADOW7		0x068
+#define MTK_WED_WO_CCIF_SHADOW8		0x06c
+
+#define MTK_WED_WO_CTL_SD_LEN1		GENMASK(13, 0)
+#define MTK_WED_WO_CTL_LAST_SEC1	BIT(14)
+#define MTK_WED_WO_CTL_BURST		BIT(15)
+#define MTK_WED_WO_CTL_SD_LEN0_SHIFT	16
+#define MTK_WED_WO_CTL_SD_LEN0		GENMASK(29, 16)
+#define MTK_WED_WO_CTL_LAST_SEC0	BIT(30)
+#define MTK_WED_WO_CTL_DMA_DONE		BIT(31)
+#define MTK_WED_WO_INFO_WINFO		GENMASK(15, 0)
+
struct mtk_wed_wo_memory_region {
	const char *name;
	void __iomem *addr;
@@ -112,10 +160,53 @@ struct mtk_wed_fw_trailer {
	u32 crc;
};

+struct mtk_wed_wo_queue_regs {
+	u32 desc_base;
+	u32 ring_size;
+	u32 cpu_idx;
+	u32 dma_idx;
+};
+
+struct mtk_wed_wo_queue_desc {
+	__le32 buf0;
+	__le32 ctrl;
+	__le32 buf1;
+	__le32 info;
+	__le32 reserved[4];
+} __packed __aligned(32);
+
+struct mtk_wed_wo_queue_entry {
+	dma_addr_t addr;
+	void *buf;
+	u32 len;
+};
+
+struct mtk_wed_wo_queue {
+	struct mtk_wed_wo_queue_regs regs;
+
+	struct page_frag_cache cache;
+	spinlock_t lock;
+
+	struct mtk_wed_wo_queue_desc *desc;
+	dma_addr_t desc_dma;
+
+	struct mtk_wed_wo_queue_entry *entry;
+
+	u16 head;
+	u16 tail;
+	int n_desc;
+	int queued;
+	int buf_size;
+
+};
+
struct mtk_wed_wo {
	struct mtk_wed_hw *hw;
	struct mtk_wed_wo_memory_region boot;

+	struct mtk_wed_wo_queue q_tx;
+	struct mtk_wed_wo_queue q_rx;
+
	struct {
		struct mutex mutex;
		int timeout;
@@ -124,6 +215,15 @@ struct mtk_wed_wo {
		struct sk_buff_head res_q;
		wait_queue_head_t wait;
	} mcu;
+
+	struct {
+		struct regmap *regs;
+
+		spinlock_t lock;
+		struct tasklet_struct irq_tasklet;
+		int irq;
+		u32 irq_mask;
+	} mmio;
};

static inline int
@@ -146,5 +246,9 @@ void mtk_wed_mcu_rx_unsolicited_event(st
int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
			 const void *data, int len, bool wait_resp);
int mtk_wed_mcu_init(struct mtk_wed_wo *wo);
+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw);
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q,
+			    struct sk_buff *skb);

#endif /* __MTK_WED_WO_H */
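Note: MTK_WED_WO_CTL_SD_LEN0 packs the segment length into bits 29:16 of the descriptor ctrl word; FIELD_PREP/FIELD_GET from linux/bitfield.h derive the shift from the GENMASK, which is why the explicit _SHIFT define never gets used in this series. A minimal round-trip sketch, for illustration only:

	/* Sketch: pack a segment length into bits 29:16 and read it back. */
	static u32 ctl_len_roundtrip(u32 len)
	{
		u32 ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, len) |
			   MTK_WED_WO_CTL_LAST_SEC0;

		return FIELD_GET(MTK_WED_WO_CTL_SD_LEN0, ctrl);	/* == len */
	}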
@ -0,0 +1,79 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 5 Nov 2022 23:36:20 +0100
Subject: [PATCH] net: ethernet: mtk_wed: rename tx_wdma array in rx_wdma

Rename tx_wdma queue array in rx_wdma since this is rx side of wdma soc.
Moreover rename mtk_wed_wdma_ring_setup routine in
mtk_wed_wdma_rx_ring_setup()

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -253,8 +253,8 @@ mtk_wed_free_tx_rings(struct mtk_wed_dev

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
		mtk_wed_free_ring(dev, &dev->tx_ring[i]);
-	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
-		mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
+	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
+		mtk_wed_free_ring(dev, &dev->rx_wdma[i]);
}

static void
@@ -688,10 +688,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device
}

static int
-mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
{
	u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
-	struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
+	struct mtk_wed_ring *wdma = &dev->rx_wdma[idx];

	if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
		return -ENOMEM;
@@ -805,9 +805,9 @@ mtk_wed_start(struct mtk_wed_device *dev
{
	int i;

-	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
-		if (!dev->tx_wdma[i].desc)
-			mtk_wed_wdma_ring_setup(dev, i, 16);
+	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
+		if (!dev->rx_wdma[i].desc)
+			mtk_wed_wdma_rx_ring_setup(dev, i, 16);

	mtk_wed_hw_init(dev);
	mtk_wed_configure_irq(dev, irq_mask);
@@ -916,7 +916,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
		sizeof(*ring->desc)))
		return -ENOMEM;

-	if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_TX(idx);
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -7,6 +7,7 @@
#include <linux/pci.h>

#define MTK_WED_TX_QUEUES		2
+#define MTK_WED_RX_QUEUES		2

struct mtk_wed_hw;
struct mtk_wdma_desc;
@@ -66,7 +67,7 @@ struct mtk_wed_device {

	struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
	struct mtk_wed_ring txfree_ring;
-	struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
+	struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];

	struct {
		int size;
File diff suppressed because it is too large
@ -0,0 +1,149 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 5 Nov 2022 23:36:22 +0100
Subject: [PATCH] net: ethernet: mtk_wed: add rx mib counters

Introduce WED RX MIB counters support available on MT7986a SoC.

Tested-by: Daniel Golle <daniel@makrotopia.org>
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */

#include <linux/seq_file.h>
+#include <linux/soc/mediatek/mtk_wed.h>
#include "mtk_wed.h"
#include "mtk_wed_regs.h"

@@ -18,6 +19,8 @@ enum {
	DUMP_TYPE_WDMA,
	DUMP_TYPE_WPDMA_TX,
	DUMP_TYPE_WPDMA_TXFREE,
+	DUMP_TYPE_WPDMA_RX,
+	DUMP_TYPE_WED_RRO,
};

#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
@@ -36,6 +39,9 @@ enum {

#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
+#define DUMP_WPDMA_RX_RING(_n)	DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n)
+#define DUMP_WED_RRO_RING(_base)DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO)
+#define DUMP_WED_RRO_FDBK(_base)DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO)

static void
print_reg_val(struct seq_file *s, const char *name, u32 val)
@@ -57,6 +63,7 @@ dump_wed_regs(struct seq_file *s, struct
			   cur > regs ? "\n" : "",
			   cur->name);
		continue;
+	case DUMP_TYPE_WED_RRO:
	case DUMP_TYPE_WED:
		val = wed_r32(dev, cur->offset);
		break;
@@ -69,6 +76,9 @@ dump_wed_regs(struct seq_file *s, struct
	case DUMP_TYPE_WPDMA_TXFREE:
		val = wpdma_txfree_r32(dev, cur->offset);
		break;
+	case DUMP_TYPE_WPDMA_RX:
+		val = wpdma_rx_r32(dev, cur->base, cur->offset);
+		break;
	}
	print_reg_val(s, cur->name, val);
}
@@ -132,6 +142,80 @@ wed_txinfo_show(struct seq_file *s, void
}
DEFINE_SHOW_ATTRIBUTE(wed_txinfo);

+static int
+wed_rxinfo_show(struct seq_file *s, void *data)
+{
+	static const struct reg_dump regs[] = {
+		DUMP_STR("WPDMA RX"),
+		DUMP_WPDMA_RX_RING(0),
+		DUMP_WPDMA_RX_RING(1),
+
+		DUMP_STR("WPDMA RX"),
+		DUMP_WED(WED_WPDMA_RX_D_MIB(0)),
+		DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)),
+		DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)),
+		DUMP_WED(WED_WPDMA_RX_D_MIB(1)),
+		DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)),
+		DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)),
+		DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB),
+
+		DUMP_STR("WED RX"),
+		DUMP_WED_RING(WED_RING_RX_DATA(0)),
+		DUMP_WED_RING(WED_RING_RX_DATA(1)),
+
+		DUMP_STR("WED RRO"),
+		DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
+		DUMP_WED(WED_RROQM_MID_MIB),
+		DUMP_WED(WED_RROQM_MOD_MIB),
+		DUMP_WED(WED_RROQM_MOD_COHERENT_MIB),
+		DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0),
+		DUMP_WED(WED_RROQM_FDBK_IND_MIB),
+		DUMP_WED(WED_RROQM_FDBK_ENQ_MIB),
+		DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
+		DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
+
+		DUMP_STR("WED Route QM"),
+		DUMP_WED(WED_RTQM_R2H_MIB(0)),
+		DUMP_WED(WED_RTQM_R2Q_MIB(0)),
+		DUMP_WED(WED_RTQM_Q2H_MIB(0)),
+		DUMP_WED(WED_RTQM_R2H_MIB(1)),
+		DUMP_WED(WED_RTQM_R2Q_MIB(1)),
+		DUMP_WED(WED_RTQM_Q2H_MIB(1)),
+		DUMP_WED(WED_RTQM_Q2N_MIB),
+		DUMP_WED(WED_RTQM_Q2B_MIB),
+		DUMP_WED(WED_RTQM_PFDBK_MIB),
+
+		DUMP_STR("WED WDMA TX"),
+		DUMP_WED(WED_WDMA_TX_MIB),
+		DUMP_WED_RING(WED_WDMA_RING_TX),
+
+		DUMP_STR("WDMA TX"),
+		DUMP_WDMA(WDMA_GLO_CFG),
+		DUMP_WDMA_RING(WDMA_RING_TX(0)),
+		DUMP_WDMA_RING(WDMA_RING_TX(1)),
+
+		DUMP_STR("WED RX BM"),
+		DUMP_WED(WED_RX_BM_BASE),
+		DUMP_WED(WED_RX_BM_RX_DMAD),
+		DUMP_WED(WED_RX_BM_PTR),
+		DUMP_WED(WED_RX_BM_TKID_MIB),
+		DUMP_WED(WED_RX_BM_BLEN),
+		DUMP_WED(WED_RX_BM_STS),
+		DUMP_WED(WED_RX_BM_INTF2),
+		DUMP_WED(WED_RX_BM_INTF),
+		DUMP_WED(WED_RX_BM_ERR_STS),
+	};
+	struct mtk_wed_hw *hw = s->private;
+	struct mtk_wed_device *dev = hw->wed_dev;
+
+	if (!dev)
+		return 0;
+
+	dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);

static int
mtk_wed_reg_set(void *data, u64 val)
@@ -175,4 +259,7 @@ void mtk_wed_hw_add_debugfs(struct mtk_w
	debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
	debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
	debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+	if (hw->version != 1)
+		debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
+					   &wed_rxinfo_fops);
}
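Note: the debugfs dump is table driven; each reg_dump entry carries a name, an offset, and a type tag, and dump_wed_regs() switches on the tag to pick the matching read accessor. A reduced standalone sketch of the pattern (struct reg_dump_entry and dump_table are invented for illustration):

	/* Sketch: a minimal table-driven register dumper in the same style. */
	struct reg_dump_entry {
		const char *name;
		u32 offset;
	};

	static void dump_table(struct seq_file *s, struct mtk_wed_device *dev,
			       const struct reg_dump_entry *tbl, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			seq_printf(s, "%s: %08x\n", tbl[i].name,
				   wed_r32(dev, tbl[i].offset));
	}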
@ -0,0 +1,36 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Thu, 17 Nov 2022 00:58:46 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: remove cpu_relax in
 mtk_pending_work

Get rid of cpu_relax in mtk_pending_work routine since MTK_RESETTING is
set only in mtk_pending_work() and it runs holding rtnl lock

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3495,11 +3495,8 @@ static void mtk_pending_work(struct work
	rtnl_lock();

	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
+	set_bit(MTK_RESETTING, &eth->state);

-	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
-		cpu_relax();
-
-	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
@@ -3533,7 +3530,7 @@ static void mtk_pending_work(struct work

	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);

-	clear_bit_unlock(MTK_RESETTING, &eth->state);
+	clear_bit(MTK_RESETTING, &eth->state);

	rtnl_unlock();
}
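Note: test_and_set_bit_lock()/clear_bit_unlock() give acquire/release ordering for a hand-rolled bit spinlock; once the rtnl lock already serializes the writers, plain set_bit()/clear_bit() suffice, which is the whole point of this patch. A sketch contrasting the two idioms (MY_FLAG and state are illustrative stand-ins):

	/* Sketch: a bit used as a lock needs acquire/release semantics. */
	static void flag_as_lock(unsigned long *state)
	{
		while (test_and_set_bit_lock(MY_FLAG, state))
			cpu_relax();		/* spin until the bit is owned */
		/* ... critical section ... */
		clear_bit_unlock(MY_FLAG, state);
	}

	/* A bit used as a plain flag under an external lock does not. */
	static void flag_under_rtnl(unsigned long *state)
	{
		set_bit(MY_FLAG, state);	/* rtnl_lock() already held */
		/* ... reset work ... */
		clear_bit(MY_FLAG, state);
	}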
@ -0,0 +1,80 @@
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Thu, 24 Nov 2022 11:18:14 +0800
Subject: [PATCH] net: ethernet: mtk_wed: add wcid overwritten support for wed
 v1

All wed versions should enable the wcid overwritten feature,
since the wcid size is controlled by the wlan driver.

Tested-by: Sujuan Chen <sujuan.chen@mediatek.com>
Co-developed-by: Bo Jiao <bo.jiao@mediatek.com>
Signed-off-by: Bo Jiao <bo.jiao@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -526,9 +526,9 @@ mtk_wed_dma_disable(struct mtk_wed_devic
			MTK_WED_WPDMA_RX_D_RX_DRV_EN);
		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
-
-		mtk_wed_set_512_support(dev, false);
	}
+
+	mtk_wed_set_512_support(dev, false);
}

static void
@@ -1290,9 +1290,10 @@ mtk_wed_start(struct mtk_wed_device *dev
		if (mtk_wed_rro_cfg(dev))
			return;

-		mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
	}

+	mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
+
	mtk_wed_dma_enable(dev);
	dev->running = true;
}
@@ -1358,11 +1359,13 @@ mtk_wed_attach(struct mtk_wed_device *de
	}

	mtk_wed_hw_init_early(dev);
-	if (hw->version == 1)
+	if (hw->version == 1) {
		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
				   BIT(hw->index), 0);
-	else
+	} else {
+		dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
		ret = mtk_wed_wo_init(hw);
+	}
out:
	if (ret)
		mtk_wed_detach(dev);
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -20,6 +20,8 @@ struct mtk_wdma_desc {
	__le32 info;
} __packed __aligned(4);

+#define MTK_WED_REV_ID					0x004
+
#define MTK_WED_RESET					0x008
#define MTK_WED_RESET_TX_BM				BIT(0)
#define MTK_WED_RESET_TX_FREE_AGENT			BIT(4)
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -85,6 +85,9 @@ struct mtk_wed_device {
	int irq;
	u8 version;

+	/* used by wlan driver */
+	u32 rev_id;
+
	struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
	struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
	struct mtk_wed_ring txfree_ring;
@ -0,0 +1,85 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Thu, 24 Nov 2022 16:22:51 +0100
Subject: [PATCH] net: ethernet: mtk_wed: return status value in
 mtk_wdma_rx_reset

Move MTK_WDMA_RESET_IDX configuration in mtk_wdma_rx_reset routine.
Increase poll timeout to 10ms in order to be aligned with vendor sdk.
This is a preliminary patch to add Wireless Ethernet Dispatcher reset
support.

Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -101,17 +101,21 @@ mtk_wdma_read_reset(struct mtk_wed_devic
	return wdma_r32(dev, MTK_WDMA_GLO_CFG);
}

-static void
+static int
mtk_wdma_rx_reset(struct mtk_wed_device *dev)
{
	u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
-	int i;
+	int i, ret;

	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
-	if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
-			       !(status & mask), 0, 1000))
+	ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
+				 !(status & mask), 0, 10000);
+	if (ret)
		dev_err(dev->hw->dev, "rx reset failed\n");

+	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) {
		if (dev->rx_wdma[i].desc)
			continue;
@@ -119,6 +123,8 @@ mtk_wdma_rx_reset(struct mtk_wed_device
		wdma_w32(dev,
			 MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
	}
+
+	return ret;
}

static void
@@ -565,9 +571,7 @@ mtk_wed_detach(struct mtk_wed_device *de

	mtk_wed_stop(dev);

-	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
-	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
-
+	mtk_wdma_rx_reset(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
	if (mtk_wed_get_rx_capa(dev)) {
		wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
@@ -582,7 +586,6 @@ mtk_wed_detach(struct mtk_wed_device *de
		mtk_wed_wo_reset(dev);
		mtk_wed_free_rx_rings(dev);
		mtk_wed_wo_deinit(hw);
-		mtk_wdma_rx_reset(dev);
	}

	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
@@ -999,11 +1002,7 @@ mtk_wed_reset_dma(struct mtk_wed_device
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

-	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
-	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
-
-	if (mtk_wed_get_rx_capa(dev))
-		mtk_wdma_rx_reset(dev);
+	mtk_wdma_rx_reset(dev);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
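Note: readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) keeps calling op(addr) into val until cond holds or timeout_us expires, returning 0 or -ETIMEDOUT; the patch widens the budget from 1 ms to 10 ms. A minimal sketch of the same polling shape (my_read_status and MY_READY are hypothetical stand-ins):

	/* Sketch: poll a status word until a ready bit appears, 10 ms cap. */
	static int wait_ready(struct mtk_wed_device *dev)
	{
		u32 status;

		return readx_poll_timeout(my_read_status, dev, status,
					  status & MY_READY, 0, 10000);
	}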
@ -0,0 +1,52 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Thu, 24 Nov 2022 16:22:52 +0100
Subject: [PATCH] net: ethernet: mtk_wed: move MTK_WDMA_RESET_IDX_TX
 configuration in mtk_wdma_tx_reset

Remove duplicated code. Increase poll timeout to 10ms in order to be
aligned with vendor sdk.
This is a preliminary patch to add Wireless Ethernet Dispatcher reset
support.

Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -135,16 +135,15 @@ mtk_wdma_tx_reset(struct mtk_wed_device

	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
	if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
-			       !(status & mask), 0, 1000))
+			       !(status & mask), 0, 10000))
		dev_err(dev->hw->dev, "tx reset failed\n");

-	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) {
-		if (dev->tx_wdma[i].desc)
-			continue;
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

+	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
		wdma_w32(dev,
			 MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
-	}
}

static void
@@ -573,12 +572,6 @@ mtk_wed_detach(struct mtk_wed_device *de

	mtk_wdma_rx_reset(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
-	if (mtk_wed_get_rx_capa(dev)) {
-		wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
-		wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
-		wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
-	}
-
	mtk_wed_free_tx_buffer(dev);
	mtk_wed_free_tx_rings(dev);
@ -0,0 +1,98 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Thu, 24 Nov 2022 16:22:53 +0100
Subject: [PATCH] net: ethernet: mtk_wed: update mtk_wed_stop

Update mtk_wed_stop routine and rename old mtk_wed_stop() to
mtk_wed_deinit(). This is a preliminary patch to add Wireless Ethernet
Dispatcher reset support.

Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -539,14 +539,8 @@ mtk_wed_dma_disable(struct mtk_wed_devic
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
-	mtk_wed_dma_disable(dev);
	mtk_wed_set_ext_int(dev, false);

-	wed_clr(dev, MTK_WED_CTRL,
-		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
-		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
-		MTK_WED_CTRL_WED_TX_BM_EN |
-		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
	wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
@@ -558,7 +552,27 @@ mtk_wed_stop(struct mtk_wed_device *dev)

	wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
	wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
-	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+}
+
+static void
+mtk_wed_deinit(struct mtk_wed_device *dev)
+{
+	mtk_wed_stop(dev);
+	mtk_wed_dma_disable(dev);
+
+	wed_clr(dev, MTK_WED_CTRL,
+		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+		MTK_WED_CTRL_WED_TX_BM_EN |
+		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+	if (dev->hw->version == 1)
+		return;
+
+	wed_clr(dev, MTK_WED_CTRL,
+		MTK_WED_CTRL_RX_ROUTE_QM_EN |
+		MTK_WED_CTRL_WED_RX_BM_EN |
+		MTK_WED_CTRL_RX_RRO_QM_EN);
}

static void
@@ -568,7 +582,7 @@ mtk_wed_detach(struct mtk_wed_device *de

	mutex_lock(&hw_lock);

-	mtk_wed_stop(dev);
+	mtk_wed_deinit(dev);

	mtk_wdma_rx_reset(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
@@ -670,7 +684,7 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
{
	u32 mask, set;

-	mtk_wed_stop(dev);
+	mtk_wed_deinit(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
	mtk_wed_set_wpdma(dev);

--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -234,6 +234,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_devic
	(_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
	(_dev)->ops->msg_update(_dev, _id, _msg, _len)
+#define mtk_wed_device_stop(_dev) (_dev)->ops->stop(_dev)
+#define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
#else
static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
{
@@ -250,6 +252,8 @@ static inline bool mtk_wed_device_active
#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) do {} while (0)
#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
+#define mtk_wed_device_stop(_dev) do {} while (0)
+#define mtk_wed_device_dma_reset(_dev) do {} while (0)
#endif

#endif
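Note: the mtk_wed_device_* macros compile to ops calls when CONFIG_NET_MEDIATEK_SOC_WED is set and to harmless no-ops otherwise, so a wlan driver can call them unconditionally. A hedged sketch of the expected call site in a wlan reset path (hw_priv and its wed member are hypothetical, not from this series):

	/* Sketch: a wlan driver quiescing WED around its own reset. */
	static void wlan_quiesce_wed(struct hw_priv *priv)
	{
		if (mtk_wed_device_active(&priv->wed))
			mtk_wed_device_stop(&priv->wed);
	}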
@ -0,0 +1,309 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Thu, 24 Nov 2022 16:22:54 +0100
Subject: [PATCH] net: ethernet: mtk_wed: add mtk_wed_rx_reset routine

Introduce mtk_wed_rx_reset routine in order to reset rx DMA for Wireless
Ethernet Dispatcher available on MT7986 SoC.

Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -944,42 +944,130 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
}

static u32
-mtk_wed_check_busy(struct mtk_wed_device *dev)
+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
- if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
- return true;
-
- if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
- MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
- return true;
-
- if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
- return true;
-
- if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
- MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
- return true;
-
- if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
- MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
- return true;
-
- if (wed_r32(dev, MTK_WED_CTRL) &
- (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
- return true;
-
- return false;
+ return !!(wed_r32(dev, reg) & mask);
}

static int
-mtk_wed_poll_busy(struct mtk_wed_device *dev)
+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
int sleep = 15000;
int timeout = 100 * sleep;
u32 val;

return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
- timeout, false, dev);
+ timeout, false, dev, reg, mask);
+}
+
+static int
+mtk_wed_rx_reset(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+ u8 val = MTK_WED_WO_STATE_SER_RESET;
+ int i, ret;
+
+ ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_CHANGE_STATE, &val,
+ sizeof(val), true);
+ if (ret)
+ return ret;
+
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+
+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+ }
+
+ /* reset rro qm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_RRO_QM_BUSY);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
+ } else {
+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
+ MTK_WED_RROQM_RST_IDX_MIOD |
+ MTK_WED_RROQM_RST_IDX_FDBK);
+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+ }
+
+ /* reset route qm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
+ if (ret)
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+ else
+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+ MTK_WED_RTQM_Q_RST);
+
+ /* reset tx wdma */
+ mtk_wdma_tx_reset(dev);
+
+ /* reset tx wdma drv */
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
+
+ /* reset wed rx dma */
+ ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_RX_DMA_BUSY);
+ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
+ } else {
+ struct mtk_eth *eth = dev->hw->eth;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ wed_set(dev, MTK_WED_RESET_IDX,
+ MTK_WED_RESET_IDX_RX_V2);
+ else
+ wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ /* reset rx bm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_RX_BM_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+
+ /* wo change to enable state */
+ val = MTK_WED_WO_STATE_ENABLE;
+ ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_CHANGE_STATE, &val,
+ sizeof(val), true);
+ if (ret)
+ return ret;
+
+ /* wed_rx_ring_reset */
+ for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
+ if (!dev->rx_ring[i].desc)
+ continue;
+
+ mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
+ false);
+ }
+ mtk_wed_free_rx_buffer(dev);
+
+ return 0;
}

static void
@@ -997,19 +1085,23 @@ mtk_wed_reset_dma(struct mtk_wed_device
true);
}

- if (mtk_wed_poll_busy(dev))
- busy = mtk_wed_check_busy(dev);
-
+ /* 1. reset WED tx DMA */
+ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
+ busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_BUSY);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
} else {
- wed_w32(dev, MTK_WED_RESET_IDX,
- MTK_WED_RESET_IDX_TX |
- MTK_WED_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}

- mtk_wdma_rx_reset(dev);
+ /* 2. reset WDMA rx DMA */
+ busy = !!mtk_wdma_rx_reset(dev);
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ if (!busy)
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);

if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
@@ -1026,6 +1118,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
}

+ /* 3. reset WED WPDMA tx */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
for (i = 0; i < 100; i++) {
val = wed_r32(dev, MTK_WED_TX_BM_INTF);
if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
@@ -1033,8 +1128,19 @@ mtk_wed_reset_dma(struct mtk_wed_device
}

mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

+ /* 4. reset WED WPDMA tx */
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ if (!busy)
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY);
+
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
@@ -1045,6 +1151,17 @@ mtk_wed_reset_dma(struct mtk_wed_device
MTK_WED_WPDMA_RESET_IDX_RX);
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
}
+
+ dev->init_done = false;
+ if (dev->hw->version == 1)
+ return;
+
+ if (!busy) {
+ wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ mtk_wed_rx_reset(dev);
}

static int
@@ -1267,6 +1384,9 @@ mtk_wed_start(struct mtk_wed_device *dev
{
int i;

+ if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
+ return;
+
for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
if (!dev->rx_wdma[i].desc)
mtk_wed_wdma_rx_ring_setup(dev, i, 16);
@@ -1355,10 +1475,6 @@ mtk_wed_attach(struct mtk_wed_device *de
goto out;

if (mtk_wed_get_rx_capa(dev)) {
- ret = mtk_wed_rx_buffer_alloc(dev);
- if (ret)
- goto out;
-
ret = mtk_wed_rro_alloc(dev);
if (ret)
goto out;
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -24,11 +24,15 @@ struct mtk_wdma_desc {

#define MTK_WED_RESET 0x008
#define MTK_WED_RESET_TX_BM BIT(0)
+#define MTK_WED_RESET_RX_BM BIT(1)
#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
+#define MTK_WED_RESET_WPDMA_RX_D_DRV BIT(10)
#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
#define MTK_WED_RESET_WED_TX_DMA BIT(12)
+#define MTK_WED_RESET_WED_RX_DMA BIT(13)
+#define MTK_WED_RESET_WDMA_TX_DRV BIT(16)
#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
#define MTK_WED_RESET_RX_RRO_QM BIT(20)
@@ -158,6 +162,8 @@ struct mtk_wdma_desc {
#define MTK_WED_RESET_IDX 0x20c
#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6)
+#define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)

#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
#define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
@@ -267,6 +273,9 @@ struct mtk_wdma_desc {

#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
+#define MTK_WED_WPDMA_RX_D_RX_DRV_BUSY BIT(1)
+#define MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE BIT(3)
+#define MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE BIT(4)
#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)

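
The refactor above collapses six hand-rolled busy checks into one parametrized
read_poll_timeout() call. A minimal userspace sketch of that poll-until-idle
pattern, with the same 15 ms step and 100-step budget; reg_read() and the fake
status word are stand-ins, not driver API:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t fake_status = 0x2;	/* bit 1 plays the BUSY flag */

static uint32_t reg_read(void)
{
	static int reads;

	if (++reads > 3)
		fake_status &= ~0x2;	/* "hardware" drains and goes idle */
	return fake_status;
}

static int poll_busy(uint32_t mask, int sleep_us, int timeout_us)
{
	int waited = 0;

	while (reg_read() & mask) {
		if (waited >= timeout_us)
			return -1;	/* the kernel helper returns -ETIMEDOUT */
		usleep(sleep_us);
		waited += sleep_us;
	}
	return 0;
}

int main(void)
{
	int sleep = 15000;

	printf("busy poll -> %d\n", poll_busy(0x2, sleep, 100 * sleep));
	return 0;
}
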
@ -0,0 +1,103 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Thu, 24 Nov 2022 16:22:55 +0100
Subject: [PATCH] net: ethernet: mtk_wed: add reset to tx_ring_setup callback

Introduce reset parameter to mtk_wed_tx_ring_setup signature.
This is a preliminary patch to add Wireless Ethernet Dispatcher reset
support.

Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -1181,7 +1181,8 @@ mtk_wed_ring_alloc(struct mtk_wed_device
}

static int
-mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
@@ -1190,8 +1191,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
return -EINVAL;

wdma = &dev->rx_wdma[idx];
- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size,
- true))
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+ desc_size, true))
return -ENOMEM;

wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
@@ -1389,7 +1390,7 @@ mtk_wed_start(struct mtk_wed_device *dev

for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
if (!dev->rx_wdma[i].desc)
- mtk_wed_wdma_rx_ring_setup(dev, i, 16);
+ mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);

mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
@@ -1498,7 +1499,8 @@ unlock:
}

static int
-mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
+ bool reset)
{
struct mtk_wed_ring *ring = &dev->tx_ring[idx];

@@ -1517,11 +1519,12 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
return -EINVAL;

- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
- sizeof(*ring->desc), true))
+ if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
+ sizeof(*ring->desc), true))
return -ENOMEM;

- if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
+ reset))
return -ENOMEM;

ring->reg_base = MTK_WED_RING_TX(idx);
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -158,7 +158,7 @@ struct mtk_wed_device {
struct mtk_wed_ops {
int (*attach)(struct mtk_wed_device *dev);
int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
- void __iomem *regs);
+ void __iomem *regs, bool reset);
int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
void __iomem *regs);
int (*txfree_ring_setup)(struct mtk_wed_device *dev,
@@ -216,8 +216,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_devic
#define mtk_wed_device_active(_dev) !!(_dev)->ops
#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
- (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) \
+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs, _reset)
#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
(_dev)->ops->txfree_ring_setup(_dev, _regs)
#define mtk_wed_device_reg_read(_dev, _reg) \
@@ -243,7 +243,7 @@ static inline bool mtk_wed_device_active
}
#define mtk_wed_device_detach(_dev) do {} while (0)
#define mtk_wed_device_start(_dev, _mask) do {} while (0)
-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
#define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
#define mtk_wed_device_reg_read(_dev, _reg) 0
#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
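
The `reset` flag threaded through the ring-setup paths above implements a
reuse-or-allocate idiom: on the reset path the descriptor memory already
exists, so allocation is skipped and only ring state is rebuilt. A standalone
C sketch of the same shape (struct ring and the 16-byte descriptor size are
illustrative, not the driver's types):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ring {
	void *desc;	/* descriptor array, kept across resets */
	int size;
};

static int ring_setup(struct ring *r, int size, bool reset)
{
	if (!reset) {	/* first-time setup: allocate backing memory */
		r->desc = calloc((size_t)size, 16);
		if (!r->desc)
			return -1;	/* -ENOMEM in the kernel version */
		r->size = size;
	}
	/* both paths re-initialize descriptors and indices */
	memset(r->desc, 0, (size_t)r->size * 16);
	return 0;
}

int main(void)
{
	struct ring r = { 0 };

	ring_setup(&r, 64, false);	/* probe path: allocates */
	ring_setup(&r, 64, true);	/* reset path: reuses memory */
	printf("ring reused across reset\n");
	free(r.desc);
	return 0;
}
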
@ -0,0 +1,103 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Thu, 1 Dec 2022 16:26:53 +0100
Subject: [PATCH] net: ethernet: mtk_wed: fix sleep while atomic in
 mtk_wed_wo_queue_refill

In order to fix the following sleep while atomic bug always alloc pages
with GFP_ATOMIC in mtk_wed_wo_queue_refill since page_frag_alloc runs in
spin_lock critical section.

[ 9.049719] Hardware name: MediaTek MT7986a RFB (DT)
[ 9.054665] Call trace:
[ 9.057096] dump_backtrace+0x0/0x154
[ 9.060751] show_stack+0x14/0x1c
[ 9.064052] dump_stack_lvl+0x64/0x7c
[ 9.067702] dump_stack+0x14/0x2c
[ 9.071001] ___might_sleep+0xec/0x120
[ 9.074736] __might_sleep+0x4c/0x9c
[ 9.078296] __alloc_pages+0x184/0x2e4
[ 9.082030] page_frag_alloc_align+0x98/0x1ac
[ 9.086369] mtk_wed_wo_queue_refill+0x134/0x234
[ 9.090974] mtk_wed_wo_init+0x174/0x2c0
[ 9.094881] mtk_wed_attach+0x7c8/0x7e0
[ 9.098701] mt7915_mmio_wed_init+0x1f0/0x3a0 [mt7915e]
[ 9.103940] mt7915_pci_probe+0xec/0x3bc [mt7915e]
[ 9.108727] pci_device_probe+0xac/0x13c
[ 9.112638] really_probe.part.0+0x98/0x2f4
[ 9.116807] __driver_probe_device+0x94/0x13c
[ 9.121147] driver_probe_device+0x40/0x114
[ 9.125314] __driver_attach+0x7c/0x180
[ 9.129133] bus_for_each_dev+0x5c/0x90
[ 9.132953] driver_attach+0x20/0x2c
[ 9.136513] bus_add_driver+0x104/0x1fc
[ 9.140333] driver_register+0x74/0x120
[ 9.144153] __pci_register_driver+0x40/0x50
[ 9.148407] mt7915_init+0x5c/0x1000 [mt7915e]
[ 9.152848] do_one_initcall+0x40/0x25c
[ 9.156669] do_init_module+0x44/0x230
[ 9.160403] load_module+0x1f30/0x2750
[ 9.164135] __do_sys_init_module+0x150/0x200
[ 9.168475] __arm64_sys_init_module+0x18/0x20
[ 9.172901] invoke_syscall.constprop.0+0x4c/0xe0
[ 9.177589] do_el0_svc+0x48/0xe0
[ 9.180889] el0_svc+0x14/0x50
[ 9.183929] el0t_64_sync_handler+0x9c/0x120
[ 9.188183] el0t_64_sync+0x158/0x15c

Fixes: 799684448e3e ("net: ethernet: mtk_wed: introduce wed wo support")
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Link: https://lore.kernel.org/r/67ca94bdd3d9eaeb86e52b3050fbca0bcf7bb02f.1669908312.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -133,17 +133,18 @@ mtk_wed_wo_dequeue(struct mtk_wed_wo

static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
- gfp_t gfp, bool rx)
+ bool rx)
{
enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
int n_buf = 0;

spin_lock_bh(&q->lock);
while (q->queued < q->n_desc) {
- void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp);
struct mtk_wed_wo_queue_entry *entry;
dma_addr_t addr;
+ void *buf;

+ buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
if (!buf)
break;

@@ -215,7 +216,7 @@ mtk_wed_wo_rx_run_queue(struct mtk_wed_w
mtk_wed_mcu_rx_unsolicited_event(wo, skb);
}

- if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) {
+ if (mtk_wed_wo_queue_refill(wo, q, true)) {
u32 index = (q->head - 1) % q->n_desc;

mtk_wed_wo_queue_kick(wo, q, index);
@@ -432,7 +433,7 @@ mtk_wed_wo_hardware_init(struct mtk_wed_
if (ret)
goto error;

- mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false);
+ mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
mtk_wed_wo_queue_reset(wo, &wo->q_tx);

regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
@@ -446,7 +447,7 @@ mtk_wed_wo_hardware_init(struct mtk_wed_
if (ret)
goto error;

- mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true);
+ mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
mtk_wed_wo_queue_reset(wo, &wo->q_rx);

/* rx queue irqmask */
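
The bug class fixed above is worth spelling out: page_frag_alloc() with
GFP_KERNEL may sleep, but the call sits inside spin_lock_bh(), where sleeping
is forbidden, so the allocation mode must be GFP_ATOMIC regardless of the
caller. A userspace sketch of the invariant (the flag and helper are
illustrative, not kernel API):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

static int in_atomic_section;	/* stands in for "spin_lock_bh() held" */

static void *alloc_buf(int may_block)
{
	/* a blocking allocation under the lock is the sleep-in-atomic bug */
	assert(!(in_atomic_section && may_block));
	return malloc(64);
}

int main(void)
{
	in_atomic_section = 1;		/* spin_lock_bh(&q->lock) */
	void *buf = alloc_buf(0);	/* GFP_ATOMIC: never blocks */
	in_atomic_section = 0;		/* spin_unlock_bh(&q->lock) */

	free(buf);
	printf("refill completed without sleeping under the lock\n");
	return 0;
}
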
@ -0,0 +1,52 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Tue, 10 Jan 2023 10:31:26 +0100
Subject: [PATCH] net: ethernet: mtk_wed: get rid of queue lock for rx queue

Queue spinlock is currently held in mtk_wed_wo_queue_rx_clean and
mtk_wed_wo_queue_refill routines for MTK Wireless Ethernet Dispatcher
MCU rx queue. mtk_wed_wo_queue_refill() is running during initialization
and in rx tasklet while mtk_wed_wo_queue_rx_clean() is running in
mtk_wed_wo_hw_deinit() during hw de-init phase after rx tasklet has been
disabled. Since mtk_wed_wo_queue_rx_clean and mtk_wed_wo_queue_refill
routines can't run concurrently get rid of spinlock for mcu rx queue.

Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/36ec3b729542ea60898471d890796f745479ba32.1673342990.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -138,7 +138,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_w
enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
int n_buf = 0;

- spin_lock_bh(&q->lock);
while (q->queued < q->n_desc) {
struct mtk_wed_wo_queue_entry *entry;
dma_addr_t addr;
@@ -172,7 +171,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_w
q->queued++;
n_buf++;
}
- spin_unlock_bh(&q->lock);

return n_buf;
}
@@ -316,7 +314,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed
{
struct page *page;

- spin_lock_bh(&q->lock);
for (;;) {
void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);

@@ -325,7 +322,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed

skb_free_frag(buf);
}
- spin_unlock_bh(&q->lock);

if (!q->cache.va)
return;
@ -0,0 +1,75 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Thu, 12 Jan 2023 10:21:29 +0100
Subject: [PATCH] net: ethernet: mtk_wed: get rid of queue lock for tx queue

Similar to MTK Wireless Ethernet Dispatcher (WED) MCU rx queue,
we do not need to protect WED MCU tx queue with a spin lock since
the tx queue is accessed in the two following routines:
- mtk_wed_wo_queue_tx_skb():
it is run at initialization and during mt7915 normal operation.
Moreover MCU messages are serialized through MCU mutex.
- mtk_wed_wo_queue_tx_clean():
it runs just at mt7915 driver module unload when no more messages
are sent to the MCU.

Remove tx queue spinlock.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/7bd0337b2a13ab1a63673b7c03fd35206b3b284e.1673515140.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -258,7 +258,6 @@ mtk_wed_wo_queue_alloc(struct mtk_wed_wo
int n_desc, int buf_size, int index,
struct mtk_wed_wo_queue_regs *regs)
{
- spin_lock_init(&q->lock);
q->regs = *regs;
q->n_desc = n_desc;
q->buf_size = buf_size;
@@ -290,7 +289,6 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed
struct page *page;
int i;

- spin_lock_bh(&q->lock);
for (i = 0; i < q->n_desc; i++) {
struct mtk_wed_wo_queue_entry *entry = &q->entry[i];

@@ -299,7 +297,6 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed
skb_free_frag(entry->buf);
entry->buf = NULL;
}
- spin_unlock_bh(&q->lock);

if (!q->cache.va)
return;
@@ -347,8 +344,6 @@ int mtk_wed_wo_queue_tx_skb(struct mtk_w
int ret = 0, index;
u32 ctrl;

- spin_lock_bh(&q->lock);
-
q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
index = (q->head + 1) % q->n_desc;
if (q->tail == index) {
@@ -379,8 +374,6 @@ int mtk_wed_wo_queue_tx_skb(struct mtk_w
mtk_wed_wo_queue_kick(wo, q, q->head);
mtk_wed_wo_kickout(wo);
out:
- spin_unlock_bh(&q->lock);
-
dev_kfree_skb(skb);

return ret;
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -211,7 +211,6 @@ struct mtk_wed_wo_queue {
struct mtk_wed_wo_queue_regs regs;

struct page_frag_cache cache;
- spinlock_t lock;

struct mtk_wed_wo_queue_desc *desc;
dma_addr_t desc_dma;
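
The argument in the two lock-removal patches above is purely structural:
every writer already funnels through one outer serialization point, so an
inner per-queue spinlock protects nothing. A standalone pthread sketch of
that reasoning (names are illustrative; in the driver the outer lock is the
MCU message mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mcu_mutex = PTHREAD_MUTEX_INITIALIZER;
static int q_head;	/* only ever touched with mcu_mutex held */

static void queue_tx(int msg)
{
	q_head = (q_head + 1) % 16;	/* no inner queue lock needed */
	printf("msg %d queued at slot %d\n", msg, q_head);
}

static void *sender(void *arg)
{
	pthread_mutex_lock(&mcu_mutex);	/* serializes every producer */
	queue_tx(*(int *)arg);
	pthread_mutex_unlock(&mcu_mutex);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	int a = 1, b = 2;

	pthread_create(&t1, NULL, sender, &a);
	pthread_create(&t2, NULL, sender, &b);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}
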
@ -0,0 +1,70 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 14 Jan 2023 18:01:28 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce mtk_hw_reset utility
 routine

This is a preliminary patch to add Wireless Ethernet Dispatcher reset
support.

Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Tested-by: Daniel Golle <daniel@makrotopia.org>
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3254,6 +3254,27 @@ static void mtk_set_mcr_max_rx(struct mt
mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
}

+static void mtk_hw_reset(struct mtk_eth *eth)
+{
+ u32 val;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+ val = RSTCTRL_PPE0_V2;
+ } else {
+ val = RSTCTRL_PPE0;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= RSTCTRL_PPE1;
+
+ ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+ 0x3ffffff);
+}
+
static int mtk_hw_init(struct mtk_eth *eth)
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
@@ -3293,22 +3314,9 @@ static int mtk_hw_init(struct mtk_eth *e
return 0;
}

- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
- val = RSTCTRL_PPE0_V2;
- } else {
- val = RSTCTRL_PPE0;
- }
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
- val |= RSTCTRL_PPE1;
-
- ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
+ mtk_hw_reset(eth);

if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
- 0x3ffffff);
-
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
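
The factored-out mtk_hw_reset() above is essentially capability-driven mask
composition: pick the PPE0 bit for the SoC generation, OR in optional blocks,
then pulse the whole mask once. A compact sketch of that composition (the bit
positions below are placeholders, not the real ETHSYS_RSTCTRL layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RSTCTRL_ETH	(1u << 0)	/* illustrative bit positions */
#define RSTCTRL_FE	(1u << 1)
#define RSTCTRL_PPE0	(1u << 2)
#define RSTCTRL_PPE0_V2	(1u << 3)
#define RSTCTRL_PPE1	(1u << 4)

static uint32_t build_reset_mask(bool netsys_v2, bool has_ppe1)
{
	uint32_t val = netsys_v2 ? RSTCTRL_PPE0_V2 : RSTCTRL_PPE0;

	if (has_ppe1)
		val |= RSTCTRL_PPE1;	/* optional second PPE block */
	return RSTCTRL_ETH | RSTCTRL_FE | val;
}

int main(void)
{
	printf("v2 + ppe1 mask: 0x%x\n", build_reset_mask(true, true));
	printf("v1 mask:        0x%x\n", build_reset_mask(false, false));
	return 0;
}
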
@ -0,0 +1,107 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 14 Jan 2023 18:01:29 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce mtk_hw_warm_reset
 support

Introduce mtk_hw_warm_reset utility routine. This is a preliminary patch
to align reset procedure to vendor sdk and avoid to power down the chip
during hw reset.

Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Tested-by: Daniel Golle <daniel@makrotopia.org>
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3275,7 +3275,54 @@ static void mtk_hw_reset(struct mtk_eth
0x3ffffff);
}

-static int mtk_hw_init(struct mtk_eth *eth)
+static u32 mtk_hw_reset_read(struct mtk_eth *eth)
+{
+ u32 val;
+
+ regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
+ return val;
+}
+
+static void mtk_hw_warm_reset(struct mtk_eth *eth)
+{
+ u32 rst_mask, val;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
+ RSTCTRL_FE);
+ if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
+ val & RSTCTRL_FE, 1, 1000)) {
+ dev_err(eth->dev, "warm reset failed\n");
+ mtk_hw_reset(eth);
+ return;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
+ else
+ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ rst_mask |= RSTCTRL_PPE1;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
+
+ udelay(1);
+ val = mtk_hw_reset_read(eth);
+ if (!(val & rst_mask))
+ dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
+ val, rst_mask);
+
+ rst_mask |= RSTCTRL_FE;
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
+
+ udelay(1);
+ val = mtk_hw_reset_read(eth);
+ if (val & rst_mask)
+ dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
+ val, rst_mask);
+}
+
+static int mtk_hw_init(struct mtk_eth *eth, bool reset)
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
ETHSYS_DMA_AG_MAP_PPE;
@@ -3314,7 +3361,12 @@ static int mtk_hw_init(struct mtk_eth *e
return 0;
}

- mtk_hw_reset(eth);
+ msleep(100);
+
+ if (reset)
+ mtk_hw_warm_reset(eth);
+ else
+ mtk_hw_reset(eth);

if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
/* Set FE to PDMAv2 if necessary */
@@ -3522,7 +3574,7 @@ static void mtk_pending_work(struct work
if (eth->dev->pins)
pinctrl_select_state(eth->dev->pins->p,
eth->dev->pins->default_state);
- mtk_hw_init(eth);
+ mtk_hw_init(eth, true);

/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAC_COUNT; i++) {
@@ -4114,7 +4166,7 @@ static int mtk_probe(struct platform_dev
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(&eth->pending_work, mtk_pending_work);

- err = mtk_hw_init(eth);
+ err = mtk_hw_init(eth, false);
if (err)
goto err_wed_exit;

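
mtk_hw_warm_reset() follows an assert/verify/release/verify shape: set the
reset bits, confirm they latched, clear them, confirm they released, and fall
back to the full cold reset if the very first step never sticks. A standalone
sketch of that sequence against a simulated register (rstctrl and the mask
value are stand-ins for ETHSYS_RSTCTRL and its bits):

#include <stdint.h>
#include <stdio.h>

static uint32_t rstctrl;	/* simulated ETHSYS_RSTCTRL register */

static void reg_update_bits(uint32_t mask, uint32_t set)
{
	rstctrl = (rstctrl & ~mask) | (set & mask);
}

int main(void)
{
	uint32_t rst_mask = 0x7;	/* illustrative FE|ETH|PPE bits */

	reg_update_bits(rst_mask, rst_mask);	/* stage 0: assert reset */
	if ((rstctrl & rst_mask) != rst_mask)
		fprintf(stderr, "warm reset stage0 failed %08x\n", rstctrl);

	reg_update_bits(rst_mask, 0);		/* stage 1: release reset */
	if (rstctrl & rst_mask)
		fprintf(stderr, "warm reset stage1 failed %08x\n", rstctrl);

	printf("warm reset sequence completed\n");
	return 0;
}
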
@ -0,0 +1,262 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 14 Jan 2023 18:01:30 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: align reset procedure to vendor
 sdk

Avoid to power-down the ethernet chip during hw reset and align reset
procedure to vendor sdk.

Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Tested-by: Daniel Golle <daniel@makrotopia.org>
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2842,14 +2842,29 @@ static void mtk_dma_free(struct mtk_eth
kfree(eth->scratch_head);
}

+static bool mtk_hw_reset_check(struct mtk_eth *eth)
+{
+ u32 val = mtk_r32(eth, MTK_INT_STATUS2);
+
+ return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
+ (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
+ (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
+}
+
static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;

+ if (test_bit(MTK_RESETTING, &eth->state))
+ return;
+
+ if (!mtk_hw_reset_check(eth))
+ return;
+
eth->netdev[mac->id]->stats.tx_errors++;
- netif_err(eth, tx_err, dev,
- "transmit timed out\n");
+ netif_err(eth, tx_err, dev, "transmit timed out\n");
+
schedule_work(&eth->pending_work);
}

@@ -3329,15 +3344,17 @@ static int mtk_hw_init(struct mtk_eth *e
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int i, val, ret;

- if (test_and_set_bit(MTK_HW_INIT, &eth->state))
+ if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
return 0;

- pm_runtime_enable(eth->dev);
- pm_runtime_get_sync(eth->dev);
+ if (!reset) {
+ pm_runtime_enable(eth->dev);
+ pm_runtime_get_sync(eth->dev);

- ret = mtk_clk_enable(eth);
- if (ret)
- goto err_disable_pm;
+ ret = mtk_clk_enable(eth);
+ if (ret)
+ goto err_disable_pm;
+ }

if (eth->ethsys)
regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
@@ -3466,8 +3483,10 @@ static int mtk_hw_init(struct mtk_eth *e
return 0;

err_disable_pm:
- pm_runtime_put_sync(eth->dev);
- pm_runtime_disable(eth->dev);
+ if (!reset) {
+ pm_runtime_put_sync(eth->dev);
+ pm_runtime_disable(eth->dev);
+ }

return ret;
}
@@ -3546,30 +3565,53 @@ static int mtk_do_ioctl(struct net_devic
return -EOPNOTSUPP;
}

+static void mtk_prepare_for_reset(struct mtk_eth *eth)
+{
+ u32 val;
+ int i;
+
+ /* disabe FE P3 and P4 */
+ val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= MTK_FE_LINK_DOWN_P4;
+ mtk_w32(eth, val, MTK_FE_GLO_CFG);
+
+ /* adjust PPE configurations to prepare for reset */
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_prepare_reset(eth->ppe[i]);
+
+ /* disable NETSYS interrupts */
+ mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
+
+ /* force link down GMAC */
+ for (i = 0; i < 2; i++) {
+ val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
+ mtk_w32(eth, val, MTK_MAC_MCR(i));
+ }
+}
+
static void mtk_pending_work(struct work_struct *work)
{
struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
- int err, i;
unsigned long restart = 0;
+ u32 val;
+ int i;

rtnl_lock();
-
- dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
set_bit(MTK_RESETTING, &eth->state);

+ mtk_prepare_for_reset(eth);
+
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i])
+ if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
continue;
+
mtk_stop(eth->netdev[i]);
__set_bit(i, &restart);
}
- dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

- /* restart underlying hardware such as power, clock, pin mux
- * and the connected phy
- */
- mtk_hw_deinit(eth);
+ usleep_range(15000, 16000);

if (eth->dev->pins)
pinctrl_select_state(eth->dev->pins->p,
@@ -3580,15 +3622,19 @@ static void mtk_pending_work(struct work
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!test_bit(i, &restart))
continue;
- err = mtk_open(eth->netdev[i]);
- if (err) {
+
+ if (mtk_open(eth->netdev[i])) {
netif_alert(eth, ifup, eth->netdev[i],
- "Driver up/down cycle failed, closing device.\n");
+ "Driver up/down cycle failed\n");
dev_close(eth->netdev[i]);
}
}

- dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
+ /* enabe FE P3 and P4 */
+ val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val &= ~MTK_FE_LINK_DOWN_P4;
+ mtk_w32(eth, val, MTK_FE_GLO_CFG);

clear_bit(MTK_RESETTING, &eth->state);

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -72,12 +72,24 @@
#define MTK_HW_LRO_REPLACE_DELTA 1000
#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522

+/* Frame Engine Global Configuration */
+#define MTK_FE_GLO_CFG 0x00
+#define MTK_FE_LINK_DOWN_P3 BIT(11)
+#define MTK_FE_LINK_DOWN_P4 BIT(12)
+
/* Frame Engine Global Reset Register */
#define MTK_RST_GL 0x04
#define RST_GL_PSE BIT(0)

/* Frame Engine Interrupt Status Register */
#define MTK_INT_STATUS2 0x08
+#define MTK_FE_INT_ENABLE 0x0c
+#define MTK_FE_INT_FQ_EMPTY BIT(8)
+#define MTK_FE_INT_TSO_FAIL BIT(12)
+#define MTK_FE_INT_TSO_ILLEGAL BIT(13)
+#define MTK_FE_INT_TSO_ALIGN BIT(14)
+#define MTK_FE_INT_RFIFO_OV BIT(18)
+#define MTK_FE_INT_RFIFO_UF BIT(19)
#define MTK_GDM1_AF BIT(28)
#define MTK_GDM2_AF BIT(29)

--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -710,6 +710,33 @@ int mtk_foe_entry_idle_time(struct mtk_p
return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

+int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
+{
+ if (!ppe)
+ return -EINVAL;
+
+ /* disable KA */
+ ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
+ ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
+ ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
+ usleep_range(10000, 11000);
+
+ /* set KA timer to maximum */
+ ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
+ ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);
+
+ /* set KA tick select */
+ ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
+ ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
+ usleep_range(10000, 11000);
+
+ /* disable scan mode */
+ ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
+ usleep_range(10000, 11000);
+
+ return mtk_ppe_wait_busy(ppe);
+}
+
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
int version, int index)
{
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -306,6 +306,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_
void mtk_ppe_deinit(struct mtk_eth *eth);
void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
+int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);

void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);

--- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
@@ -58,6 +58,12 @@
#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
#define MTK_PPE_TB_CFG_INFO_SEL BIT(20)
+#define MTK_PPE_TB_TICK_SEL BIT(24)
+
+#define MTK_PPE_BIND_LMT1 0x230
+#define MTK_PPE_NTU_KEEPALIVE GENMASK(23, 16)
+
+#define MTK_PPE_KEEPALIVE 0x234

enum {
MTK_PPE_SCAN_MODE_DISABLED,
@ -0,0 +1,249 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 14 Jan 2023 18:01:31 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: add dma checks to
 mtk_hw_reset_check

Introduce mtk_hw_check_dma_hang routine to monitor possible dma hangs.

Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Tested-by: Daniel Golle <daniel@makrotopia.org>
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -50,6 +50,7 @@ static const struct mtk_reg_map mtk_reg_
.delay_irq = 0x0a0c,
.irq_status = 0x0a20,
.irq_mask = 0x0a28,
+ .adma_rx_dbg0 = 0x0a38,
.int_grp = 0x0a50,
},
.qdma = {
@@ -79,6 +80,8 @@ static const struct mtk_reg_map mtk_reg_
[0] = 0x2800,
[1] = 0x2c00,
},
+ .pse_iq_sta = 0x0110,
+ .pse_oq_sta = 0x0118,
};

static const struct mtk_reg_map mt7628_reg_map = {
@@ -109,6 +112,7 @@ static const struct mtk_reg_map mt7986_r
.delay_irq = 0x620c,
.irq_status = 0x6220,
.irq_mask = 0x6228,
+ .adma_rx_dbg0 = 0x6238,
.int_grp = 0x6250,
},
.qdma = {
@@ -138,6 +142,8 @@ static const struct mtk_reg_map mt7986_r
[0] = 0x4800,
[1] = 0x4c00,
},
+ .pse_iq_sta = 0x0180,
+ .pse_oq_sta = 0x01a0,
};

/* strings used by ethtool */
@@ -3337,6 +3343,102 @@ static void mtk_hw_warm_reset(struct mtk
val, rst_mask);
}

+static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
+{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
+ bool oq_hang, cdm1_busy, adma_busy;
+ bool wtx_busy, cdm_full, oq_free;
+ u32 wdidx, val, gdm1_fc, gdm2_fc;
+ bool qfsm_hang, qfwd_hang;
+ bool ret = false;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ return false;
+
+ /* WDMA sanity checks */
+ wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
+
+ val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
+ wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
+
+ val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
+ cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
+
+ oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
+ !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
+ !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
+
+ if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
+ if (++eth->reset.wdma_hang_count > 2) {
+ eth->reset.wdma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ /* QDMA sanity checks */
+ qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
+ qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
+
+ gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
+ gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
+ gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
+ gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
+ gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
+ gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
+
+ if (qfsm_hang && qfwd_hang &&
+ ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
+ (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
+ if (++eth->reset.qdma_hang_count > 2) {
+ eth->reset.qdma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ /* ADMA sanity checks */
+ oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
+ cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
+ adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
+ !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
+
+ if (oq_hang && cdm1_busy && adma_busy) {
+ if (++eth->reset.adma_hang_count > 2) {
+ eth->reset.adma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ eth->reset.wdma_hang_count = 0;
+ eth->reset.qdma_hang_count = 0;
+ eth->reset.adma_hang_count = 0;
+out:
+ eth->reset.wdidx = wdidx;
+
+ return ret;
+}
+
+static void mtk_hw_reset_monitor_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
+ reset.monitor_work);
+
+ if (test_bit(MTK_RESETTING, &eth->state))
+ goto out;
+
+ /* DMA stuck checks */
+ if (mtk_hw_check_dma_hang(eth))
+ schedule_work(&eth->pending_work);
+
+out:
+ schedule_delayed_work(&eth->reset.monitor_work,
+ MTK_DMA_MONITOR_TIMEOUT);
+}
+
static int mtk_hw_init(struct mtk_eth *eth, bool reset)
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
@@ -3672,6 +3774,7 @@ static int mtk_cleanup(struct mtk_eth *e
mtk_unreg_dev(eth);
mtk_free_dev(eth);
cancel_work_sync(&eth->pending_work);
+ cancel_delayed_work_sync(&eth->reset.monitor_work);

return 0;
}
@@ -4099,6 +4202,7 @@ static int mtk_probe(struct platform_dev

eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
+ INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);

eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
@@ -4301,6 +4405,8 @@ static int mtk_probe(struct platform_dev
netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);

platform_set_drvdata(pdev, eth);
+ schedule_delayed_work(&eth->reset.monitor_work,
+ MTK_DMA_MONITOR_TIMEOUT);

return 0;

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -257,6 +257,8 @@

#define MTK_RX_DONE_INT_V2 BIT(14)

+#define MTK_CDM_TXFIFO_RDY BIT(7)
+
/* QDMA Interrupt grouping registers */
#define MTK_RLS_DONE_INT BIT(0)

@@ -542,6 +544,17 @@
#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)

+#define MTK_FE_CDM1_FSM 0x220
+#define MTK_FE_CDM2_FSM 0x224
+#define MTK_FE_CDM3_FSM 0x238
+#define MTK_FE_CDM4_FSM 0x298
+#define MTK_FE_CDM5_FSM 0x318
+#define MTK_FE_CDM6_FSM 0x328
+#define MTK_FE_GDM1_FSM 0x228
+#define MTK_FE_GDM2_FSM 0x22C
+
+#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100))
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -938,6 +951,7 @@ struct mtk_reg_map {
u32 delay_irq; /* delay interrupt */
u32 irq_status; /* interrupt status */
u32 irq_mask; /* interrupt mask */
+ u32 adma_rx_dbg0;
u32 int_grp;
} pdma;
struct {
@@ -964,6 +978,8 @@ struct mtk_reg_map {
u32 gdma_to_ppe;
u32 ppe_base;
u32 wdma_base[2];
+ u32 pse_iq_sta;
+ u32 pse_oq_sta;
};

/* struct mtk_eth_data - This is the structure holding all differences
@@ -1006,6 +1022,8 @@ struct mtk_soc_data {
} txrx;
};

+#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
+
/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS 2

@@ -1128,6 +1146,14 @@ struct mtk_eth {
struct rhashtable flow_table;

struct bpf_prog __rcu *prog;
+
+ struct {
+ struct delayed_work monitor_work;
+ u32 wdidx;
+ u8 wdma_hang_count;
+ u8 qdma_hang_count;
+ u8 adma_hang_count;
+ } reset;
};

/* struct mtk_mac - the structure that holds the info about the MACs of the
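
The hang detector above never acts on a single bad sample: each class of
check keeps its own counter and only reports a hang after the same stuck
condition survives more than two consecutive monitor passes. A standalone
sketch of that debounce (check_hang() and the sample array are illustrative):

#include <stdbool.h>
#include <stdio.h>

static int hang_count;

static bool check_hang(bool stuck_this_pass)
{
	if (!stuck_this_pass) {
		hang_count = 0;		/* condition cleared: start over */
		return false;
	}
	if (++hang_count > 2) {
		hang_count = 0;
		return true;		/* persistent: schedule reset work */
	}
	return false;
}

int main(void)
{
	bool samples[] = { true, true, false, true, true, true };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("pass %u -> %s\n", i,
		       check_hang(samples[i]) ? "reset" : "ok");
	return 0;
}
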
@ -0,0 +1,124 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 14 Jan 2023 18:01:32 +0100
Subject: [PATCH] net: ethernet: mtk_wed: add reset/reset_complete callbacks

Introduce reset and reset_complete wlan callback to schedule WLAN driver
reset when ethernet/wed driver is resetting.

Tested-by: Daniel Golle <daniel@makrotopia.org>
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3703,6 +3703,11 @@ static void mtk_pending_work(struct work
set_bit(MTK_RESETTING, &eth->state);

mtk_prepare_for_reset(eth);
+ mtk_wed_fe_reset();
+ /* Run again reset preliminary configuration in order to avoid any
+ * possible race during FE reset since it can run releasing RTNL lock.
+ */
+ mtk_prepare_for_reset(eth);

/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
@@ -3740,6 +3745,8 @@ static void mtk_pending_work(struct work

clear_bit(MTK_RESETTING, &eth->state);

+ mtk_wed_fe_reset_complete();
+
rtnl_unlock();
}

--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -205,6 +205,48 @@ mtk_wed_wo_reset(struct mtk_wed_device *
iounmap(reg);
}

+void mtk_wed_fe_reset(void)
+{
+ int i;
+
+ mutex_lock(&hw_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw = hw_list[i];
+ struct mtk_wed_device *dev = hw->wed_dev;
+ int err;
+
+ if (!dev || !dev->wlan.reset)
+ continue;
+
+ /* reset callback blocks until WLAN reset is completed */
+ err = dev->wlan.reset(dev);
+ if (err)
+ dev_err(dev->dev, "wlan reset failed: %d\n", err);
+ }
+
+ mutex_unlock(&hw_lock);
+}
+
+void mtk_wed_fe_reset_complete(void)
+{
+ int i;
+
+ mutex_lock(&hw_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw = hw_list[i];
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (!dev || !dev->wlan.reset_complete)
+ continue;
+
+ dev->wlan.reset_complete(dev);
+ }
+
+ mutex_unlock(&hw_lock);
+}
+
static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -128,6 +128,8 @@ void mtk_wed_add_hw(struct device_node *
void mtk_wed_exit(void);
int mtk_wed_flow_add(int index);
void mtk_wed_flow_remove(int index);
+void mtk_wed_fe_reset(void);
+void mtk_wed_fe_reset_complete(void);
#else
static inline void
mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
@@ -147,6 +149,13 @@ static inline void mtk_wed_flow_remove(i
{
}

+static inline void mtk_wed_fe_reset(void)
+{
+}
+
+static inline void mtk_wed_fe_reset_complete(void)
+{
+}
#endif

#ifdef CONFIG_DEBUG_FS
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -151,6 +151,8 @@ struct mtk_wed_device {
void (*release_rx_buf)(struct mtk_wed_device *wed);
void (*update_wo_rx_stats)(struct mtk_wed_device *wed,
struct mtk_wed_wo_rx_stats *stats);
+ int (*reset)(struct mtk_wed_device *wed);
+ void (*reset_complete)(struct mtk_wed_device *wed);
} wlan;
#endif
};
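
mtk_wed_fe_reset()/mtk_wed_fe_reset_complete() are a small notify-all
pattern: walk a fixed registry under a mutex and call whichever optional
callbacks each consumer registered. A standalone sketch of the shape
(struct consumer stands in for struct mtk_wed_device and its wlan ops):

#include <pthread.h>
#include <stdio.h>

struct consumer {
	int (*reset)(struct consumer *c);		/* optional */
	void (*reset_complete)(struct consumer *c);	/* optional */
};

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static struct consumer *registry[2];

static void notify_reset(void)
{
	unsigned int i;

	pthread_mutex_lock(&hw_lock);
	for (i = 0; i < 2; i++) {
		struct consumer *c = registry[i];

		if (!c || !c->reset)
			continue;
		if (c->reset(c))	/* blocks until the consumer is quiet */
			fprintf(stderr, "consumer %u reset failed\n", i);
	}
	pthread_mutex_unlock(&hw_lock);
}

static int wlan_reset(struct consumer *c)
{
	printf("wlan driver parked its queues\n");
	return 0;
}

int main(void)
{
	struct consumer wlan = { .reset = wlan_reset };

	registry[0] = &wlan;
	notify_reset();
	return 0;
}
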
@ -0,0 +1,106 @@
|
||||
From: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
Date: Mon, 5 Dec 2022 12:34:42 +0100
|
||||
Subject: [PATCH] net: ethernet: mtk_wed: add reset to rx_ring_setup callback
|
||||
|
||||
This patch adds a reset parameter to the mtk_wed_rx_ring_setup signature
in order to align the rx_ring_setup callback with the tx_ring_setup one
introduced in 'commit 23dca7a90017 ("net: ethernet: mtk_wed: add reset to
tx_ring_setup callback")'

Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Link: https://lore.kernel.org/r/29c6e7a5469e784406cf3e2920351d1207713d05.1670239984.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -1252,7 +1252,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
}

static int
-mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
@@ -1261,8 +1262,8 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
return -EINVAL;

wdma = &dev->tx_wdma[idx];
- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size,
- true))
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+ desc_size, true))
return -ENOMEM;

wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
@@ -1272,6 +1273,9 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);

+ if (reset)
+ mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true);
+
if (!idx) {
wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
@@ -1611,18 +1615,20 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
}

static int
-mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
+ bool reset)
{
struct mtk_wed_ring *ring = &dev->rx_ring[idx];

if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring)))
return -EINVAL;

- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
- sizeof(*ring->desc), false))
+ if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
+ sizeof(*ring->desc), false))
return -ENOMEM;

- if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
+ reset))
return -ENOMEM;

ring->reg_base = MTK_WED_RING_RX_DATA(idx);
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -162,7 +162,7 @@ struct mtk_wed_ops {
int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
void __iomem *regs, bool reset);
int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
- void __iomem *regs);
+ void __iomem *regs, bool reset);
int (*txfree_ring_setup)(struct mtk_wed_device *dev,
void __iomem *regs);
int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
@@ -230,8 +230,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_devic
(_dev)->ops->irq_get(_dev, _mask)
#define mtk_wed_device_irq_set_mask(_dev, _mask) \
(_dev)->ops->irq_set_mask(_dev, _mask)
-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
- (_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \
+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset)
#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
(_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
@@ -251,7 +251,7 @@ static inline bool mtk_wed_device_active
#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
#define mtk_wed_device_irq_get(_dev, _mask) 0
#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) do {} while (0)
#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
#define mtk_wed_device_stop(_dev) do {} while (0)
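The new flag matters to WED consumers (the MT76 WLAN drivers): on first
bring-up they need descriptor allocation, while after a device reset they
only need the hardware state reprogrammed. A minimal sketch of the intended
call pattern follows; wlan_setup_rx_rings() and NUM_RX_RINGS are illustrative
names, not part of the kernel API:

    /* Hypothetical consumer of the updated callback: 'reset' is false on
     * first bring-up (rings get allocated) and true on recovery (existing
     * descriptors are kept; only hardware state is reprogrammed). */
    static int wlan_setup_rx_rings(struct mtk_wed_device *wed,
                                   void __iomem *regs, bool reset)
    {
        int i, err;

        for (i = 0; i < NUM_RX_RINGS; i++) {
            err = mtk_wed_device_rx_ring_setup(wed, i, regs, reset);
            if (err)
                return err;
        }
        return 0;
    }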
@ -0,0 +1,22 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 27 Oct 2022 19:50:31 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: account for vlan in rx header length

The network stack assumes that devices can handle an extra VLAN tag without
increasing the MTU.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -29,7 +29,7 @@
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_DMA_SIZE 512
#define MTK_MAC_COUNT 2
-#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
+#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
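For a quick sanity check of the new definition, the standard header length
constants from <linux/if_ether.h> and <linux/if_vlan.h> work out as follows:

    /* ETH_HLEN      = 14  (dst MAC + src MAC + ethertype)
     * VLAN_HLEN     =  4  (one 802.1Q tag)
     * VLAN_ETH_HLEN = 18  (ETH_HLEN + VLAN_HLEN)
     * ETH_FCS_LEN   =  4
     *
     * MTK_RX_ETH_HLEN therefore grows from 14 + 4 = 18 bytes to
     * 18 + 4 = 22 bytes, reserving room for one VLAN tag on top of
     * the configured MTU when sizing rx buffers.
     */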
@ -0,0 +1,143 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 27 Oct 2022 19:53:57 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: increase tx ring size for QDMA devices

In order to use the hardware traffic shaper feature, a larger tx ring is
needed, especially for the scratch ring, which the hardware shaper uses to
reorder packets.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -945,7 +945,7 @@ static int mtk_init_fq_dma(struct mtk_et
{
const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
- int cnt = MTK_DMA_SIZE;
+ int cnt = MTK_QDMA_RING_SIZE;
dma_addr_t dma_addr;
int i;

@@ -2209,19 +2209,25 @@ static int mtk_tx_alloc(struct mtk_eth *
struct mtk_tx_ring *ring = &eth->tx_ring;
int i, sz = soc->txrx.txd_size;
struct mtk_tx_dma_v2 *txd;
+ int ring_size;

- ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ ring_size = MTK_QDMA_RING_SIZE;
+ else
+ ring_size = MTK_DMA_SIZE;
+
+ ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
GFP_KERNEL);
if (!ring->buf)
goto no_tx_mem;

- ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+ ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
&ring->phys, GFP_KERNEL);
if (!ring->dma)
goto no_tx_mem;

- for (i = 0; i < MTK_DMA_SIZE; i++) {
- int next = (i + 1) % MTK_DMA_SIZE;
+ for (i = 0; i < ring_size; i++) {
+ int next = (i + 1) % ring_size;
u32 next_ptr = ring->phys + next * sz;

txd = ring->dma + i * sz;
@@ -2241,22 +2247,22 @@ static int mtk_tx_alloc(struct mtk_eth *
* descriptors in ring->dma_pdma.
*/
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
- ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
&ring->phys_pdma, GFP_KERNEL);
if (!ring->dma_pdma)
goto no_tx_mem;

- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < ring_size; i++) {
ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
ring->dma_pdma[i].txd4 = 0;
}
}

- ring->dma_size = MTK_DMA_SIZE;
- atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
+ ring->dma_size = ring_size;
+ atomic_set(&ring->free_count, ring_size - 2);
ring->next_free = ring->dma;
ring->last_free = (void *)txd;
- ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
+ ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
ring->thresh = MAX_SKB_FRAGS;

/* make sure that all changes to the dma ring are flushed before we
@@ -2268,14 +2274,14 @@ static int mtk_tx_alloc(struct mtk_eth *
mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
mtk_w32(eth,
- ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+ ring->phys + ((ring_size - 1) * sz),
soc->reg_map->qdma.crx_ptr);
mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
soc->reg_map->qdma.qtx_cfg);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
- mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
+ mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
}
@@ -2293,7 +2299,7 @@ static void mtk_tx_clean(struct mtk_eth
int i;

if (ring->buf) {
- for (i = 0; i < MTK_DMA_SIZE; i++)
+ for (i = 0; i < ring->dma_size; i++)
mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
kfree(ring->buf);
ring->buf = NULL;
@@ -2301,14 +2307,14 @@ static void mtk_tx_clean(struct mtk_eth

if (ring->dma) {
dma_free_coherent(eth->dma_dev,
- MTK_DMA_SIZE * soc->txrx.txd_size,
+ ring->dma_size * soc->txrx.txd_size,
ring->dma, ring->phys);
ring->dma = NULL;
}

if (ring->dma_pdma) {
dma_free_coherent(eth->dma_dev,
- MTK_DMA_SIZE * soc->txrx.txd_size,
+ ring->dma_size * soc->txrx.txd_size,
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
@@ -2830,7 +2836,7 @@ static void mtk_dma_free(struct mtk_eth
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
- MTK_DMA_SIZE * soc->txrx.txd_size,
+ MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -27,6 +27,7 @@
#define MTK_MAX_RX_LENGTH_2K 2048
#define MTK_TX_DMA_BUF_LEN 0x3fff
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
+#define MTK_QDMA_RING_SIZE 2048
#define MTK_DMA_SIZE 512
#define MTK_MAC_COUNT 2
#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + ETH_FCS_LEN)
@ -0,0 +1,52 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Fri, 4 Nov 2022 19:49:08 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: avoid port_mg assignment on MT7622 and newer

On newer chips, this field is unused; some of its bits are instead related
to queue assignment. Initialize it to 0 in those cases.
Fix offload_version on MT7621 and MT7623, which still need the previous value.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4484,7 +4484,7 @@ static const struct mtk_soc_data mt7621_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
- .offload_version = 2,
+ .offload_version = 1,
.hash_offset = 2,
.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
@@ -4523,7 +4523,7 @@ static const struct mtk_soc_data mt7623_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
- .offload_version = 2,
+ .offload_version = 1,
.hash_offset = 2,
.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -175,6 +175,8 @@ int mtk_foe_entry_prepare(struct mtk_eth
val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
} else {
+ int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;
+
val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
@@ -182,7 +184,7 @@ int mtk_foe_entry_prepare(struct mtk_eth
entry->ib1 = val;

val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
- FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
}

@ -0,0 +1,654 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 27 Oct 2022 20:17:27 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: implement multi-queue support for per-port queues

When sending traffic to multiple ports with different link speeds, queued
packets to one port can drown out tx to other ports.

In order to better handle transmission to multiple ports, use the hardware
shaper feature to implement weighted fair queueing between ports.
Weight and maximum rate are automatically adjusted based on the link speed
of the port.

The first 3 queues are unrestricted and reserved for non-DSA direct tx on
GMAC ports. The following queues are automatically assigned by the MTK DSA
tag driver based on the target port number.
The PPE offload code configures the queues for offloaded traffic in the same
way.

This feature is only supported on devices supporting QDMA. All queues still
share the same DMA ring and descriptor pool.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -55,6 +55,7 @@ static const struct mtk_reg_map mtk_reg_
},
.qdma = {
.qtx_cfg = 0x1800,
+ .qtx_sch = 0x1804,
.rx_ptr = 0x1900,
.rx_cnt_cfg = 0x1904,
.qcrx_ptr = 0x1908,
@@ -62,6 +63,7 @@ static const struct mtk_reg_map mtk_reg_
.rst_idx = 0x1a08,
.delay_irq = 0x1a0c,
.fc_th = 0x1a10,
+ .tx_sch_rate = 0x1a14,
.int_grp = 0x1a20,
.hred = 0x1a44,
.ctx_ptr = 0x1b00,
@@ -117,6 +119,7 @@ static const struct mtk_reg_map mt7986_r
},
.qdma = {
.qtx_cfg = 0x4400,
+ .qtx_sch = 0x4404,
.rx_ptr = 0x4500,
.rx_cnt_cfg = 0x4504,
.qcrx_ptr = 0x4508,
@@ -134,6 +137,7 @@ static const struct mtk_reg_map mt7986_r
.fq_tail = 0x4724,
.fq_count = 0x4728,
.fq_blen = 0x472c,
+ .tx_sch_rate = 0x4798,
},
.gdm1_cnt = 0x1c00,
.gdma_to_ppe = 0x3333,
@@ -620,6 +624,75 @@ static void mtk_mac_link_down(struct phy
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

+static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
+ int speed)
+{
+ const struct mtk_soc_data *soc = eth->soc;
+ u32 ofs, val;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ return;
+
+ val = MTK_QTX_SCH_MIN_RATE_EN |
+ /* minimum: 10 Mbps */
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
+ MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
+
+ if (IS_ENABLED(CONFIG_SOC_MT7621)) {
+ switch (speed) {
+ case SPEED_10:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_100:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_1000:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (speed) {
+ case SPEED_10:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_100:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_1000:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
+ break;
+ default:
+ break;
+ }
+ }
+
+ ofs = MTK_QTX_OFFSET * idx;
+ mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
+}
+
static void mtk_mac_link_up(struct phylink_config *config,
struct phy_device *phy,
unsigned int mode, phy_interface_t interface,
@@ -645,6 +718,8 @@ static void mtk_mac_link_up(struct phyli
break;
}

+ mtk_set_queue_speed(mac->hw, mac->id, speed);
+
/* Configure duplex */
if (duplex == DUPLEX_FULL)
mcr |= MAC_MCR_FORCE_DPX;
@@ -1106,7 +1181,8 @@ static void mtk_tx_set_dma_desc_v1(struc

WRITE_ONCE(desc->txd1, info->addr);

- data = TX_DMA_SWC | TX_DMA_PLEN0(info->size);
+ data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
+ FIELD_PREP(TX_DMA_PQID, info->qid);
if (info->last)
data |= TX_DMA_LS0;
WRITE_ONCE(desc->txd3, data);
@@ -1140,9 +1216,6 @@ static void mtk_tx_set_dma_desc_v2(struc
data |= TX_DMA_LS0;
WRITE_ONCE(desc->txd3, data);

- if (!info->qid && mac->id)
- info->qid = MTK_QDMA_GMAC2_QID;
-
data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
WRITE_ONCE(desc->txd4, data);
@@ -1186,11 +1259,12 @@ static int mtk_tx_map(struct sk_buff *sk
.gso = gso,
.csum = skb->ip_summed == CHECKSUM_PARTIAL,
.vlan = skb_vlan_tag_present(skb),
- .qid = skb->mark & MTK_QDMA_TX_MASK,
+ .qid = skb_get_queue_mapping(skb),
.vlan_tci = skb_vlan_tag_get(skb),
.first = true,
.last = !skb_is_nonlinear(skb),
};
+ struct netdev_queue *txq;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
const struct mtk_soc_data *soc = eth->soc;
@@ -1198,8 +1272,10 @@ static int mtk_tx_map(struct sk_buff *sk
struct mtk_tx_dma *itxd_pdma, *txd_pdma;
struct mtk_tx_buf *itx_buf, *tx_buf;
int i, n_desc = 1;
+ int queue = skb_get_queue_mapping(skb);
int k = 0;

+ txq = netdev_get_tx_queue(dev, queue);
itxd = ring->next_free;
itxd_pdma = qdma_to_pdma(ring, itxd);
if (itxd == ring->last_free)
@@ -1248,7 +1324,7 @@ static int mtk_tx_map(struct sk_buff *sk
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = min_t(unsigned int, frag_size,
soc->txrx.dma_max_len);
- txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
+ txd_info.qid = queue;
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
@@ -1287,7 +1363,7 @@ static int mtk_tx_map(struct sk_buff *sk
txd_pdma->txd2 |= TX_DMA_LS1;
}

- netdev_sent_queue(dev, skb->len);
+ netdev_tx_sent_queue(txq, skb->len);
skb_tx_timestamp(skb);

ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
@@ -1299,8 +1375,7 @@ static int mtk_tx_map(struct sk_buff *sk
wmb();

if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
- !netdev_xmit_more())
+ if (netif_xmit_stopped(txq) || !netdev_xmit_more())
mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
} else {
int next_idx;
@@ -1369,7 +1444,7 @@ static void mtk_wake_queue(struct mtk_et
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
continue;
- netif_wake_queue(eth->netdev[i]);
+ netif_tx_wake_all_queues(eth->netdev[i]);
}
}

@@ -1393,7 +1468,7 @@ static netdev_tx_t mtk_start_xmit(struct

tx_num = mtk_cal_txd_req(eth, skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
spin_unlock(&eth->page_lock);
@@ -1419,7 +1494,7 @@ static netdev_tx_t mtk_start_xmit(struct
goto drop;

if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);

spin_unlock(&eth->page_lock);

@@ -1586,10 +1661,12 @@ static int mtk_xdp_submit_frame(struct m
struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_mac *mac = netdev_priv(dev);
struct mtk_tx_dma_desc_info txd_info = {
.size = xdpf->len,
.first = true,
.last = !xdp_frame_has_frags(xdpf),
+ .qid = mac->id,
};
int err, index = 0, n_desc = 1, nr_frags;
struct mtk_tx_buf *htx_buf, *tx_buf;
@@ -1639,6 +1716,7 @@ static int mtk_xdp_submit_frame(struct m
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = skb_frag_size(&sinfo->frags[index]);
txd_info.last = index + 1 == nr_frags;
+ txd_info.qid = mac->id;
data = skb_frag_address(&sinfo->frags[index]);

index++;
@@ -1993,8 +2071,46 @@ rx_done:
return done;
}

+struct mtk_poll_state {
+ struct netdev_queue *txq;
+ unsigned int total;
+ unsigned int done;
+ unsigned int bytes;
+};
+
+static void
+mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
+ struct sk_buff *skb)
+{
+ struct netdev_queue *txq;
+ struct net_device *dev;
+ unsigned int bytes = skb->len;
+
+ state->total++;
+ eth->tx_packets++;
+ eth->tx_bytes += bytes;
+
+ dev = eth->netdev[mac];
+ if (!dev)
+ return;
+
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ if (state->txq == txq) {
+ state->done++;
+ state->bytes += bytes;
+ return;
+ }
+
+ if (state->txq)
+ netdev_tx_completed_queue(state->txq, state->done, state->bytes);
+
+ state->txq = txq;
+ state->done = 1;
+ state->bytes = bytes;
+}
+
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
- unsigned int *done, unsigned int *bytes)
+ struct mtk_poll_state *state)
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_tx_ring *ring = &eth->tx_ring;
@@ -2026,12 +2142,9 @@ static int mtk_poll_tx_qdma(struct mtk_e
break;

if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
- if (tx_buf->type == MTK_TYPE_SKB) {
- struct sk_buff *skb = tx_buf->data;
-
- bytes[mac] += skb->len;
- done[mac]++;
- }
+ if (tx_buf->type == MTK_TYPE_SKB)
+ mtk_poll_tx_done(eth, state, mac, tx_buf->data);
budget--;
}
mtk_tx_unmap(eth, tx_buf, &bq, true);
@@ -2050,7 +2163,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
}

static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
- unsigned int *done, unsigned int *bytes)
+ struct mtk_poll_state *state)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_buf *tx_buf;
@@ -2068,12 +2181,8 @@ static int mtk_poll_tx_pdma(struct mtk_e
break;

if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
- if (tx_buf->type == MTK_TYPE_SKB) {
- struct sk_buff *skb = tx_buf->data;
-
- bytes[0] += skb->len;
- done[0]++;
- }
+ if (tx_buf->type == MTK_TYPE_SKB)
+ mtk_poll_tx_done(eth, state, 0, tx_buf->data);
budget--;
}
mtk_tx_unmap(eth, tx_buf, &bq, true);
@@ -2095,26 +2204,15 @@ static int mtk_poll_tx(struct mtk_eth *e
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct dim_sample dim_sample = {};
- unsigned int done[MTK_MAX_DEVS];
- unsigned int bytes[MTK_MAX_DEVS];
- int total = 0, i;
-
- memset(done, 0, sizeof(done));
- memset(bytes, 0, sizeof(bytes));
+ struct mtk_poll_state state = {};

if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
+ budget = mtk_poll_tx_qdma(eth, budget, &state);
else
- budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
+ budget = mtk_poll_tx_pdma(eth, budget, &state);

- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i] || !done[i])
- continue;
- netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
- total += done[i];
- eth->tx_packets += done[i];
- eth->tx_bytes += bytes[i];
- }
+ if (state.txq)
+ netdev_tx_completed_queue(state.txq, state.done, state.bytes);

dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
&dim_sample);
@@ -2124,7 +2222,7 @@ static int mtk_poll_tx(struct mtk_eth *e
(atomic_read(&ring->free_count) > ring->thresh))
mtk_wake_queue(eth);

- return total;
+ return state.total;
}

static void mtk_handle_status_irq(struct mtk_eth *eth)
@@ -2210,6 +2308,7 @@ static int mtk_tx_alloc(struct mtk_eth *
int i, sz = soc->txrx.txd_size;
struct mtk_tx_dma_v2 *txd;
int ring_size;
+ u32 ofs, val;

if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
ring_size = MTK_QDMA_RING_SIZE;
@@ -2277,8 +2376,25 @@ static int mtk_tx_alloc(struct mtk_eth *
ring->phys + ((ring_size - 1) * sz),
soc->reg_map->qdma.crx_ptr);
mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
- mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
- soc->reg_map->qdma.qtx_cfg);
+
+ for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
+ val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
+ mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
+
+ val = MTK_QTX_SCH_MIN_RATE_EN |
+ /* minimum: 10 Mbps */
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
+ MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
+ mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
+ ofs += MTK_QTX_OFFSET;
+ }
+ val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
+ mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
@@ -2960,7 +3076,7 @@ static int mtk_start_dma(struct mtk_eth
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
- MTK_CHK_DDONE_EN;
+ MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
else
val |= MTK_RX_BT_32DWORDS;
mtk_w32(eth, val, reg_map->qdma.glo_cfg);
@@ -3006,6 +3122,45 @@ static void mtk_gdm_config(struct mtk_et
mtk_w32(eth, 0, MTK_RST_GL);
}

+static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
+{
+ struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
+ struct mtk_eth *eth = mac->hw;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct ethtool_link_ksettings s;
+ struct net_device *ldev;
+ struct list_head *iter;
+ struct dsa_port *dp;
+
+ if (event != NETDEV_CHANGE)
+ return NOTIFY_DONE;
+
+ netdev_for_each_lower_dev(dev, ldev, iter) {
+ if (netdev_priv(ldev) == mac)
+ goto found;
+ }
+
+ return NOTIFY_DONE;
+
+found:
+ if (!dsa_slave_dev_check(dev))
+ return NOTIFY_DONE;
+
+ if (__ethtool_get_link_ksettings(dev, &s))
+ return NOTIFY_DONE;
+
+ if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
+ return NOTIFY_DONE;
+
+ dp = dsa_port_from_netdev(dev);
+ if (dp->index >= MTK_QDMA_NUM_QUEUES)
+ return NOTIFY_DONE;
+
+ mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
+
+ return NOTIFY_DONE;
+}
+
static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -3048,7 +3203,8 @@ static int mtk_open(struct net_device *d
refcount_inc(&eth->dma_refcnt);

phylink_start(mac->phylink);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
+
return 0;
}

@@ -3774,8 +3930,12 @@ static int mtk_unreg_dev(struct mtk_eth
int i;

for (i = 0; i < MTK_MAC_COUNT; i++) {
+ struct mtk_mac *mac;
if (!eth->netdev[i])
continue;
+ mac = netdev_priv(eth->netdev[i]);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ unregister_netdevice_notifier(&mac->device_notifier);
unregister_netdev(eth->netdev[i]);
}

@@ -3992,6 +4152,23 @@ static int mtk_set_rxnfc(struct net_devi
return ret;
}

+static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ unsigned int queue = 0;
+
+ if (netdev_uses_dsa(dev))
+ queue = skb_get_queue_mapping(skb) + 3;
+ else
+ queue = mac->id;
+
+ if (queue >= dev->num_tx_queues)
+ queue = 0;
+
+ return queue;
+}
+
static const struct ethtool_ops mtk_ethtool_ops = {
.get_link_ksettings = mtk_get_link_ksettings,
.set_link_ksettings = mtk_set_link_ksettings,
@@ -4027,6 +4204,7 @@ static const struct net_device_ops mtk_n
.ndo_setup_tc = mtk_eth_setup_tc,
.ndo_bpf = mtk_xdp,
.ndo_xdp_xmit = mtk_xdp_xmit,
+ .ndo_select_queue = mtk_select_queue,
};

static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
@@ -4036,6 +4214,7 @@ static int mtk_add_mac(struct mtk_eth *e
struct phylink *phylink;
struct mtk_mac *mac;
int id, err;
+ int txqs = 1;

if (!_id) {
dev_err(eth->dev, "missing mac id\n");
@@ -4053,7 +4232,10 @@ static int mtk_add_mac(struct mtk_eth *e
return -EINVAL;
}

- eth->netdev[id] = alloc_etherdev(sizeof(*mac));
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ txqs = MTK_QDMA_NUM_QUEUES;
+
+ eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
if (!eth->netdev[id]) {
dev_err(eth->dev, "alloc_etherdev failed\n");
return -ENOMEM;
@@ -4150,6 +4332,11 @@ static int mtk_add_mac(struct mtk_eth *e
else
eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;

+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ mac->device_notifier.notifier_call = mtk_device_event;
+ register_netdevice_notifier(&mac->device_notifier);
+ }
+
return 0;

free_netdev:
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -22,6 +22,7 @@
#include <linux/bpf_trace.h>
#include "mtk_ppe.h"

+#define MTK_QDMA_NUM_QUEUES 16
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
#define MTK_MAX_RX_LENGTH_2K 2048
@@ -216,8 +217,26 @@
#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)

/* QDMA TX Queue Configuration Registers */
+#define MTK_QTX_OFFSET 0x10
#define QDMA_RES_THRES 4

+/* QDMA Tx Queue Scheduler Configuration Registers */
+#define MTK_QTX_SCH_TX_SEL BIT(31)
+#define MTK_QTX_SCH_TX_SEL_V2 GENMASK(31, 30)
+
+#define MTK_QTX_SCH_LEAKY_BUCKET_EN BIT(30)
+#define MTK_QTX_SCH_LEAKY_BUCKET_SIZE GENMASK(29, 28)
+#define MTK_QTX_SCH_MIN_RATE_EN BIT(27)
+#define MTK_QTX_SCH_MIN_RATE_MAN GENMASK(26, 20)
+#define MTK_QTX_SCH_MIN_RATE_EXP GENMASK(19, 16)
+#define MTK_QTX_SCH_MAX_RATE_WEIGHT GENMASK(15, 12)
+#define MTK_QTX_SCH_MAX_RATE_EN BIT(11)
+#define MTK_QTX_SCH_MAX_RATE_MAN GENMASK(10, 4)
+#define MTK_QTX_SCH_MAX_RATE_EXP GENMASK(3, 0)
+
+/* QDMA TX Scheduler Rate Control Register */
+#define MTK_QDMA_TX_SCH_MAX_WFQ BIT(15)
+
/* QDMA Global Configuration Register */
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
@@ -236,6 +255,7 @@
#define MTK_WCOMP_EN BIT(24)
#define MTK_RESV_BUF (0x40 << 16)
#define MTK_MUTLI_CNT (0x4 << 12)
+#define MTK_LEAKY_BUCKET_EN BIT(11)

/* QDMA Flow Control Register */
#define FC_THRES_DROP_MODE BIT(20)
@@ -266,8 +286,6 @@
#define MTK_STAT_OFFSET 0x40

/* QDMA TX NUM */
-#define MTK_QDMA_TX_NUM 16
-#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1)
#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
#define MTK_QDMA_GMAC2_QID 8

@@ -297,6 +315,7 @@
#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
#define TX_DMA_SWC BIT(14)
+#define TX_DMA_PQID GENMASK(3, 0)

/* PDMA on MT7628 */
#define TX_DMA_DONE BIT(31)
@@ -957,6 +976,7 @@ struct mtk_reg_map {
} pdma;
struct {
u32 qtx_cfg; /* tx queue configuration */
+ u32 qtx_sch; /* tx queue scheduler configuration */
u32 rx_ptr; /* rx base pointer */
u32 rx_cnt_cfg; /* rx max count configuration */
u32 qcrx_ptr; /* rx cpu pointer */
@@ -974,6 +994,7 @@ struct mtk_reg_map {
u32 fq_tail; /* fq tail pointer */
u32 fq_count; /* fq free page count */
u32 fq_blen; /* fq free page buffer length */
+ u32 tx_sch_rate; /* tx scheduler rate control registers */
} qdma;
u32 gdm1_cnt;
u32 gdma_to_ppe;
@@ -1177,6 +1198,7 @@ struct mtk_mac {
__be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
int hwlro_ip_cnt;
unsigned int syscfg0;
+ struct notifier_block device_notifier;
};

/* the struct describing the SoC. these are declared in the soc_xyz.c files */
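One detail worth spelling out about mtk_set_queue_speed() above: the MIN/MAX
rate fields appear to use a mantissa/exponent encoding, rate = MAN * 10^EXP
in kbit/s. That reading is an inference from the values in this patch (the
"minimum: 10 Mbps" comment matches MAN = 1, EXP = 4), not documented register
semantics, but it decodes consistently:

    /* Assumed decoding of the QTX_SCH rate fields (inferred, not from a
     * datasheet): rate_kbps = man * 10^exp.
     *
     *   min rate:             man = 1,  exp = 4 ->  10,000 kbps  =  10 Mbps
     *   SPEED_10  (generic):  man = 1,  exp = 4 ->  10 Mbps
     *   SPEED_100 (generic):  man = 1,  exp = 5 ->  100 Mbps
     *   SPEED_1000 (generic): man = 10, exp = 5 ->  1,000,000 kbps = 1 Gbps
     */
    static unsigned int qtx_sch_rate_kbps(unsigned int man, unsigned int exp)
    {
        unsigned int rate = man;

        while (exp--)
            rate *= 10;
        return rate;
    }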
@ -0,0 +1,20 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Fri, 28 Oct 2022 18:16:03 +0200
Subject: [PATCH] net: dsa: tag_mtk: assign per-port queues

Keeps traffic sent to the switch within link speed limits.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -25,6 +25,8 @@ static struct sk_buff *mtk_tag_xmit(stru
u8 xmit_tpid;
u8 *mtk_tag;

+ skb_set_queue_mapping(skb, dp->index);
+
/* Build the special tag after the MAC Source Address. If VLAN header
* is present, it's required that VLAN header and special tag is
* being combined. Only in this way we can allow the switch can parse
@ -0,0 +1,93 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 3 Nov 2022 17:49:44 +0100
Subject: [PATCH] net: ethernet: mediatek: ppe: assign per-port queues for offloaded traffic

Keeps traffic sent to the switch within link speed limits.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -399,6 +399,24 @@ int mtk_foe_entry_set_wdma(struct mtk_et
return 0;
}

+int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ unsigned int queue)
+{
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ *ib2 &= ~MTK_FOE_IB2_QID_V2;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
+ *ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
+ } else {
+ *ib2 &= ~MTK_FOE_IB2_QID;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
+ *ib2 |= MTK_FOE_IB2_PSE_QOS;
+ }
+
+ return 0;
+}
+
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
struct mtk_foe_entry *data)
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -68,7 +68,9 @@ enum {
#define MTK_FOE_IB2_DSCP GENMASK(31, 24)

/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB2_QID_V2 GENMASK(6, 0)
#define MTK_FOE_IB2_PORT_MG_V2 BIT(7)
+#define MTK_FOE_IB2_PSE_QOS_V2 BIT(8)
#define MTK_FOE_IB2_DEST_PORT_V2 GENMASK(12, 9)
#define MTK_FOE_IB2_MULTICAST_V2 BIT(13)
#define MTK_FOE_IB2_WDMA_WINFO_V2 BIT(19)
@@ -351,6 +353,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_e
int sid);
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int wdma_idx, int txq, int bss, int wcid);
+int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ unsigned int queue);
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -188,7 +188,7 @@ mtk_flow_set_output_device(struct mtk_et
int *wed_index)
{
struct mtk_wdma_info info = {};
- int pse_port, dsa_port;
+ int pse_port, dsa_port, queue;

if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
@@ -212,8 +212,6 @@ mtk_flow_set_output_device(struct mtk_et
}

dsa_port = mtk_flow_get_dsa_port(&dev);
- if (dsa_port >= 0)
- mtk_foe_entry_set_dsa(eth, foe, dsa_port);

if (dev == eth->netdev[0])
pse_port = 1;
@@ -222,6 +220,14 @@ mtk_flow_set_output_device(struct mtk_et
else
return -EOPNOTSUPP;

+ if (dsa_port >= 0) {
+ mtk_foe_entry_set_dsa(eth, foe, dsa_port);
+ queue = 3 + dsa_port;
+ } else {
+ queue = pse_port - 1;
+ }
+ mtk_foe_entry_set_queue(eth, foe, queue);
+
out:
mtk_foe_entry_set_pse_port(eth, foe, pse_port);

@ -0,0 +1,28 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 27 Oct 2022 23:39:52 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: compile out netsys v2 code on mt7621

Avoid some branches in the hot path on low-end devices with limited CPU power,
and reduce code size.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -921,7 +921,13 @@ enum mkt_eth_capabilities {
#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)

-#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
+#ifdef CONFIG_SOC_MT7621
+#define MTK_CAP_MASK MTK_NETSYS_V2
+#else
+#define MTK_CAP_MASK 0
+#endif
+
+#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x) & ~(MTK_CAP_MASK)) == (_x))

#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
MTK_GMAC2_RGMII | MTK_SHARED_INT | \
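The trick is that MTK_CAP_MASK turns selected capability checks into
compile-time constants. A worked expansion under the MT7621 configuration
(just the macro above evaluated by hand):

    /* With CONFIG_SOC_MT7621=y, MTK_CAP_MASK == MTK_NETSYS_V2, so:
     *
     *   MTK_HAS_CAPS(caps, MTK_NETSYS_V2)
     *     == ((caps & MTK_NETSYS_V2 & ~MTK_NETSYS_V2) == MTK_NETSYS_V2)
     *     == (0 == MTK_NETSYS_V2)
     *     == false, a compile-time constant,
     *
     * letting the compiler drop every NETSYS_V2-only branch from the
     * MT7621 build.
     */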
@ -0,0 +1,72 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Tue, 8 Nov 2022 15:03:15 +0100
Subject: [PATCH] net: dsa: add support for DSA rx offloading via metadata dst

If a metadata dst is present with the type METADATA_HW_PORT_MUX on a dsa cpu
port netdev, assume that it carries the port number and that there is no DSA
tag present in the skb data.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -971,12 +971,14 @@ bool __skb_flow_dissect(const struct net
#if IS_ENABLED(CONFIG_NET_DSA)
if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
proto == htons(ETH_P_XDSA))) {
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
const struct dsa_device_ops *ops;
int offset = 0;

ops = skb->dev->dsa_ptr->tag_ops;
/* Only DSA header taggers break flow dissection */
- if (ops->needed_headroom) {
+ if (ops->needed_headroom &&
+ (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)) {
if (ops->flow_dissect)
ops->flow_dissect(skb, &proto, &offset);
else
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/ptp_classify.h>
+#include <net/dst_metadata.h>

#include "dsa_priv.h"

@@ -216,6 +217,7 @@ static bool dsa_skb_defer_rx_timestamp(s
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *unused)
{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct sk_buff *nskb = NULL;
struct dsa_slave_priv *p;
@@ -229,7 +231,22 @@ static int dsa_switch_rcv(struct sk_buff
if (!skb)
return 0;

- nskb = cpu_dp->rcv(skb, dev);
+ if (md_dst && md_dst->type == METADATA_HW_PORT_MUX) {
+ unsigned int port = md_dst->u.port_info.port_id;
+
+ skb_dst_drop(skb);
+ if (!skb_has_extensions(skb))
+ skb->slow_gro = 0;
+
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (likely(skb->dev)) {
+ dsa_default_offload_fwd_mark(skb);
+ nskb = skb;
+ }
+ } else {
+ nskb = cpu_dp->rcv(skb, dev);
+ }
+
if (!nskb) {
kfree_skb(skb);
return 0;
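The contract this creates for a DSA master driver is small: allocate one
METADATA_HW_PORT_MUX dst per source port, and attach it to each received skb
before it reaches dsa_switch_rcv(). A minimal sketch; rx_attach_port_metadata()
is an illustrative name, and the mtk_eth_soc patch later in this series does
the same thing with a per-port dsa_meta[] array:

    #include <net/dst_metadata.h>

    /* md_dst was created once with
     * metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL) and
     * md_dst->u.port_info.port_id set to the switch source port
     * reported by the hardware RX descriptor. */
    static void rx_attach_port_metadata(struct sk_buff *skb,
                                        struct metadata_dst *md_dst)
    {
        /* noref: the per-port dst outlives the skb, no refcounting */
        skb_dst_set_noref(skb, &md_dst->dst);
    }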
@ -0,0 +1,192 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Fri, 28 Oct 2022 11:01:12 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: fix VLAN rx hardware acceleration

- enable VLAN untagging for PDMA rx
- make it possible to disable the feature via ethtool
- pass VLAN tag to the DSA driver
- untag special tag on PDMA only if no non-DSA devices are in use
- disable special tag untagging on 7986 for now, since it's not working yet

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -23,6 +23,7 @@
#include <linux/jhash.h>
#include <linux/bitfield.h>
#include <net/dsa.h>
+#include <net/dst_metadata.h>

#include "mtk_eth_soc.h"
#include "mtk_wed.h"
@@ -2022,16 +2023,22 @@ static int mtk_poll_rx(struct napi_struc
htons(RX_DMA_VPID(trxd.rxd4)),
RX_DMA_VID(trxd.rxd4));
} else if (trxd.rxd2 & RX_DMA_VTAG) {
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ __vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)),
RX_DMA_VID(trxd.rxd3));
}
+ }
+
+ /* When using VLAN untagging in combination with DSA, the
+ * hardware treats the MTK special tag as a VLAN and untags it.
+ */
+ if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) {
+ unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0);

- /* If the device is attached to a dsa switch, the special
- * tag inserted in VLAN field by hw switch can * be offloaded
- * by RX HW VLAN offload. Clear vlan info.
- */
- if (netdev_uses_dsa(netdev))
- __vlan_hwaccel_clear_tag(skb);
+ if (port < ARRAY_SIZE(eth->dsa_meta) &&
+ eth->dsa_meta[port])
+ skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
+
+ __vlan_hwaccel_clear_tag(skb);
}

skb_record_rx_queue(skb, 0);
@@ -2856,15 +2863,30 @@ static netdev_features_t mtk_fix_feature

static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
- int err = 0;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ netdev_features_t diff = dev->features ^ features;
+ int i;
+
+ if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
+ mtk_hwlro_netdev_disable(dev);

- if (!((dev->features ^ features) & NETIF_F_LRO))
+ /* Set RX VLAN offloading */
+ if (!(diff & NETIF_F_HW_VLAN_CTAG_RX))
return 0;

- if (!(features & NETIF_F_LRO))
- mtk_hwlro_netdev_disable(dev);
+ mtk_w32(eth, !!(features & NETIF_F_HW_VLAN_CTAG_RX),
+ MTK_CDMP_EG_CTRL);

- return err;
+ /* sync features with other MAC */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i] || eth->netdev[i] == dev)
+ continue;
+ eth->netdev[i]->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ eth->netdev[i]->features |= features & NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ return 0;
}

/* wait for DMA to finish whatever it is doing before we start using it again */
@@ -3161,11 +3183,45 @@ found:
return NOTIFY_DONE;
}

+static bool mtk_uses_dsa(struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ return netdev_uses_dsa(dev) &&
+ dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
+#else
+ return false;
+#endif
+}
+
static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- int err;
+ int i, err;
+
+ if (mtk_uses_dsa(dev) && !eth->prog) {
+ for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
+ struct metadata_dst *md_dst = eth->dsa_meta[i];
+
+ if (md_dst)
+ continue;
+
+ md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!md_dst)
+ return -ENOMEM;
+
+ md_dst->u.port_info.port_id = i;
+ eth->dsa_meta[i] = md_dst;
+ }
+ } else {
+ /* Hardware special tag parsing needs to be disabled if at least
+ * one MAC does not use DSA.
+ */
+ u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+ val &= ~MTK_CDMP_STAG_EN;
+ mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
+ }

err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
if (err) {
@@ -3686,6 +3742,10 @@ static int mtk_hw_init(struct mtk_eth *e
*/
val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+ mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
+ }

/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
@@ -3922,6 +3982,12 @@ static int mtk_free_dev(struct mtk_eth *
free_netdev(eth->netdev[i]);
}

+ for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
+ if (!eth->dsa_meta[i])
+ break;
+ metadata_dst_free(eth->dsa_meta[i]);
+ }
+
return 0;
}

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -22,6 +22,9 @@
#include <linux/bpf_trace.h>
#include "mtk_ppe.h"

+#define MTK_MAX_DSA_PORTS 7
+#define MTK_DSA_PORT_MASK GENMASK(2, 0)
+
#define MTK_QDMA_NUM_QUEUES 16
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
@@ -105,6 +108,9 @@
#define MTK_CDMQ_IG_CTRL 0x1400
#define MTK_CDMQ_STAG_EN BIT(0)

+/* CDMQ Egress Control Register */
+#define MTK_CDMQ_EG_CTRL 0x1404
+
/* CDMP Ingress Control Register */
#define MTK_CDMP_IG_CTRL 0x400
#define MTK_CDMP_STAG_EN BIT(0)
@@ -1170,6 +1176,8 @@ struct mtk_eth {

int ip_align;

+ struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS];
+
struct mtk_ppe *ppe[2];
struct rhashtable flow_table;

@ -0,0 +1,42 @@
From: Arınç ÜNAL <arinc.unal@arinc9.com>
Date: Sat, 28 Jan 2023 12:42:32 +0300
Subject: [PATCH] net: ethernet: mtk_eth_soc: disable hardware DSA untagging for second MAC
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

According to my tests on MT7621AT and MT7623NI SoCs, hardware DSA untagging
won't work on the second MAC. Therefore, disable this feature when the
second MAC of the MT7621 and MT7623 SoCs is being used.

Fixes: 2d7605a72906 ("net: ethernet: mtk_eth_soc: enable hardware DSA untagging")
Link: https://lore.kernel.org/netdev/6249fc14-b38a-c770-36b4-5af6d41c21d3@arinc9.com/
Tested-by: Arınç ÜNAL <arinc.unal@arinc9.com>
Signed-off-by: Arınç ÜNAL <arinc.unal@arinc9.com>
Link: https://lore.kernel.org/r/20230128094232.2451947-1-arinc.unal@arinc9.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3199,7 +3199,8 @@ static int mtk_open(struct net_device *d
struct mtk_eth *eth = mac->hw;
int i, err;

- if (mtk_uses_dsa(dev) && !eth->prog) {
+ if ((mtk_uses_dsa(dev) && !eth->prog) &&
+ !(mac->id == 1 && MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_TRGMII))) {
for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
struct metadata_dst *md_dst = eth->dsa_meta[i];

@@ -3216,7 +3217,8 @@ static int mtk_open(struct net_device *d
}
} else {
/* Hardware special tag parsing needs to be disabled if at least
- * one MAC does not use DSA.
+ * one MAC does not use DSA, or the second MAC of the MT7621 and
+ * MT7623 SoCs is being used.
*/
u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
val &= ~MTK_CDMP_STAG_EN;
@ -0,0 +1,54 @@
From: Arınç ÜNAL <arinc.unal@arinc9.com>
Date: Sun, 5 Feb 2023 20:53:31 +0300
Subject: [PATCH] net: ethernet: mtk_eth_soc: enable special tag when any MAC uses DSA
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The special tag is only enabled when the first MAC uses DSA. However, it
must be enabled when any MAC uses DSA. Change the check accordingly.

This fixes hardware DSA untagging not working on the second MAC of the
MT7621 and MT7623 SoCs, and likely other SoCs too. Therefore, remove the
check that disables hardware DSA untagging for the second MAC of the MT7621
and MT7623 SoCs.

Fixes: a1f47752fd62 ("net: ethernet: mtk_eth_soc: disable hardware DSA untagging for second MAC")
Co-developed-by: Richard van Schagen <richard@routerhints.com>
Signed-off-by: Richard van Schagen <richard@routerhints.com>
Signed-off-by: Arınç ÜNAL <arinc.unal@arinc9.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3134,7 +3134,7 @@ static void mtk_gdm_config(struct mtk_et

val |= config;

- if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
+ if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
val |= MTK_GDMA_SPECIAL_TAG;

mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
@@ -3199,8 +3199,7 @@ static int mtk_open(struct net_device *d
struct mtk_eth *eth = mac->hw;
int i, err;

- if ((mtk_uses_dsa(dev) && !eth->prog) &&
- !(mac->id == 1 && MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_TRGMII))) {
+ if (mtk_uses_dsa(dev) && !eth->prog) {
for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
struct metadata_dst *md_dst = eth->dsa_meta[i];

@@ -3217,8 +3216,7 @@ static int mtk_open(struct net_device *d
}
} else {
/* Hardware special tag parsing needs to be disabled if at least
- * one MAC does not use DSA, or the second MAC of the MT7621 and
- * MT7623 SoCs is being used.
+ * one MAC does not use DSA.
*/
u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
val &= ~MTK_CDMP_STAG_EN;
@ -0,0 +1,129 @@
|
||||
From: Vladimir Oltean <vladimir.oltean@nxp.com>
|
||||
Date: Tue, 7 Feb 2023 12:30:27 +0200
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: fix DSA TX tag hwaccel for switch
|
||||
port 0
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
Arınç reports that on his MT7621AT Unielec U7621-06 board and MT7623NI
|
||||
Bananapi BPI-R2, packets received by the CPU over mt7530 switch port 0
|
||||
(of which this driver acts as the DSA master) are not processed
|
||||
correctly by software. More precisely, they arrive without a DSA tag
|
||||
(in packet or in the hwaccel area - skb_metadata_dst()), so DSA cannot
demux them towards the switch's interface for port 0. Traffic from other
ports receives a skb_metadata_dst() with the correct port and is demuxed
properly.

Looking at mtk_poll_rx(), it becomes apparent that this driver uses the
skb vlan hwaccel area:

        union {
                u32 vlan_all;
                struct {
                        __be16 vlan_proto;
                        __u16 vlan_tci;
                };
        };

as temporary storage for the VLAN hwaccel tag or, on a DSA master, the
DSA hwaccel tag, and finally clears the skb VLAN hwaccel header.

I'm guessing that the problem is the (mis)use of API.
skb_vlan_tag_present() looks like this:

#define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all)

So if both vlan_proto and vlan_tci are zeroes, skb_vlan_tag_present()
returns precisely false. I don't know for sure what the format of the
DSA hwaccel tag is, but I do know that the lowermost 3 bits of
vlan_proto are 0 when receiving from port 0:

        unsigned int port = vlan_proto & GENMASK(2, 0);

If the RX descriptor has no other bits set to non-zero values in
RX_DMA_VTAG, then the call to __vlan_hwaccel_put_tag() will not, in
fact, make the subsequent skb_vlan_tag_present() return true, because
it's implemented like this:

static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
                                          __be16 vlan_proto, u16 vlan_tci)
{
        skb->vlan_proto = vlan_proto;
        skb->vlan_tci = vlan_tci;
}

What we need to do to fix this problem (assuming this is the problem) is
to stop using skb->vlan_all as temporary storage for driver affairs, and
just create some local variables that serve the same purpose, but
hopefully better. Instead of calling skb_vlan_tag_present(), let's look
at a boolean has_hwaccel_tag which we set to true when the RX DMA
descriptors have something. Disambiguate based on netdev_uses_dsa()
whether this is a VLAN or DSA hwaccel tag, and only call
__vlan_hwaccel_put_tag() if we're certain it's a VLAN tag.

Arınç confirms that the treatment works, so this validates the
assumption.

Link: https://lore.kernel.org/netdev/704f3a72-fc9e-714a-db54-272e17612637@arinc9.com/
Fixes: 2d7605a72906 ("net: ethernet: mtk_eth_soc: enable hardware DSA untagging")
Reported-by: Arınç ÜNAL <arinc.unal@arinc9.com>
Tested-by: Arınç ÜNAL <arinc.unal@arinc9.com>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1878,7 +1878,9 @@ static int mtk_poll_rx(struct napi_struc

while (done < budget) {
unsigned int pktlen, *rxdcsum;
+ bool has_hwaccel_tag = false;
struct net_device *netdev;
+ u16 vlan_proto, vlan_tci;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@@ -2018,27 +2020,29 @@ static int mtk_poll_rx(struct napi_struc

if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- if (trxd.rxd3 & RX_DMA_VTAG_V2)
- __vlan_hwaccel_put_tag(skb,
- htons(RX_DMA_VPID(trxd.rxd4)),
- RX_DMA_VID(trxd.rxd4));
+ if (trxd.rxd3 & RX_DMA_VTAG_V2) {
+ vlan_proto = RX_DMA_VPID(trxd.rxd4);
+ vlan_tci = RX_DMA_VID(trxd.rxd4);
+ has_hwaccel_tag = true;
+ }
} else if (trxd.rxd2 & RX_DMA_VTAG) {
- __vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)),
- RX_DMA_VID(trxd.rxd3));
+ vlan_proto = RX_DMA_VPID(trxd.rxd3);
+ vlan_tci = RX_DMA_VID(trxd.rxd3);
+ has_hwaccel_tag = true;
}
}

/* When using VLAN untagging in combination with DSA, the
* hardware treats the MTK special tag as a VLAN and untags it.
*/
- if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) {
- unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0);
+ if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
+ unsigned int port = vlan_proto & GENMASK(2, 0);

if (port < ARRAY_SIZE(eth->dsa_meta) &&
eth->dsa_meta[port])
skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
-
- __vlan_hwaccel_clear_tag(skb);
+ } else if (has_hwaccel_tag) {
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
}

skb_record_rx_queue(skb, 0);
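
To make the failure mode above concrete, here is a minimal userspace
sketch (plain C11, not kernel code; the union is a local stand-in for
the sk_buff fields quoted in the commit message) showing that storing a
zero proto/TCI pair leaves the "tag present" test false:

#include <assert.h>
#include <stdint.h>

union vlan_area {
        uint32_t vlan_all;
        struct {
                uint16_t vlan_proto;
                uint16_t vlan_tci;
        };
};

int main(void)
{
        union vlan_area v;

        /* What the driver effectively did for port 0 when no other
         * RX_DMA_VTAG bits are set: both halves of the union become 0. */
        v.vlan_proto = 0;
        v.vlan_tci = 0;

        /* skb_vlan_tag_present() is !!vlan_all, so the tag is seen as
         * absent and the DSA demux branch is never taken. */
        assert(v.vlan_all == 0);
        return 0;
}
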
@ -0,0 +1,26 @@
From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Date: Sun, 12 Feb 2023 07:51:51 +0100
Subject: [PATCH] net: ethernet: mtk_wed: No need to clear memory after a
 dma_alloc_coherent() call

dma_alloc_coherent() already clears the allocated memory, so there is no
need to explicitly call memset().

Moreover, it is likely that the size in the memset() is incorrect and
should be "size * sizeof(*ring->desc)".

Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Link: https://lore.kernel.org/r/d5acce7dd108887832c9719f62c7201b4c83b3fb.1676184599.git.christophe.jaillet@wanadoo.fr
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -779,7 +779,6 @@ mtk_wed_rro_ring_alloc(struct mtk_wed_de

ring->desc_size = sizeof(*ring->desc);
ring->size = size;
- memset(ring->desc, 0, size);

return 0;
}
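
To see why the removed memset() was doubly wrong, consider a userspace
analogy: calloc(), like dma_alloc_coherent(), already returns zeroed
memory, and clearing "size" bytes would only cover the first
size / sizeof(*desc) entries of a size-element descriptor array. A
hedged illustration, not the driver's code:

#include <assert.h>
#include <stdlib.h>

struct desc {
        unsigned int buf0;
        unsigned int ctrl;
};

int main(void)
{
        size_t size = 16;       /* number of descriptors, not bytes */
        struct desc *d = calloc(size, sizeof(*d));

        if (!d)
                return 1;

        /* Already zeroed; a memset(d, 0, size) here would clear only
         * 16 bytes, i.e. the first two 8-byte descriptors. The correct
         * byte count would be size * sizeof(*d). */
        assert(d[size - 1].ctrl == 0);

        free(d);
        return 0;
}
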
@ -0,0 +1,61 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Wed, 7 Dec 2022 15:04:54 +0100
Subject: [PATCH] net: ethernet: mtk_wed: fix some possible NULL pointer
 dereferences

Fix a possible NULL pointer dereference in the mtk_wed_detach routine by
checking that the wo pointer is properly allocated before running
mtk_wed_wo_reset() and mtk_wed_wo_deinit().
Even if it is just a theoretical issue at the moment, check that the wo
pointer is not NULL in mtk_wed_mcu_msg_update.
Moreover, honor the mtk_wed_mcu_send_msg return value in mtk_wed_wo_reset().

Fixes: 799684448e3e ("net: ethernet: mtk_wed: introduce wed wo support")
Fixes: 4c5de09eb0d0 ("net: ethernet: mtk_wed: add configure wed wo support")
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -174,9 +174,10 @@ mtk_wed_wo_reset(struct mtk_wed_device *
mtk_wdma_tx_reset(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);

- mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
- MTK_WED_WO_CMD_CHANGE_STATE, &state,
- sizeof(state), false);
+ if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_CHANGE_STATE, &state,
+ sizeof(state), false))
+ return;

if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val,
val == MTK_WED_WOIF_DISABLE_DONE,
@@ -632,9 +633,11 @@ mtk_wed_detach(struct mtk_wed_device *de
mtk_wed_free_tx_rings(dev);

if (mtk_wed_get_rx_capa(dev)) {
- mtk_wed_wo_reset(dev);
+ if (hw->wed_wo)
+ mtk_wed_wo_reset(dev);
mtk_wed_free_rx_rings(dev);
- mtk_wed_wo_deinit(hw);
+ if (hw->wed_wo)
+ mtk_wed_wo_deinit(hw);
}

if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -207,6 +207,9 @@ int mtk_wed_mcu_msg_update(struct mtk_we
if (dev->hw->version == 1)
return 0;

+ if (WARN_ON(!wo))
+ return -ENODEV;
+
return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, id, data, len,
true);
}
@ -0,0 +1,58 @@
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Wed, 7 Dec 2022 15:04:55 +0100
Subject: [PATCH] net: ethernet: mtk_wed: fix possible deadlock if
 mtk_wed_wo_init fails

Introduce __mtk_wed_detach() in order to avoid a deadlock in the
mtk_wed_attach routine if mtk_wed_wo_init fails, since both
mtk_wed_attach and mtk_wed_detach run holding the hw_lock mutex.

Fixes: 4c5de09eb0d0 ("net: ethernet: mtk_wed: add configure wed wo support")
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---

--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -619,12 +619,10 @@ mtk_wed_deinit(struct mtk_wed_device *de
}

static void
-mtk_wed_detach(struct mtk_wed_device *dev)
+__mtk_wed_detach(struct mtk_wed_device *dev)
{
struct mtk_wed_hw *hw = dev->hw;

- mutex_lock(&hw_lock);
-
mtk_wed_deinit(dev);

mtk_wdma_rx_reset(dev);
@@ -657,6 +655,13 @@ mtk_wed_detach(struct mtk_wed_device *de
module_put(THIS_MODULE);

hw->wed_dev = NULL;
+}
+
+static void
+mtk_wed_detach(struct mtk_wed_device *dev)
+{
+ mutex_lock(&hw_lock);
+ __mtk_wed_detach(dev);
mutex_unlock(&hw_lock);
}

@@ -1538,8 +1543,10 @@ mtk_wed_attach(struct mtk_wed_device *de
ret = mtk_wed_wo_init(hw);
}
out:
- if (ret)
- mtk_wed_detach(dev);
+ if (ret) {
+ dev_err(dev->hw->dev, "failed to attach wed device\n");
+ __mtk_wed_detach(dev);
+ }
unlock:
mutex_unlock(&hw_lock);
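
The refactor follows a common locking pattern: keep a public entry point
that takes the lock, plus a double-underscore helper that assumes the
lock is already held, so error paths inside a locked region can clean up
without re-acquiring a non-recursive mutex. A minimal sketch of the
pattern with POSIX threads (generic illustration, not the driver's code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold hw_lock. */
static void __detach(void)
{
        printf("tearing down\n");
}

/* Public entry point: takes the lock itself. */
static void detach(void)
{
        pthread_mutex_lock(&hw_lock);
        __detach();
        pthread_mutex_unlock(&hw_lock);
}

static int attach(void)
{
        int ret = -1;                   /* pretend init failed */

        pthread_mutex_lock(&hw_lock);
        if (ret)
                __detach();             /* calling detach() here would deadlock */
        pthread_mutex_unlock(&hw_lock);
        return ret;
}

int main(void)
{
        attach();
        detach();
        return 0;
}
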
@ -0,0 +1,31 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Fri, 24 Mar 2023 14:56:58 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: fix tx throughput regression with
 direct 1G links

Using the QDMA tx scheduler to throttle tx to line speed works fine for
switch ports, but apparently caused a regression on non-switch ports.

Based on a number of tests, it seems that this throttling can be safely
dropped without re-introducing the issues on switch ports that the
tx scheduling changes resolved.

Link: https://lore.kernel.org/netdev/trinity-92c3826f-c2c8-40af-8339-bc6d0d3ffea4-1678213958520@3c-app-gmx-bs16/
Fixes: f63959c7eec3 ("net: ethernet: mtk_eth_soc: implement multi-queue support for per-port queues")
Reported-by: Frank Wunderlich <frank-w@public-files.de>
Reported-by: Daniel Golle <daniel@makrotopia.org>
Tested-by: Daniel Golle <daniel@makrotopia.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -719,8 +719,6 @@ static void mtk_mac_link_up(struct phyli
break;
}

- mtk_set_queue_speed(mac->hw, mac->id, speed);
-
/* Configure duplex */
if (duplex == DUPLEX_FULL)
mcr |= MAC_MCR_FORCE_DPX;
@ -0,0 +1,55 @@
From b6a709cb51f7bdc55c01cec886098a9753ce8c28 Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
Date: Thu, 27 Oct 2022 14:10:42 +0100
Subject: [PATCH 01/10] net: mtk_eth_soc: add definitions for PCS

As a result of help from Frank Wunderlich to investigate and test, we
know a bit more about the PCS on the Mediatek platforms. Update the
definitions from this investigation.

This PCS appears similar, but not identical, to the Lynx PCS.

Although not included in this patch, for future reference the PHY
ID registers at offset 4 read as 0x4d544950 'MTIP'.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -504,8 +504,10 @@
#define ETHSYS_DMA_AG_MAP_PPE BIT(2)

/* SGMII subsystem config registers */
-/* Register to auto-negotiation restart */
+/* BMCR (low 16) BMSR (high 16) */
#define SGMSYS_PCS_CONTROL_1 0x0
+#define SGMII_BMCR GENMASK(15, 0)
+#define SGMII_BMSR GENMASK(31, 16)
#define SGMII_AN_RESTART BIT(9)
#define SGMII_ISOLATE BIT(10)
#define SGMII_AN_ENABLE BIT(12)
@@ -515,13 +517,18 @@
#define SGMII_PCS_FAULT BIT(23)
#define SGMII_AN_EXPANSION_CLR BIT(30)

+#define SGMSYS_PCS_ADVERTISE 0x8
+#define SGMII_ADVERTISE GENMASK(15, 0)
+#define SGMII_LPA GENMASK(31, 16)
+
/* Register to programmable link timer, the unit in 2 * 8ns */
#define SGMSYS_PCS_LINK_TIMER 0x18
-#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0))
+#define SGMII_LINK_TIMER_MASK GENMASK(19, 0)
+#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & SGMII_LINK_TIMER_MASK)

/* Register to control remote fault */
#define SGMSYS_SGMII_MODE 0x20
-#define SGMII_IF_MODE_BIT0 BIT(0)
+#define SGMII_IF_MODE_SGMII BIT(0)
#define SGMII_SPEED_DUPLEX_AN BIT(1)
#define SGMII_SPEED_MASK GENMASK(3, 2)
#define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0)
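
The new definitions encode the MII-like layout of SGMSYS_PCS_CONTROL_1:
a single 32-bit register carrying the BMCR in bits 15:0 and the BMSR in
bits 31:16. A small standalone sketch of extracting both halves (the raw
value below is hypothetical, chosen only to exercise both fields):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t raw = 0x0004f140;      /* hypothetical register readback */

        uint16_t bmcr = raw & 0xffffu;  /* SGMII_BMCR, GENMASK(15, 0) */
        uint16_t bmsr = raw >> 16;      /* SGMII_BMSR, GENMASK(31, 16) */

        assert(bmcr == 0xf140 && bmsr == 0x0004);
        return 0;
}
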
@ -0,0 +1,74 @@
From 5cf7797526ee81bea0f627bccaa3d887f48f53e0 Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
Date: Thu, 27 Oct 2022 14:10:47 +0100
Subject: [PATCH 02/10] net: mtk_eth_soc: eliminate unnecessary error handling

The functions called by the pcs_config() method always return zero, so
there is no point trying to handle an error from these functions. Make
these functions void, eliminate the "err" variable and simply return
zero from the pcs_config() function itself.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_sgmii.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -20,7 +20,7 @@ static struct mtk_pcs *pcs_to_mtk_pcs(st
}

/* For SGMII interface mode */
-static int mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs)
+static void mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs)
{
unsigned int val;

@@ -39,16 +39,13 @@ static int mtk_pcs_setup_mode_an(struct
regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
val &= ~SGMII_PHYA_PWD;
regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val);
-
- return 0;
-
}

/* For 1000BASE-X and 2500BASE-X interface modes, which operate at a
* fixed speed.
*/
-static int mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs,
- phy_interface_t interface)
+static void mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs,
+ phy_interface_t interface)
{
unsigned int val;

@@ -73,8 +70,6 @@ static int mtk_pcs_setup_mode_force(stru
regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
val &= ~SGMII_PHYA_PWD;
regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val);
-
- return 0;
}

static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
@@ -83,15 +78,14 @@ static int mtk_pcs_config(struct phylink
bool permit_pause_to_mac)
{
struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
- int err = 0;

/* Setup SGMIISYS with the determined property */
if (interface != PHY_INTERFACE_MODE_SGMII)
- err = mtk_pcs_setup_mode_force(mpcs, interface);
+ mtk_pcs_setup_mode_force(mpcs, interface);
else if (phylink_autoneg_inband(mode))
- err = mtk_pcs_setup_mode_an(mpcs);
+ mtk_pcs_setup_mode_an(mpcs);

- return err;
+ return 0;
}

static void mtk_pcs_restart_an(struct phylink_pcs *pcs)
@ -0,0 +1,46 @@
From c000dca098002da193b98099df051c9ead0cacb4 Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
Date: Thu, 27 Oct 2022 14:10:52 +0100
Subject: [PATCH 03/10] net: mtk_eth_soc: add pcs_get_state() implementation

Add a pcs_get_state() implementation which uses the advertisements
to compute the resulting link modes, and BMSR contents to determine
negotiation and link status.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_sgmii.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -19,6 +19,20 @@ static struct mtk_pcs *pcs_to_mtk_pcs(st
return container_of(pcs, struct mtk_pcs, pcs);
}

+static void mtk_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
+ unsigned int bm, adv;
+
+ /* Read the BMSR and LPA */
+ regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &bm);
+ regmap_read(mpcs->regmap, SGMSYS_PCS_ADVERTISE, &adv);
+
+ phylink_mii_c22_pcs_decode_state(state, FIELD_GET(SGMII_BMSR, bm),
+ FIELD_GET(SGMII_LPA, adv));
+}
+
/* For SGMII interface mode */
static void mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs)
{
@@ -117,6 +131,7 @@ static void mtk_pcs_link_up(struct phyli
}

static const struct phylink_pcs_ops mtk_pcs_ops = {
+ .pcs_get_state = mtk_pcs_get_state,
.pcs_config = mtk_pcs_config,
.pcs_an_restart = mtk_pcs_restart_an,
.pcs_link_up = mtk_pcs_link_up,
@ -0,0 +1,130 @@
From 0d2351dc2768061689abd4de1529fa206bbd574e Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
Date: Thu, 27 Oct 2022 14:10:58 +0100
Subject: [PATCH 04/10] net: mtk_eth_soc: convert mtk_sgmii to use
 regmap_update_bits()

mtk_sgmii does a lot of read-modify-write operations, for which there
is a specific regmap function. Use this function instead of open-coding
the operations.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_sgmii.c | 61 ++++++++++-------------
 1 file changed, 26 insertions(+), 35 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -36,23 +36,18 @@ static void mtk_pcs_get_state(struct phy
/* For SGMII interface mode */
static void mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs)
{
- unsigned int val;
-
/* Setup the link timer and QPHY power up inside SGMIISYS */
regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER,
SGMII_LINK_TIMER_DEFAULT);

- regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val);
- val |= SGMII_REMOTE_FAULT_DIS;
- regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val);
-
- regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val);
- val |= SGMII_AN_RESTART;
- regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val);
-
- regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
- val &= ~SGMII_PHYA_PWD;
- regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val);
+ regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
+ SGMII_REMOTE_FAULT_DIS, SGMII_REMOTE_FAULT_DIS);
+
+ regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
+ SGMII_AN_RESTART, SGMII_AN_RESTART);
+
+ regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
+ SGMII_PHYA_PWD, 0);
}

/* For 1000BASE-X and 2500BASE-X interface modes, which operate at a
@@ -61,29 +56,26 @@ static void mtk_pcs_setup_mode_an(struct
static void mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs,
phy_interface_t interface)
{
- unsigned int val;
+ unsigned int rgc3;

- regmap_read(mpcs->regmap, mpcs->ana_rgc3, &val);
- val &= ~RG_PHY_SPEED_MASK;
if (interface == PHY_INTERFACE_MODE_2500BASEX)
- val |= RG_PHY_SPEED_3_125G;
- regmap_write(mpcs->regmap, mpcs->ana_rgc3, val);
+ rgc3 = RG_PHY_SPEED_3_125G;
+
+ regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
+ RG_PHY_SPEED_3_125G, rgc3);

/* Disable SGMII AN */
- regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val);
- val &= ~SGMII_AN_ENABLE;
- regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val);
+ regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
+ SGMII_AN_ENABLE, 0);

/* Set the speed etc but leave the duplex unchanged */
- regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val);
- val &= SGMII_DUPLEX_FULL | ~SGMII_IF_MODE_MASK;
- val |= SGMII_SPEED_1000;
- regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val);
+ regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
+ SGMII_IF_MODE_MASK & ~SGMII_DUPLEX_FULL,
+ SGMII_SPEED_1000);

/* Release PHYA power down state */
- regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
- val &= ~SGMII_PHYA_PWD;
- regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val);
+ regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
+ SGMII_PHYA_PWD, 0);
}

static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
@@ -105,29 +97,28 @@ static int mtk_pcs_config(struct phylink
static void mtk_pcs_restart_an(struct phylink_pcs *pcs)
{
struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
- unsigned int val;

- regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val);
- val |= SGMII_AN_RESTART;
- regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val);
+ regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
+ SGMII_AN_RESTART, SGMII_AN_RESTART);
}

static void mtk_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface, int speed, int duplex)
{
struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
- unsigned int val;
+ unsigned int sgm_mode;

if (!phy_interface_mode_is_8023z(interface))
return;

/* SGMII force duplex setting */
- regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val);
- val &= ~SGMII_DUPLEX_FULL;
if (duplex == DUPLEX_FULL)
- val |= SGMII_DUPLEX_FULL;
+ sgm_mode = SGMII_DUPLEX_FULL;
+ else
+ sgm_mode = 0;

- regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val);
+ regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
+ SGMII_DUPLEX_FULL, sgm_mode);
}

static const struct phylink_pcs_ops mtk_pcs_ops = {
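
The conversion relies on regmap_update_bits(map, reg, mask, val) being,
semantically, a read-modify-write: read the register, clear the bits in
mask, OR in (val & mask), and write back only if the value changed. A
self-contained sketch of that semantics over a plain array standing in
for the register file (illustration only, not the regmap implementation):

#include <assert.h>
#include <stdint.h>

static uint32_t regs[4];                /* fake register file */

static void update_bits(unsigned int reg, uint32_t mask, uint32_t val)
{
        uint32_t old = regs[reg];
        uint32_t new = (old & ~mask) | (val & mask);

        if (new != old)                 /* skip redundant bus writes */
                regs[reg] = new;
}

int main(void)
{
        regs[0] = 0x00f0;

        /* set-bit form, like the SGMII_AN_RESTART update above */
        update_bits(0, 1u << 9, 1u << 9);
        assert(regs[0] == 0x02f0);

        /* clear-bit form, like the SGMII_PHYA_PWD update above */
        update_bits(0, 0xf0, 0);
        assert(regs[0] == 0x0200);
        return 0;
}
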
@ -0,0 +1,52 @@
From 12198c3a410fe69843e335c1bbf6d4c2a4d48e4e Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
Date: Thu, 27 Oct 2022 14:11:03 +0100
Subject: [PATCH 05/10] net: mtk_eth_soc: add out of band forcing of speed and
 duplex in pcs_link_up

Add support for forcing the link speed and duplex setting in the
pcs_link_up() method for out of band modes, which will be useful when
we finish converting the pcs_config() method. Until then, we still have
to force duplex for 802.3z modes to work correctly.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_sgmii.c | 28 ++++++++++++++---------
 1 file changed, 17 insertions(+), 11 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -108,17 +108,23 @@ static void mtk_pcs_link_up(struct phyli
struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
unsigned int sgm_mode;

- if (!phy_interface_mode_is_8023z(interface))
- return;
+ if (!phylink_autoneg_inband(mode) ||
+ phy_interface_mode_is_8023z(interface)) {
+ /* Force the speed and duplex setting */
+ if (speed == SPEED_10)
+ sgm_mode = SGMII_SPEED_10;
+ else if (speed == SPEED_100)
+ sgm_mode = SGMII_SPEED_100;
+ else
+ sgm_mode = SGMII_SPEED_1000;

- /* SGMII force duplex setting */
- if (duplex == DUPLEX_FULL)
- sgm_mode = SGMII_DUPLEX_FULL;
- else
- sgm_mode = 0;
+ if (duplex == DUPLEX_FULL)
+ sgm_mode |= SGMII_DUPLEX_FULL;

- regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
- SGMII_DUPLEX_FULL, sgm_mode);
+ regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
+ SGMII_DUPLEX_FULL | SGMII_SPEED_MASK,
+ sgm_mode);
+ }
}

static const struct phylink_pcs_ops mtk_pcs_ops = {
@ -0,0 +1,48 @@
From 6f38fffe2179dd29612aea2c67c46ed6682b4e46 Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
Date: Thu, 27 Oct 2022 14:11:08 +0100
Subject: [PATCH 06/10] net: mtk_eth_soc: move PHY power up

The PHY power up is common to both configuration paths, so move it into
the parent function. We need to do this for all serdes modes.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_sgmii.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -45,9 +45,6 @@ static void mtk_pcs_setup_mode_an(struct

regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
SGMII_AN_RESTART, SGMII_AN_RESTART);
-
- regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
- SGMII_PHYA_PWD, 0);
}

/* For 1000BASE-X and 2500BASE-X interface modes, which operate at a
@@ -72,10 +69,6 @@ static void mtk_pcs_setup_mode_force(str
regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
SGMII_IF_MODE_MASK & ~SGMII_DUPLEX_FULL,
SGMII_SPEED_1000);
-
- /* Release PHYA power down state */
- regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
- SGMII_PHYA_PWD, 0);
}

static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
@@ -91,6 +84,10 @@ static int mtk_pcs_config(struct phylink
else if (phylink_autoneg_inband(mode))
mtk_pcs_setup_mode_an(mpcs);

+ /* Release PHYA power down state */
+ regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
+ SGMII_PHYA_PWD, 0);
+
return 0;
}

@ -0,0 +1,48 @@
From f752c0df13dfeb721c11d3debb79f08cf437344f Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
Date: Thu, 27 Oct 2022 14:11:13 +0100
Subject: [PATCH 07/10] net: mtk_eth_soc: move interface speed selection

Move the selection of the underlying interface speed to the pcs_config
function, so we always program the interface speed.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_sgmii.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -53,14 +53,6 @@ static void mtk_pcs_setup_mode_an(struct
static void mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs,
phy_interface_t interface)
{
- unsigned int rgc3;
-
- if (interface == PHY_INTERFACE_MODE_2500BASEX)
- rgc3 = RG_PHY_SPEED_3_125G;
-
- regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
- RG_PHY_SPEED_3_125G, rgc3);
-
/* Disable SGMII AN */
regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
SGMII_AN_ENABLE, 0);
@@ -77,6 +69,16 @@ static int mtk_pcs_config(struct phylink
bool permit_pause_to_mac)
{
struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
+ unsigned int rgc3;
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ rgc3 = RG_PHY_SPEED_3_125G;
+ else
+ rgc3 = 0;
+
+ /* Configure the underlying interface speed */
+ regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
+ RG_PHY_SPEED_3_125G, rgc3);

/* Setup SGMIISYS with the determined property */
if (interface != PHY_INTERFACE_MODE_SGMII)
@ -0,0 +1,52 @@
From c125c66ea71b9377ae2478c4f1b87b180cc5c6ef Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
Date: Thu, 27 Oct 2022 14:11:18 +0100
Subject: [PATCH 08/10] net: mtk_eth_soc: add advertisement programming

Program the advertisement into the mtk PCS block.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_sgmii.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -70,16 +70,27 @@ static int mtk_pcs_config(struct phylink
{
struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
unsigned int rgc3;
+ int advertise;
+ bool changed;

if (interface == PHY_INTERFACE_MODE_2500BASEX)
rgc3 = RG_PHY_SPEED_3_125G;
else
rgc3 = 0;

+ advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
+ advertising);
+ if (advertise < 0)
+ return advertise;
+
/* Configure the underlying interface speed */
regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
RG_PHY_SPEED_3_125G, rgc3);

+ /* Update the advertisement, noting whether it has changed */
+ regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
+ SGMII_ADVERTISE, advertise, &changed);
+
/* Setup SGMIISYS with the determined property */
if (interface != PHY_INTERFACE_MODE_SGMII)
mtk_pcs_setup_mode_force(mpcs, interface);
@@ -90,7 +101,7 @@ static int mtk_pcs_config(struct phylink
regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
SGMII_PHYA_PWD, 0);

- return 0;
+ return changed;
}

static void mtk_pcs_restart_an(struct phylink_pcs *pcs)
@ -0,0 +1,63 @@
From 3027d89f87707e7f3e5b683e0d37a32afb5bde96 Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
Date: Thu, 27 Oct 2022 14:11:23 +0100
Subject: [PATCH 09/10] net: mtk_eth_soc: move and correct link timer
 programming

Program the link timer appropriately for the interface mode being
used, using the newly introduced phylink helper that provides the
nanosecond link timer interval.

The intervals are 1.6ms for SGMII based protocols and 10ms for
802.3z based protocols.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_sgmii.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -36,10 +36,6 @@ static void mtk_pcs_get_state(struct phy
/* For SGMII interface mode */
static void mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs)
{
- /* Setup the link timer and QPHY power up inside SGMIISYS */
- regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER,
- SGMII_LINK_TIMER_DEFAULT);
-
regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
SGMII_REMOTE_FAULT_DIS, SGMII_REMOTE_FAULT_DIS);

@@ -69,8 +65,8 @@ static int mtk_pcs_config(struct phylink
bool permit_pause_to_mac)
{
struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
+ int advertise, link_timer;
unsigned int rgc3;
- int advertise;
bool changed;

if (interface == PHY_INTERFACE_MODE_2500BASEX)
@@ -83,6 +79,10 @@ static int mtk_pcs_config(struct phylink
if (advertise < 0)
return advertise;

+ link_timer = phylink_get_link_timer_ns(interface);
+ if (link_timer < 0)
+ return link_timer;
+
/* Configure the underlying interface speed */
regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
RG_PHY_SPEED_3_125G, rgc3);
@@ -91,6 +91,9 @@ static int mtk_pcs_config(struct phylink
regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
SGMII_ADVERTISE, advertise, &changed);

+ /* Setup the link timer and QPHY power up inside SGMIISYS */
+ regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8);
+
/* Setup SGMIISYS with the determined property */
if (interface != PHY_INTERFACE_MODE_SGMII)
mtk_pcs_setup_mode_force(mpcs, interface);
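
The division by 2 and then by 8 matches the register unit noted in
patch 01/10 ("the unit in 2 * 8ns", i.e. 16 ns per count). A quick
sanity check of the conversion, assuming the 1.6 ms SGMII interval from
the commit message:

#include <assert.h>

int main(void)
{
        int link_timer = 1600000;       /* 1.6 ms in nanoseconds (SGMII) */
        int reg = link_timer / 2 / 8;   /* register counts 16 ns units */

        /* 100000 == 0x186a0, the old SGMII_LINK_TIMER_DEFAULT value,
         * so the helper-based programming reproduces the old default. */
        assert(reg == 0x186a0);
        return 0;
}
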
@ -0,0 +1,132 @@
From 81b0f12a2a8a1699a7d49c3995e5f71e4ec018e6 Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
Date: Thu, 27 Oct 2022 14:11:28 +0100
Subject: [PATCH 10/10] net: mtk_eth_soc: add support for in-band 802.3z
 negotiation

As a result of help from Frank Wunderlich to investigate and test, we
now know how to program this PCS for in-band 802.3z negotiation. Add
support for this by moving the contents of the two functions into the
common mtk_pcs_config() function and adding the register settings for
802.3z negotiation.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_sgmii.c | 77 ++++++++++++-----------
 1 file changed, 42 insertions(+), 35 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -33,41 +33,15 @@ static void mtk_pcs_get_state(struct phy
FIELD_GET(SGMII_LPA, adv));
}

-/* For SGMII interface mode */
-static void mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs)
-{
- regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
- SGMII_REMOTE_FAULT_DIS, SGMII_REMOTE_FAULT_DIS);
-
- regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
- SGMII_AN_RESTART, SGMII_AN_RESTART);
-}
-
-/* For 1000BASE-X and 2500BASE-X interface modes, which operate at a
- * fixed speed.
- */
-static void mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs,
- phy_interface_t interface)
-{
- /* Disable SGMII AN */
- regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
- SGMII_AN_ENABLE, 0);
-
- /* Set the speed etc but leave the duplex unchanged */
- regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
- SGMII_IF_MODE_MASK & ~SGMII_DUPLEX_FULL,
- SGMII_SPEED_1000);
-}
-
static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
+ unsigned int rgc3, sgm_mode, bmcr;
int advertise, link_timer;
- unsigned int rgc3;
- bool changed;
+ bool changed, use_an;

if (interface == PHY_INTERFACE_MODE_2500BASEX)
rgc3 = RG_PHY_SPEED_3_125G;
@@ -83,6 +57,37 @@ static int mtk_pcs_config(struct phylink
if (link_timer < 0)
return link_timer;

+ /* Clearing IF_MODE_BIT0 switches the PCS to BASE-X mode, and
+ * we assume that fixes its speed at bitrate = line rate (in
+ * other words, 1000Mbps or 2500Mbps).
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ sgm_mode = SGMII_IF_MODE_SGMII;
+ if (phylink_autoneg_inband(mode)) {
+ sgm_mode |= SGMII_REMOTE_FAULT_DIS |
+ SGMII_SPEED_DUPLEX_AN;
+ use_an = true;
+ } else {
+ use_an = false;
+ }
+ } else if (phylink_autoneg_inband(mode)) {
+ /* 1000base-X or 2500base-X autoneg */
+ sgm_mode = SGMII_REMOTE_FAULT_DIS;
+ use_an = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ } else {
+ /* 1000base-X or 2500base-X without autoneg */
+ sgm_mode = 0;
+ use_an = false;
+ }
+
+ if (use_an) {
+ /* FIXME: Do we need to set AN_RESTART here? */
+ bmcr = SGMII_AN_RESTART | SGMII_AN_ENABLE;
+ } else {
+ bmcr = 0;
+ }
+
/* Configure the underlying interface speed */
regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
RG_PHY_SPEED_3_125G, rgc3);
@@ -94,11 +99,14 @@ static int mtk_pcs_config(struct phylink
/* Setup the link timer and QPHY power up inside SGMIISYS */
regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8);

- /* Setup SGMIISYS with the determined property */
- if (interface != PHY_INTERFACE_MODE_SGMII)
- mtk_pcs_setup_mode_force(mpcs, interface);
- else if (phylink_autoneg_inband(mode))
- mtk_pcs_setup_mode_an(mpcs);
+ /* Update the sgmsys mode register */
+ regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
+ SGMII_REMOTE_FAULT_DIS | SGMII_SPEED_DUPLEX_AN |
+ SGMII_IF_MODE_SGMII, sgm_mode);
+
+ /* Update the BMCR */
+ regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
+ SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr);

/* Release PHYA power down state */
regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
@@ -121,8 +129,7 @@ static void mtk_pcs_link_up(struct phyli
struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
unsigned int sgm_mode;

- if (!phylink_autoneg_inband(mode) ||
- phy_interface_mode_is_8023z(interface)) {
+ if (!phylink_autoneg_inband(mode)) {
/* Force the speed and duplex setting */
if (speed == SPEED_10)
sgm_mode = SGMII_SPEED_10;
@ -0,0 +1,119 @@
From 7ff82416de8295c61423ef6fd75f052d3837d2f7 Mon Sep 17 00:00:00 2001
From: Alexander Couzens <lynxis@fe80.eu>
Date: Wed, 1 Feb 2023 19:23:29 +0100
Subject: [PATCH 11/13] net: mediatek: sgmii: ensure the SGMII PHY is powered
 down on configuration
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The code expects the PHY to be in power down, which is only true after
reset. Allow changes of the SGMII parameters more than once.

Only power down when reconfiguring to avoid bouncing the link when there's
no reason to - based on code from Russell King.

There are cases when the SGMII_PHYA_PWD register contains 0x9 which
prevents SGMII from working. The SGMII still shows link but no traffic
can flow. Writing 0x0 to the PHYA_PWD register fixes the issue. 0x0 was
taken from a good working state of the SGMII interface.

Fixes: 42c03844e93d ("net-next: mediatek: add support for MediaTek MT7622 SoC")
Suggested-by: Russell King (Oracle) <linux@armlinux.org.uk>
Signed-off-by: Alexander Couzens <lynxis@fe80.eu>
[ bmork: rebased and squashed into one patch ]
Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Bjørn Mork <bjorn@mork.no>
Acked-by: Daniel Golle <daniel@makrotopia.org>
Tested-by: Daniel Golle <daniel@makrotopia.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 ++
 drivers/net/ethernet/mediatek/mtk_sgmii.c | 39 +++++++++++++++------
 2 files changed, 30 insertions(+), 11 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1073,11 +1073,13 @@ struct mtk_soc_data {
* @regmap: The register map pointing at the range used to setup
* SGMII modes
* @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
+ * @interface: Currently configured interface mode
* @pcs: Phylink PCS structure
*/
struct mtk_pcs {
struct regmap *regmap;
u32 ana_rgc3;
+ phy_interface_t interface;
struct phylink_pcs pcs;
};

--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -43,11 +43,6 @@ static int mtk_pcs_config(struct phylink
int advertise, link_timer;
bool changed, use_an;

- if (interface == PHY_INTERFACE_MODE_2500BASEX)
- rgc3 = RG_PHY_SPEED_3_125G;
- else
- rgc3 = 0;
-
advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
advertising);
if (advertise < 0)
@@ -88,9 +83,22 @@ static int mtk_pcs_config(struct phylink
bmcr = 0;
}

- /* Configure the underlying interface speed */
- regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
- RG_PHY_SPEED_3_125G, rgc3);
+ if (mpcs->interface != interface) {
+ /* PHYA power down */
+ regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
+ SGMII_PHYA_PWD, SGMII_PHYA_PWD);
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ rgc3 = RG_PHY_SPEED_3_125G;
+ else
+ rgc3 = 0;
+
+ /* Configure the underlying interface speed */
+ regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
+ RG_PHY_SPEED_3_125G, rgc3);
+
+ mpcs->interface = interface;
+ }

/* Update the advertisement, noting whether it has changed */
regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
@@ -108,9 +116,17 @@ static int mtk_pcs_config(struct phylink
regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr);

- /* Release PHYA power down state */
- regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
- SGMII_PHYA_PWD, 0);
+ /* Release PHYA power down state
+ * Only removing bit SGMII_PHYA_PWD isn't enough.
+ * There are cases when the SGMII_PHYA_PWD register contains 0x9 which
+ * prevents SGMII from working. The SGMII still shows link but no traffic
+ * can flow. Writing 0x0 to the PHYA_PWD register fixes the issue. 0x0 was
+ * taken from a good working state of the SGMII interface.
+ * Unknown how much the QPHY needs but it is racy without a sleep.
+ * Tested on mt7622 & mt7986.
+ */
+ usleep_range(50, 100);
+ regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0);

return changed;
}
@@ -171,6 +187,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss,
return PTR_ERR(ss->pcs[i].regmap);

ss->pcs[i].pcs.ops = &mtk_pcs_ops;
+ ss->pcs[i].interface = PHY_INTERFACE_MODE_NA;
}

return 0;
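
The mpcs->interface cache implements a simple idempotence guard: the
analog block is only power-cycled when the interface mode actually
changes, and the PHY_INTERFACE_MODE_NA initial value guarantees the
first configuration always runs. A minimal standalone sketch of the
idea (hypothetical types, not the driver's code):

#include <assert.h>

enum phy_mode { MODE_NA, MODE_SGMII, MODE_2500BASEX };

struct pcs_state {
        enum phy_mode interface;        /* starts as MODE_NA */
        int reconfig_count;
};

static void pcs_config(struct pcs_state *s, enum phy_mode mode)
{
        if (s->interface != mode) {     /* only bounce the link on change */
                s->reconfig_count++;    /* stands in for power down + setup */
                s->interface = mode;
        }
}

int main(void)
{
        struct pcs_state s = { MODE_NA, 0 };

        pcs_config(&s, MODE_SGMII);     /* first call always reconfigures */
        pcs_config(&s, MODE_SGMII);     /* repeated config is a no-op */
        pcs_config(&s, MODE_2500BASEX); /* real change reconfigures */
        assert(s.reconfig_count == 2);
        return 0;
}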