Merge Mainline

CN_SZTL 2020-11-15 00:49:56 +08:00
commit d2489e6aeb
19 changed files with 241 additions and 217 deletions

View File

@@ -7,12 +7,13 @@
menu "Global build settings"
config JSON_ADD_IMAGE_INFO
bool "Create JSON info files per build image"
config JSON_OVERVIEW_IMAGE_INFO
bool "Create JSON info file overview per target"
default BUILDBOT
help
The JSON info files contain information about the device and
build images, stored next to the firmware images.
Create a JSON info file called profiles.json in the target
directory containing a machine readable list of built profiles
and resulting images.
config ALL_NONSHARED
bool "Select all target specific packages by default"

View File

@@ -527,8 +527,11 @@ endef
define Device/Build/image
GZ_SUFFIX := $(if $(filter %dtb %gz,$(2)),,$(if $(and $(findstring ext4,$(1)),$(CONFIG_TARGET_IMAGES_GZIP)),.gz))
$$(_TARGET): $(BIN_DIR)/$(call IMAGE_NAME,$(1),$(2))$$(GZ_SUFFIX)
$$(_TARGET): $(if $(CONFIG_JSON_OVERVIEW_IMAGE_INFO), \
$(BUILD_DIR)/json_info_files/$(call IMAGE_NAME,$(1),$(2)).json, \
$(BIN_DIR)/$(call IMAGE_NAME,$(1),$(2))$$(GZ_SUFFIX))
$(eval $(call Device/Export,$(KDIR)/tmp/$(call IMAGE_NAME,$(1),$(2)),$(1)))
ROOTFS/$(1)/$(3) := \
$(KDIR)/root.$(1)$$(strip \
$$(if $$(FS_OPTIONS/$(1)),+fs=$$(call param_mangle,$$(FS_OPTIONS/$(1)))) \
@@ -550,32 +553,35 @@ define Device/Build/image
$(BIN_DIR)/$(call IMAGE_NAME,$(1),$(2)): $(KDIR)/tmp/$(call IMAGE_NAME,$(1),$(2))
cp $$^ $$@
$(if $(CONFIG_JSON_ADD_IMAGE_INFO), \
DEVICE_ID="$(DEVICE_NAME)" \
BIN_DIR="$(BIN_DIR)" \
IMAGE_NAME="$(IMAGE_NAME)" \
IMAGE_TYPE=$(word 1,$(subst ., ,$(2))) \
IMAGE_PREFIX="$(IMAGE_PREFIX)" \
DEVICE_VENDOR="$(DEVICE_VENDOR)" \
DEVICE_MODEL="$(DEVICE_MODEL)" \
DEVICE_VARIANT="$(DEVICE_VARIANT)" \
DEVICE_ALT0_VENDOR="$(DEVICE_ALT0_VENDOR)" \
DEVICE_ALT0_MODEL="$(DEVICE_ALT0_MODEL)" \
DEVICE_ALT0_VARIANT="$(DEVICE_ALT0_VARIANT)" \
DEVICE_ALT1_VENDOR="$(DEVICE_ALT1_VENDOR)" \
DEVICE_ALT1_MODEL="$(DEVICE_ALT1_MODEL)" \
DEVICE_ALT1_VARIANT="$(DEVICE_ALT1_VARIANT)" \
DEVICE_ALT2_VENDOR="$(DEVICE_ALT2_VENDOR)" \
DEVICE_ALT2_MODEL="$(DEVICE_ALT2_MODEL)" \
DEVICE_ALT2_VARIANT="$(DEVICE_ALT2_VARIANT)" \
DEVICE_TITLE="$(DEVICE_TITLE)" \
TARGET="$(BOARD)" \
SUBTARGET="$(if $(SUBTARGET),$(SUBTARGET),generic)" \
VERSION_NUMBER="$(VERSION_NUMBER)" \
VERSION_CODE="$(VERSION_CODE)" \
SUPPORTED_DEVICES="$(SUPPORTED_DEVICES)" \
$(TOPDIR)/scripts/json_add_image_info.py \
)
$(BUILD_DIR)/json_info_files/$(call IMAGE_NAME,$(1),$(2)).json: $(BIN_DIR)/$(call IMAGE_NAME,$(1),$(2))$$(GZ_SUFFIX)
@mkdir -p $$(shell dirname $$@)
DEVICE_ID="$(DEVICE_NAME)" \
BIN_DIR="$(BIN_DIR)" \
SOURCE_DATE_EPOCH=$(SOURCE_DATE_EPOCH) \
IMAGE_NAME="$(IMAGE_NAME)" \
IMAGE_TYPE=$(word 1,$(subst ., ,$(2))) \
IMAGE_PREFIX="$(IMAGE_PREFIX)" \
DEVICE_VENDOR="$(DEVICE_VENDOR)" \
DEVICE_MODEL="$(DEVICE_MODEL)" \
DEVICE_VARIANT="$(DEVICE_VARIANT)" \
DEVICE_ALT0_VENDOR="$(DEVICE_ALT0_VENDOR)" \
DEVICE_ALT0_MODEL="$(DEVICE_ALT0_MODEL)" \
DEVICE_ALT0_VARIANT="$(DEVICE_ALT0_VARIANT)" \
DEVICE_ALT1_VENDOR="$(DEVICE_ALT1_VENDOR)" \
DEVICE_ALT1_MODEL="$(DEVICE_ALT1_MODEL)" \
DEVICE_ALT1_VARIANT="$(DEVICE_ALT1_VARIANT)" \
DEVICE_ALT2_VENDOR="$(DEVICE_ALT2_VENDOR)" \
DEVICE_ALT2_MODEL="$(DEVICE_ALT2_MODEL)" \
DEVICE_ALT2_VARIANT="$(DEVICE_ALT2_VARIANT)" \
DEVICE_TITLE="$(DEVICE_TITLE)" \
DEVICE_PACKAGES="$(DEVICE_PACKAGES)" \
TARGET="$(BOARD)" \
SUBTARGET="$(if $(SUBTARGET),$(SUBTARGET),generic)" \
VERSION_NUMBER="$(VERSION_NUMBER)" \
VERSION_CODE="$(VERSION_CODE)" \
SUPPORTED_DEVICES="$(SUPPORTED_DEVICES)" \
$(TOPDIR)/scripts/json_add_image_info.py $$@
endef
@@ -594,8 +600,6 @@ define Device/Build/artifact
endef
define Device/Build
$(shell rm -f $(BIN_DIR)/$(IMG_PREFIX)-$(1).json)
$(if $(CONFIG_TARGET_ROOTFS_INITRAMFS),$(call Device/Build/initramfs,$(1)))
$(call Device/Build/kernel,$(1))
@@ -679,6 +683,7 @@ define BuildImage
image_prepare: compile
mkdir -p $(BIN_DIR) $(KDIR)/tmp
rm -rf $(BUILD_DIR)/json_info_files
$(call Image/Prepare)
else
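
Note: the recipe above only exports environment variables and runs $(TOPDIR)/scripts/json_add_image_info.py with the JSON path as its single argument. A rough, hypothetical way to drive the script by hand for debugging (all values made up; the Makefile normally provides them):

import os
import subprocess

# Hypothetical values standing in for what the image Makefile exports.
env = dict(
    os.environ,
    BIN_DIR="bin/targets/ath79/generic",
    IMAGE_NAME="openwrt-ath79-generic-example_device-squashfs-sysupgrade.bin",
    IMAGE_TYPE="sysupgrade",
    IMAGE_PREFIX="openwrt-ath79-generic-example_device",
    DEVICE_ID="example_device",
    DEVICE_VENDOR="Example",
    DEVICE_MODEL="Device",
    DEVICE_TITLE="",
    DEVICE_PACKAGES="",
    TARGET="ath79",
    SUBTARGET="generic",
    VERSION_NUMBER="SNAPSHOT",
    VERSION_CODE="r15000-abcdef",
    SOURCE_DATE_EPOCH="1605398996",
    SUPPORTED_DEVICES="example,device",
)
# The output path is the script's only argument; its directory must already exist.
subprocess.run(["scripts/json_add_image_info.py", "/tmp/example-image.json"],
               env=env, check=True)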

View File

@@ -7,12 +7,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=annie
PKG_VERSION:=0.10.3-67298d5
PKG_VERSION:=0.10.3-68e47f1
PKG_RELEASE:=1
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/iawia002/annie.git
PKG_SOURCE_VERSION:=67298d55aaf43d840b8d7283b0ac99736173d285
PKG_SOURCE_VERSION:=68e47f1ae8b26b6db89118e995c8cafc1d5ab339
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION)

View File

@@ -7,7 +7,7 @@ include $(TOPDIR)/rules.mk
PKG_NAME:=luci-app-turboacc
PKG_VERSION:=1.0
PKG_RELEASE:=1
PKG_RELEASE:=2
PKG_LICENSE:=GPLv3.0+

View File

@@ -13,7 +13,7 @@ function index()
end
local function fastpath_status()
return luci.sys.call("[ x$(cat /sys/module/xt_FLOWOFFLOAD/refcnt 2>/dev/null) != x0 ] || lsmod | grep -q fast_classifier") == 0
return luci.sys.call("{ [ -e /sys/module/xt_FLOWOFFLOAD/refcnt ] && [ x$(cat /sys/module/xt_FLOWOFFLOAD/refcnt 2>/dev/null) != x0 ]; } || lsmod | grep -q fast_classifier") == 0
end
local function bbr_status()
@@ -21,7 +21,7 @@ local function bbr_status()
end
local function fullconebat_status()
return luci.sys.call("[ x$(cat /sys/module/xt_FULLCONENAT/refcnt 2>/dev/null) != x0 ]") == 0
return luci.sys.call("[ -e /sys/module/xt_FULLCONENAT/refcnt ] && [ x$(cat /sys/module/xt_FULLCONENAT/refcnt 2>/dev/null) != x0 ]") == 0
end
local function dnscaching_status()
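
Note on the two changes above: `cat` on a missing refcnt file prints nothing, so the old `[ x$(cat ...) != x0 ]` test was true even when the module was not loaded at all, and the status page reported the feature as running. The added `-e` tests close that hole. A rough Python equivalent of the new refcnt check, for illustration only:

from pathlib import Path

def module_in_use(name):
    # Mirrors the new shell test: the refcnt file must exist and not read "0".
    refcnt = Path("/sys/module") / name / "refcnt"
    return refcnt.is_file() and refcnt.read_text().strip() != "0"

print("xt_FULLCONENAT in use:", module_in_use("xt_FULLCONENAT"))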

View File

@@ -1,5 +1,3 @@
local ipkg = require('luci.model.ipkg')
local kernel_version = luci.sys.exec("echo -n $(uname -r)")
m = Map("turboacc")
@@ -38,7 +36,7 @@ sfe_bridge.default = 0
sfe_bridge.description = translate("Enable Bridge Acceleration (may be functional conflict with bridge-mode VPN server)")
sfe_bridge:depends("sfe_flow", 1)
if ipkg.installed("ip6tables") then
if nixio.fs.access("/proc/sys/net/ipv6") then
sfe_ipv6 = s:option(Flag, "sfe_ipv6", translate("IPv6 Acceleration"))
sfe_ipv6.default = 0
sfe_ipv6.description = translate("Enable IPv6 Acceleration")
@@ -48,14 +46,12 @@ end
if nixio.fs.access("/lib/modules/" .. kernel_version .. "/tcp_bbr.ko") then
bbr_cca = s:option(Flag, "bbr_cca", translate("BBR CCA"))
bbr_cca.default = 0
bbr_cca.rmempty = false
bbr_cca.description = translate("Using BBR CCA can improve TCP network performance effectively")
end
if nixio.fs.access("/lib/modules/" .. kernel_version .. "/xt_FULLCONENAT.ko") then
fullcone_nat = s:option(Flag, "fullcone_nat", translate("FullCone NAT"))
fullcone_nat.default = 0
fullcone_nat.rmempty = false
fullcone_nat.description = translate("Using FullCone NAT can improve gaming performance effectively")
end
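
Note: the option above is now gated on kernel IPv6 support (the presence of /proc/sys/net/ipv6) rather than on the ip6tables package being installed. The same check, sketched in Python:

import os

# Offer the IPv6 acceleration switch only when the kernel exposes IPv6.
print("IPv6 supported:", os.path.exists("/proc/sys/net/ipv6"))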

View File

@@ -20,6 +20,11 @@ inital_conf(){
config_get "dns_caching_mode" "config" "dns_caching_mode" "0"
config_get "dns_caching_v4_dns" "config" "dns_caching_v4_dns"
config_get "dns_caching_v6_dns" "config" "dns_caching_v6_dns"
[ ! -e "/lib/modules/$(uname -r)/xt_FLOWOFFLOAD.ko" ] && { sw_flow="0"; hw_flow="0"; }
[ ! -e "/lib/modules/$(uname -r)/fast-classifier.ko" ] && { sfe_flow="0"; sfe_bridge="0"; sfe_ipv6="0"; }
[ ! -e "/lib/modules/$(uname -r)/tcp_bbr.ko" ] && bbr_cca="0"
[ ! -e "/lib/modules/$(uname -r)/xt_FULLCONENAT.ko" ] && fullcone_nat="0"
}
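
Note: the four guards added to inital_conf() silently turn an option back off when its kernel module is not installed for the running kernel, so the rest of the script never tries to load a missing module. Roughly, in Python terms (module file names as in the script above):

import os
from pathlib import Path

modules_dir = Path("/lib/modules") / os.uname().release

def have_module(ko_name):
    # e.g. "xt_FLOWOFFLOAD.ko", "fast-classifier.ko", "tcp_bbr.ko", "xt_FULLCONENAT.ko"
    return (modules_dir / ko_name).exists()

sw_flow = "1"
if not have_module("xt_FLOWOFFLOAD.ko"):
    sw_flow = "0"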
start_pdnsd() {
@@ -33,87 +38,87 @@ global {
server_port=5333; # port pdnsd listens on; just make sure it does not conflict with other services
status_ctl = on;
paranoid=on; # second-query mode: if the primary DNS server returns a junk address, query the backup server instead
query_method=udp_only;
neg_domain_pol = off;
par_queries = 400; # maximum number of concurrent queries
min_ttl = 1h; # minimum cache time for DNS results
max_ttl = 1w; # maximum cache time for DNS results
timeout = 10; # DNS query timeout, in seconds
}
server {
label = "routine";
ip = ${dns_caching_v4_dns}; # IP address of the primary upstream DNS; a fast local DNS server is recommended
timeout = 5; # DNS query timeout
reject = 74.125.127.102, # the following are poisoned IPs, i.e. results commonly returned by DNS pollution; receiving one of them triggers the second query (TCP queries normally do not hit poisoned IPs)
74.125.155.102,
74.125.39.102,
74.125.39.113,
209.85.229.138,
128.121.126.139,
159.106.121.75,
169.132.13.103,
192.67.198.6,
202.106.1.2,
202.181.7.85,
203.161.230.171,
203.98.7.65,
207.12.88.98,
208.56.31.43,
209.145.54.50,
209.220.30.174,
209.36.73.33,
211.94.66.147,
213.169.251.35,
216.221.188.182,
216.234.179.13,
243.185.187.39,
37.61.54.158,
4.36.66.178,
46.82.174.68,
59.24.3.173,
64.33.88.161,
64.33.99.47,
64.66.163.251,
65.104.202.252,
65.160.219.113,
66.45.252.237,
69.55.52.253,
72.14.205.104,
72.14.205.99,
78.16.49.15,
8.7.198.45,
93.46.8.89,
37.61.54.158,
243.185.187.39,
190.93.247.4,
190.93.246.4,
190.93.245.4,
190.93.244.4,
65.49.2.178,
189.163.17.5,
23.89.5.60,
49.2.123.56,
54.76.135.1,
77.4.7.92,
118.5.49.6,
159.24.3.173,
188.5.4.96,
197.4.4.12,
220.250.64.24,
243.185.187.30,
249.129.46.48,
253.157.14.165;
reject_policy = fail;
}
server {
label = "special"; # 这个随便写
ip = 117.50.10.10,52.80.52.52,119.29.29.29; # 这里为备用DNS服务器的 ip 地址
port = 5353; # 推荐使用53以外的端口DNS服务器必须支持
proxy_only = on;
timeout = 5;
}
source {
owner=localhost;
@@ -217,7 +222,7 @@ start(){
lsmod | grep -q fast_classifier || modprobe fast_classifier 2>"/dev/null"
echo "${sfe_bridge}" > "/sys/fast_classifier/skip_to_bridge_ingress" 2>"/dev/null"
if [ "${sfe_ipv6}" -eq "1" ]; then
[ ! -f "/dev/sfe_ipv6" ] && mknod "/dev/sfe_ipv6" "c" "$(cat "/sys/sfe_ipv6/debug_dev")" "0"
[ ! -e "/dev/sfe_ipv6" ] && mknod "/dev/sfe_ipv6" "c" "$(cat "/sys/sfe_ipv6/debug_dev")" "0"
else
rm -f "/dev/sfe_ipv6"
fi
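
Note: `-f` only matches regular files, so once /dev/sfe_ipv6 exists as a character device the old test still ran mknod again and failed; `-e` matches any existing node. A rough Python equivalent of the corrected step (illustrative only, needs root):

import os
import stat
from pathlib import Path

dev_node = Path("/dev/sfe_ipv6")
if not dev_node.exists():                       # like [ ! -e /dev/sfe_ipv6 ]
    major = int(Path("/sys/sfe_ipv6/debug_dev").read_text())
    os.mknod(dev_node, stat.S_IFCHR | 0o600, os.makedev(major, 0))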

View File

@@ -1,58 +0,0 @@
#
# Copyright (c) 2017 Yu Wang <wangyucn@gmail.com>
#
# This is free software, licensed under the MIT.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=speederv2-tunnel
PKG_VERSION:=20190408
PKG_RELEASE:=1
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/wangyu-/UDPspeeder.git
PKG_SOURCE_VERSION:=ecc90928d33741dbe726b547f2d8322540c26ea0
PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=LICENSE
PKG_MAINTAINER:=wangyu-
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_BUILD_PARALLEL:=1
include $(INCLUDE_DIR)/package.mk
define Package/speederv2-tunnel
SECTION:=net
CATEGORY:=Network
TITLE:=UDP Network Speed-Up Tool
URL:=https://github.com/wangyu-/UDPspeeder
DEPENDS:= +libstdcpp +librt
endef
define Package/speederv2-tunnel/description
A Tunnel which Improves your Network Quality on a High-latency Lossy Link by using Forward Error Correction,for All Traffics(TCP/UDP/ICMP)
endef
MAKE_FLAGS += cross
define Build/Prepare
$(PKG_UNPACK)
sed -i 's/cc_cross=.*/cc_cross=$(TARGET_CXX)/g' $(PKG_BUILD_DIR)/makefile
sed -i '/\gitversion/d' $(PKG_BUILD_DIR)/makefile
echo 'const char * const gitversion = "$(PKG_VERSION)";' > $(PKG_BUILD_DIR)/git_version.h
$(Build/Patch)
endef
define Package/speederv2-tunnel/install
$(INSTALL_DIR) $(1)/usr/bin
$(INSTALL_BIN) $(PKG_BUILD_DIR)/speederv2_cross $(1)/usr/bin/speederv2
( cd $(1)/usr/bin; $(LN) speederv2 udpspeeder; )
endef
$(eval $(call BuildPackage,speederv2-tunnel))

View File

@@ -8,7 +8,7 @@ PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/1715173329/dnsforwarder.git
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
PKG_SOURCE_VERSION:=693b554e59479c2867c74f0bb5e26290b93747c5
PKG_SOURCE_VERSION:=587e61ae4d75dc976f538088b715a3c8ee26c144
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_MAINTAINER:=Dennis <openwrt@tossp.com>
PKG_LICENSE:=GPL-3.0

View File

@@ -8,12 +8,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=iproute2
PKG_VERSION:=5.8.0
PKG_VERSION:=5.9.0
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=@KERNEL/linux/utils/net/iproute2
PKG_HASH:=cfcd1f890290f8c8afcc91d9444ad929b9252c16f9ab3f286c50dd3c59dc646e
PKG_HASH:=a25dac94bcdcf2f73316c7f812115ea7a5710580bad892b08a83d00c6b33dacf
PKG_BUILD_PARALLEL:=1
PKG_BUILD_DEPENDS:=iptables
PKG_LICENSE:=GPL-2.0

View File

@@ -1,6 +1,6 @@
--- a/tc/Makefile
+++ b/tc/Makefile
@@ -127,6 +127,9 @@ CFLAGS += -DCONFIG_GACT -DCONFIG_GACT_PR
@@ -128,6 +128,9 @@ CFLAGS += -DCONFIG_GACT -DCONFIG_GACT_PR
ifneq ($(IPT_LIB_DIR),)
CFLAGS += -DIPT_LIB_DIR=\"$(IPT_LIB_DIR)\"
endif

View File

@@ -9,7 +9,7 @@
endif
TCLIB := tc_core.o
@@ -143,7 +143,7 @@ MODDESTDIR := $(DESTDIR)$(LIBDIR)/tc
@@ -144,7 +144,7 @@ MODDESTDIR := $(DESTDIR)$(LIBDIR)/tc
all: tc $(TCSO)
tc: $(TCOBJ) $(LIBNETLINK) libtc.a
@@ -18,7 +18,7 @@
libtc.a: $(TCLIB)
$(QUIET_AR)$(AR) rcs $@ $^
@@ -165,6 +165,7 @@ install: all
@@ -166,6 +166,7 @@ install: all
clean:
rm -f $(TCOBJ) $(TCLIB) libtc.a tc *.so emp_ematch.tab.h; \
rm -f emp_ematch.tab.*
@@ -26,7 +26,7 @@
q_atm.so: q_atm.c
$(QUIET_CC)$(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) -shared -fpic -o q_atm.so q_atm.c -latm
@@ -204,4 +205,15 @@ static-syms.h: $(wildcard *.c)
@@ -205,4 +206,15 @@ static-syms.h: $(wildcard *.c)
sed -n '/'$$s'[^ ]* =/{s:.* \([^ ]*'$$s'[^ ]*\) .*:extern char \1[] __attribute__((weak)); if (!strcmp(sym, "\1")) return \1;:;p}' $$files ; \
done > $@

View File

@@ -8,12 +8,12 @@
include $(TOPDIR)/rules.mk
LUCI_TITLE:=LuCI Support for speederv2-tunnel
LUCI_DEPENDS:=+speederv2-tunnel
LUCI_DEPENDS:=+UDPspeeder
LUCI_PKGARCH:=all
PKG_NAME:=luci-app-speederv2
PKG_VERSION:=1.1.0
PKG_RELEASE:=3
PKG_RELEASE:=4
PKG_LICENSE:=GPLv3
PKG_LICENSE_FILES:=LICENSE

View File

@@ -18,6 +18,6 @@ end
function action_status()
luci.http.prepare_content("application/json")
luci.http.write_json({
running = is_running("speederv2")
running = is_running("udpspeeder")
})
end

View File

@@ -74,7 +74,7 @@ start_instance() {
[ $? -eq 0 ] && server_ip="[$server_ip]"
procd_open_instance
procd_set_param command /usr/bin/speederv2
procd_set_param command /usr/bin/udpspeeder
procd_append_param command -c
procd_append_param command -r ${server_ip}:${server_port}
procd_append_param command -l ${listen_ip}:${listen_port} && _log "info" "listening on: ${listen_ip}:${listen_port}"

View File

@@ -12,9 +12,9 @@ PKG_RELEASE:=1
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL=$(PROJECT_GIT)/project/procd.git
PKG_SOURCE_DATE:=2020-08-10
PKG_SOURCE_VERSION:=fad899769e1411cc273785461f073a0f7931b9a0
PKG_MIRROR_HASH:=f56b621f78f821f7070d85f63448f684af7e2fe0c85233a8d054683cc34f6d06
PKG_SOURCE_DATE:=2020-11-06
PKG_SOURCE_VERSION:=b0de894830a93da5b303a3d89a42baf163d0a58c
PKG_MIRROR_HASH:=ba7f7631fa74d3bede0241f5d96ee4939f15164e79290d1ac7fd15fc8ed628d9
CMAKE_INSTALL:=1
PKG_LICENSE:=GPL-2.0

View File

@@ -1,18 +1,22 @@
#!/usr/bin/env python3
import json
import os
from os import getenv
from pathlib import Path
from sys import argv
import hashlib
import json
if len(argv) != 2:
print("ERROR: JSON info script requires output arg")
exit(1)
def e(variable, default=None):
return os.environ.get(variable, default)
json_path = Path(argv[1])
bin_dir = Path(getenv("BIN_DIR"))
image_file = bin_dir / getenv("IMAGE_NAME")
json_path = "{}{}{}.json".format(e("BIN_DIR"), os.sep, e("IMAGE_PREFIX"))
with open(os.path.join(e("BIN_DIR"), e("IMAGE_NAME")), "rb") as image_file:
image_hash = hashlib.sha256(image_file.read()).hexdigest()
if not image_file.is_file():
print("Skip JSON creation for non existing image ", image_file)
exit(0)
def get_titles():
@@ -20,36 +24,42 @@ def get_titles():
for prefix in ["", "ALT0_", "ALT1_", "ALT2_"]:
title = {}
for var in ["vendor", "model", "variant"]:
if e("DEVICE_{}{}".format(prefix, var.upper())):
title[var] = e("DEVICE_{}{}".format(prefix, var.upper()))
if getenv("DEVICE_{}{}".format(prefix, var.upper())):
title[var] = getenv("DEVICE_{}{}".format(prefix, var.upper()))
if title:
titles.append(title)
if not titles:
titles.append({"title": e("DEVICE_TITLE")})
titles.append({"title": getenv("DEVICE_TITLE")})
return titles
if not os.path.exists(json_path):
device_info = {
"id": e("DEVICE_ID"),
"image_prefix": e("IMAGE_PREFIX"),
"images": [],
"metadata_version": 1,
"supported_devices": e("SUPPORTED_DEVICES").split(),
"target": "{}/{}".format(e("TARGET"), e("SUBTARGET", "generic")),
"titles": get_titles(),
"version_commit": e("VERSION_CODE"),
"version_number": e("VERSION_NUMBER"),
}
else:
with open(json_path, "r") as json_file:
device_info = json.load(json_file)
device_id = getenv("DEVICE_ID")
image_hash = hashlib.sha256(image_file.read_bytes()).hexdigest()
image_info = {"type": e("IMAGE_TYPE"), "name": e("IMAGE_NAME"), "sha256": image_hash}
device_info["images"].append(image_info)
image_info = {
"metadata_version": 1,
"target": "{}/{}".format(getenv("TARGET"), getenv("SUBTARGET")),
"version_code": getenv("VERSION_CODE"),
"version_number": getenv("VERSION_NUMBER"),
"source_date_epoch": getenv("SOURCE_DATE_EPOCH"),
"profiles": {
device_id: {
"image_prefix": getenv("IMAGE_PREFIX"),
"images": [
{
"type": getenv("IMAGE_TYPE"),
"name": getenv("IMAGE_NAME"),
"sha256": image_hash,
}
],
"device_packages": getenv("DEVICE_PACKAGES").split(),
"supported_devices": getenv("SUPPORTED_DEVICES").split(),
"titles": get_titles(),
}
},
}
with open(json_path, "w") as json_file:
json.dump(device_info, json_file, sort_keys=True, indent=" ")
json_path.write_text(json.dumps(image_info, separators=(",", ":")))

View File

@@ -0,0 +1,55 @@
#!/usr/bin/env python3
from os import getenv, environ
from pathlib import Path
from subprocess import run, PIPE
from sys import argv
import json
if len(argv) != 2:
print("JSON info files script requires ouput file as argument")
exit(1)
output_path = Path(argv[1])
assert getenv("WORK_DIR"), "$WORK_DIR required"
work_dir = Path(getenv("WORK_DIR"))
output = {}
for json_file in work_dir.glob("*.json"):
image_info = json.loads(json_file.read_text())
if not output:
output.update(image_info)
else:
# get first (and only) profile in json file
device_id = next(iter(image_info["profiles"].keys()))
if device_id not in output["profiles"]:
output["profiles"].update(image_info["profiles"])
else:
output["profiles"][device_id]["images"].append(
image_info["profiles"][device_id]["images"][0]
)
if output:
default_packages, output["arch_packages"] = run(
[
"make",
"--no-print-directory",
"-C",
"target/linux/{}".format(output['target'].split('/')[0]),
"val.DEFAULT_PACKAGES",
"val.ARCH_PACKAGES",
],
stdout=PIPE,
stderr=PIPE,
check=True,
env=environ.copy().update({"TOPDIR": Path().cwd()}),
universal_newlines=True,
).stdout.splitlines()
output["default_packages"] = default_packages.split()
output_path.write_text(json.dumps(output, sort_keys=True, separators=(",", ":")))
else:
print("JSON info file script could not find any JSON files for target")

View File

@@ -114,6 +114,7 @@ _call_image: staging_dir/host/.prereq-build
$(MAKE) package_install
$(MAKE) -s prepare_rootfs
$(MAKE) -s build_image
$(MAKE) -s json_overview_image_info
$(MAKE) -s checksum
_call_manifest: FORCE
@@ -162,6 +163,7 @@ prepare_rootfs: FORCE
$(CP) $(TARGET_DIR) $(TARGET_DIR_ORIG)
$(call prepare_rootfs,$(TARGET_DIR),$(USER_FILES))
build_image: FORCE
@echo
@echo Building images...
@@ -169,6 +171,14 @@ build_image: FORCE
$(NO_TRACE_MAKE) -C target/linux/$(BOARD)/image install TARGET_BUILD=1 IB=1 EXTRA_IMAGE_NAME="$(EXTRA_IMAGE_NAME)" \
$(if $(USER_PROFILE),PROFILE="$(USER_PROFILE)")
$(BIN_DIR)/profiles.json: FORCE
$(if $(CONFIG_JSON_OVERVIEW_IMAGE_INFO), \
WORK_DIR=$(BUILD_DIR)/json_info_files \
$(SCRIPT_DIR)/json_overview_image_info.py $@ \
)
json_overview_image_info: $(BIN_DIR)/profiles.json
checksum: FORCE
@echo
@echo Calculating checksums...