Diffstat (limited to 'scripts')
-rw-r--r--  scripts/Makefile.dtbs                                   |    1
-rw-r--r--  scripts/Makefile.extrawarn                              |   18
-rw-r--r--  scripts/Makefile.vmlinux                                |   79
-rw-r--r--  scripts/Makefile.vmlinux_o                              |   26
-rwxr-xr-x  scripts/atomic/gen-atomics.sh                           |    1
-rwxr-xr-x  scripts/atomic/gen-rust-atomic-helpers.sh               |   67
-rwxr-xr-x  scripts/checkpatch.pl                                   |   14
-rw-r--r--  scripts/coccinelle/api/platform_no_drv_owner.cocci      |    9
-rw-r--r--  scripts/coccinelle/misc/of_table.cocci                  |   14
-rw-r--r--  scripts/coccinelle/misc/ptr_err_to_pe.cocci             |   34
-rwxr-xr-x  scripts/crypto/gen-hash-testvecs.py                     |   27
-rwxr-xr-x  scripts/decode_stacktrace.sh                            |   35
-rw-r--r--  scripts/dtc/checks.c                                    |   23
-rw-r--r--  scripts/dtc/data.c                                      |   47
-rwxr-xr-x  scripts/dtc/dt_to_config                                |    8
-rw-r--r--  scripts/dtc/dtc-lexer.l                                 |   15
-rw-r--r--  scripts/dtc/dtc.c                                       |    6
-rw-r--r--  scripts/dtc/dtc.h                                       |    5
-rw-r--r--  scripts/dtc/fdtoverlay.c                                |    8
-rw-r--r--  scripts/dtc/flattree.c                                  |    2
-rw-r--r--  scripts/dtc/libfdt/fdt.c                                |    8
-rw-r--r--  scripts/dtc/libfdt/fdt.h                                |    4
-rw-r--r--  scripts/dtc/libfdt/fdt_overlay.c                        |    8
-rw-r--r--  scripts/dtc/libfdt/fdt_rw.c                             |   41
-rw-r--r--  scripts/dtc/libfdt/libfdt.h                             |  179
-rw-r--r--  scripts/dtc/libfdt/libfdt_internal.h                    |   14
-rw-r--r--  scripts/dtc/livetree.c                                  |   25
-rw-r--r--  scripts/dtc/srcpos.c                                    |   17
-rw-r--r--  scripts/dtc/srcpos.h                                    |    1
-rw-r--r--  scripts/dtc/treesource.c                                |   52
-rw-r--r--  scripts/dtc/util.c                                      |   16
-rw-r--r--  scripts/dtc/util.h                                      |    5
-rw-r--r--  scripts/dtc/version_gen.h                               |    2
-rwxr-xr-x  scripts/extract-vmlinux                                 |    8
-rw-r--r--  scripts/gcc-plugins/gcc-common.h                        |    7
-rw-r--r--  scripts/gdb/linux/timerlist.py                          |    2
-rwxr-xr-x  scripts/generate_rust_analyzer.py                       |    4
-rwxr-xr-x  scripts/headers_install.sh                              |    2
-rw-r--r--  scripts/kconfig/expr.h                                  |    1
-rw-r--r--  scripts/kconfig/lexer.l                                 |    1
-rw-r--r--  scripts/kconfig/nconf.gui.c                             |    8
-rw-r--r--  scripts/kconfig/parser.y                                |   47
-rw-r--r--  scripts/kconfig/qconf.cc                                |   13
-rw-r--r--  scripts/kconfig/symbol.c                                |    7
-rw-r--r--  scripts/kconfig/tests/conftest.py                       |   17
-rw-r--r--  scripts/kconfig/tests/err_transitional/Kconfig          |   52
-rw-r--r--  scripts/kconfig/tests/err_transitional/__init__.py      |   14
-rw-r--r--  scripts/kconfig/tests/err_transitional/expected_stderr  |    7
-rw-r--r--  scripts/kconfig/tests/transitional/Kconfig              |  100
-rw-r--r--  scripts/kconfig/tests/transitional/__init__.py          |   18
-rw-r--r--  scripts/kconfig/tests/transitional/expected_config      |   12
-rw-r--r--  scripts/kconfig/tests/transitional/initial_config       |   16
-rwxr-xr-x  scripts/kernel-doc.py                                   |   34
-rw-r--r--  scripts/lib/kdoc/kdoc_parser.py                         |  848
-rwxr-xr-x  scripts/link-vmlinux.sh                                 |    5
-rwxr-xr-x  scripts/min-tool-version.sh                             |    6
-rwxr-xr-x  scripts/misc-check                                      |    4
-rwxr-xr-x  scripts/mksysmap                                        |    6
-rw-r--r--  scripts/mod/file2alias.c                                |   35
-rw-r--r--  scripts/mod/modpost.c                                   |   15
-rw-r--r--  scripts/mod/modpost.h                                   |    2
-rw-r--r--  scripts/rustdoc_test_gen.rs                             |    2
-rwxr-xr-x  scripts/selinux/install_policy.sh                       |    2
-rwxr-xr-x  scripts/sphinx-build-wrapper                            |  719
-rwxr-xr-x  scripts/sphinx-pre-install                              | 2669
65 files changed, 3764 insertions(+), 1730 deletions(-)
diff --git a/scripts/Makefile.dtbs b/scripts/Makefile.dtbs
index 8d56c0815f33..2d321b813600 100644
--- a/scripts/Makefile.dtbs
+++ b/scripts/Makefile.dtbs
@@ -97,6 +97,7 @@ DTC_FLAGS += -Wno-unit_address_vs_reg \
-Wno-avoid_unnecessary_addr_size \
-Wno-alias_paths \
-Wno-graph_child_address \
+ -Wno-interrupt_map \
-Wno-simple_bus_reg
else
DTC_FLAGS += -Wunique_unit_address_if_enabled
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index dca175fffcab..1434cb6208cb 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -25,8 +25,6 @@ ifneq ($(CONFIG_FRAME_WARN),0)
KBUILD_CFLAGS += -Wframe-larger-than=$(CONFIG_FRAME_WARN)
endif
-KBUILD_CPPFLAGS-$(CONFIG_WERROR) += -Werror
-KBUILD_CPPFLAGS += $(KBUILD_CPPFLAGS-y)
KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds
ifdef CONFIG_CC_IS_CLANG
@@ -214,10 +212,20 @@ KBUILD_CFLAGS += -Wno-unused-parameter
endif
#
-# W=e - error out on warnings
+# W=e and CONFIG_WERROR - error out on warnings
#
-ifneq ($(findstring e, $(KBUILD_EXTRA_WARN)),)
+ifneq ($(findstring e, $(KBUILD_EXTRA_WARN))$(CONFIG_WERROR),)
-KBUILD_CFLAGS += -Werror
+KBUILD_CPPFLAGS += -Werror
+KBUILD_AFLAGS += -Wa,--fatal-warnings
+KBUILD_LDFLAGS += --fatal-warnings
+KBUILD_USERCFLAGS += -Werror
+KBUILD_USERLDFLAGS += -Wl,--fatal-warnings
+KBUILD_RUSTFLAGS += -Dwarnings
endif
+
+# Hostprog flags are used during build bootstrapping and can not rely on CONFIG_ symbols.
+KBUILD_HOSTCFLAGS += -Werror
+KBUILD_HOSTLDFLAGS += -Wl,--fatal-warnings
+KBUILD_HOSTRUSTFLAGS += -Dwarnings
diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux
index b64862dc6f08..7c6ae9886f8f 100644
--- a/scripts/Makefile.vmlinux
+++ b/scripts/Makefile.vmlinux
@@ -9,20 +9,6 @@ include $(srctree)/scripts/Makefile.lib
targets :=
-ifdef CONFIG_ARCH_VMLINUX_NEEDS_RELOCS
-vmlinux-final := vmlinux.unstripped
-
-quiet_cmd_strip_relocs = RSTRIP $@
- cmd_strip_relocs = $(OBJCOPY) --remove-section='.rel*' --remove-section=!'.rel*.dyn' $< $@
-
-vmlinux: $(vmlinux-final) FORCE
- $(call if_changed,strip_relocs)
-
-targets += vmlinux
-else
-vmlinux-final := vmlinux
-endif
-
%.o: %.c FORCE
$(call if_changed_rule,cc_o_c)
@@ -61,19 +47,14 @@ targets += .builtin-dtbs-list
ifdef CONFIG_GENERIC_BUILTIN_DTB
targets += .builtin-dtbs.S .builtin-dtbs.o
-$(vmlinux-final): .builtin-dtbs.o
+vmlinux.unstripped: .builtin-dtbs.o
endif
-# vmlinux
+# vmlinux.unstripped
# ---------------------------------------------------------------------------
-ifdef CONFIG_MODULES
-targets += .vmlinux.export.o
-$(vmlinux-final): .vmlinux.export.o
-endif
-
ifdef CONFIG_ARCH_WANTS_PRE_LINK_VMLINUX
-$(vmlinux-final): arch/$(SRCARCH)/tools/vmlinux.arch.o
+vmlinux.unstripped: arch/$(SRCARCH)/tools/vmlinux.arch.o
arch/$(SRCARCH)/tools/vmlinux.arch.o: vmlinux.o FORCE
$(Q)$(MAKE) $(build)=arch/$(SRCARCH)/tools $@
@@ -86,17 +67,61 @@ cmd_link_vmlinux = \
$< "$(LD)" "$(KBUILD_LDFLAGS)" "$(LDFLAGS_vmlinux)" "$@"; \
$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
-targets += $(vmlinux-final)
-$(vmlinux-final): scripts/link-vmlinux.sh vmlinux.o $(KBUILD_LDS) FORCE
+targets += vmlinux.unstripped .vmlinux.export.o
+vmlinux.unstripped: scripts/link-vmlinux.sh vmlinux.o .vmlinux.export.o $(KBUILD_LDS) FORCE
+$(call if_changed_dep,link_vmlinux)
ifdef CONFIG_DEBUG_INFO_BTF
-$(vmlinux-final): $(RESOLVE_BTFIDS)
+vmlinux.unstripped: $(RESOLVE_BTFIDS)
endif
ifdef CONFIG_BUILDTIME_TABLE_SORT
-$(vmlinux-final): scripts/sorttable
+vmlinux.unstripped: scripts/sorttable
endif
+# vmlinux
+# ---------------------------------------------------------------------------
+
+remove-section-y := .modinfo
+remove-section-$(CONFIG_ARCH_VMLINUX_NEEDS_RELOCS) += '.rel*'
+
+remove-symbols := -w --strip-symbol='__mod_device_table__*'
+
+# To avoid warnings: "empty loadable segment detected at ..." from GNU objcopy,
+# it is necessary to remove the PT_LOAD flag from the segment.
+quiet_cmd_strip_relocs = OBJCOPY $@
+ cmd_strip_relocs = $(OBJCOPY) $(patsubst %,--set-section-flags %=noload,$(remove-section-y)) $< $@; \
+ $(OBJCOPY) $(addprefix --remove-section=,$(remove-section-y)) $(remove-symbols) $@
+
+targets += vmlinux
+vmlinux: vmlinux.unstripped FORCE
+ $(call if_changed,strip_relocs)
+
+# modules.builtin.modinfo
+# ---------------------------------------------------------------------------
+
+OBJCOPYFLAGS_modules.builtin.modinfo := -j .modinfo -O binary
+
+targets += modules.builtin.modinfo
+modules.builtin.modinfo: vmlinux.unstripped FORCE
+ $(call if_changed,objcopy)
+
+# modules.builtin
+# ---------------------------------------------------------------------------
+
+__default: modules.builtin
+
+# The second line aids cases where multiple modules share the same object.
+
+quiet_cmd_modules_builtin = GEN $@
+ cmd_modules_builtin = \
+ tr '\0' '\n' < $< | \
+ sed -n 's/^[[:alnum:]:_]*\.file=//p' | \
+ tr ' ' '\n' | uniq | sed -e 's:^:kernel/:' -e 's/$$/.ko/' > $@
+
+targets += modules.builtin
+modules.builtin: modules.builtin.modinfo FORCE
+ $(call if_changed,modules_builtin)
+
# modules.builtin.ranges
# ---------------------------------------------------------------------------
ifdef CONFIG_BUILTIN_MODULE_RANGES
@@ -110,7 +135,7 @@ modules.builtin.ranges: $(srctree)/scripts/generate_builtin_ranges.awk \
modules.builtin vmlinux.map vmlinux.o.map FORCE
$(call if_changed,modules_builtin_ranges)
-vmlinux.map: $(vmlinux-final)
+vmlinux.map: vmlinux.unstripped
@:
endif
diff --git a/scripts/Makefile.vmlinux_o b/scripts/Makefile.vmlinux_o
index b024ffb3e201..23c8751285d7 100644
--- a/scripts/Makefile.vmlinux_o
+++ b/scripts/Makefile.vmlinux_o
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
PHONY := __default
-__default: vmlinux.o modules.builtin.modinfo modules.builtin
+__default: vmlinux.o
include include/config/auto.conf
include $(srctree)/scripts/Kbuild.include
@@ -73,30 +73,6 @@ vmlinux.o: $(initcalls-lds) vmlinux.a $(KBUILD_VMLINUX_LIBS) FORCE
targets += vmlinux.o
-# modules.builtin.modinfo
-# ---------------------------------------------------------------------------
-
-OBJCOPYFLAGS_modules.builtin.modinfo := -j .modinfo -O binary
-
-targets += modules.builtin.modinfo
-modules.builtin.modinfo: vmlinux.o FORCE
- $(call if_changed,objcopy)
-
-# modules.builtin
-# ---------------------------------------------------------------------------
-
-# The second line aids cases where multiple modules share the same object.
-
-quiet_cmd_modules_builtin = GEN $@
- cmd_modules_builtin = \
- tr '\0' '\n' < $< | \
- sed -n 's/^[[:alnum:]:_]*\.file=//p' | \
- tr ' ' '\n' | uniq | sed -e 's:^:kernel/:' -e 's/$$/.ko/' > $@
-
-targets += modules.builtin
-modules.builtin: modules.builtin.modinfo FORCE
- $(call if_changed,modules_builtin)
-
# Add FORCE to the prerequisites of a target to force it to be always rebuilt.
# ---------------------------------------------------------------------------
diff --git a/scripts/atomic/gen-atomics.sh b/scripts/atomic/gen-atomics.sh
index 5b98a8307693..02508d0d6fe4 100755
--- a/scripts/atomic/gen-atomics.sh
+++ b/scripts/atomic/gen-atomics.sh
@@ -11,6 +11,7 @@ cat <<EOF |
gen-atomic-instrumented.sh linux/atomic/atomic-instrumented.h
gen-atomic-long.sh linux/atomic/atomic-long.h
gen-atomic-fallback.sh linux/atomic/atomic-arch-fallback.h
+gen-rust-atomic-helpers.sh ../rust/helpers/atomic.c
EOF
while read script header args; do
/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}
diff --git a/scripts/atomic/gen-rust-atomic-helpers.sh b/scripts/atomic/gen-rust-atomic-helpers.sh
new file mode 100755
index 000000000000..45b1e100ed7c
--- /dev/null
+++ b/scripts/atomic/gen-rust-atomic-helpers.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+ATOMICDIR=$(dirname $0)
+
+. ${ATOMICDIR}/atomic-tbl.sh
+
+#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
+gen_proto_order_variant()
+{
+ local meta="$1"; shift
+ local pfx="$1"; shift
+ local name="$1"; shift
+ local sfx="$1"; shift
+ local order="$1"; shift
+ local atomic="$1"; shift
+ local int="$1"; shift
+
+ local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
+
+ local ret="$(gen_ret_type "${meta}" "${int}")"
+ local params="$(gen_params "${int}" "${atomic}" "$@")"
+ local args="$(gen_args "$@")"
+ local retstmt="$(gen_ret_stmt "${meta}")"
+
+cat <<EOF
+__rust_helper ${ret}
+rust_helper_${atomicname}(${params})
+{
+ ${retstmt}${atomicname}(${args});
+}
+
+EOF
+}
+
+cat << EOF
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by $0
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+/*
+ * This file provides helpers for the various atomic functions for Rust.
+ */
+#ifndef _RUST_ATOMIC_API_H
+#define _RUST_ATOMIC_API_H
+
+#include <linux/atomic.h>
+
+// TODO: Remove this after INLINE_HELPERS support is added.
+#ifndef __rust_helper
+#define __rust_helper
+#endif
+
+EOF
+
+grep '^[a-z]' "$1" | while read name meta args; do
+ gen_proto "${meta}" "${name}" "atomic" "int" ${args}
+done
+
+grep '^[a-z]' "$1" | while read name meta args; do
+ gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
+done
+
+cat <<EOF
+#endif /* _RUST_ATOMIC_API_H */
+EOF
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index e722dd6fa8ef..92669904eecc 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2636,6 +2636,11 @@ sub exclude_global_initialisers {
$realfile =~ m@/bpf/.*\.bpf\.c$@;
}
+sub is_userspace {
+ my ($realfile) = @_;
+ return ($realfile =~ m@^tools/@ || $realfile =~ m@^scripts/@);
+}
+
sub process {
my $filename = shift;
@@ -3294,7 +3299,7 @@ sub process {
# file delta changes
$line =~ /^\s*(?:[\w\.\-\+]*\/)++[\w\.\-\+]+:/ ||
# filename then :
- $line =~ /^\s*(?:Fixes:|$link_tags_search|$signature_tags)/i ||
+ $line =~ /^\s*(?:Fixes:|https?:|$link_tags_search|$signature_tags)/i ||
# A Fixes:, link or signature tag line
$commit_log_possible_stack_dump)) {
WARN("COMMIT_LOG_LONG_LINE",
@@ -7018,21 +7023,20 @@ sub process {
# }
# }
# }
-
# strcpy uses that should likely be strscpy
- if ($line =~ /\bstrcpy\s*\(/) {
+ if ($line =~ /\bstrcpy\s*\(/ && !is_userspace($realfile)) {
WARN("STRCPY",
"Prefer strscpy over strcpy - see: https://github.com/KSPP/linux/issues/88\n" . $herecurr);
}
# strlcpy uses that should likely be strscpy
- if ($line =~ /\bstrlcpy\s*\(/) {
+ if ($line =~ /\bstrlcpy\s*\(/ && !is_userspace($realfile)) {
WARN("STRLCPY",
"Prefer strscpy over strlcpy - see: https://github.com/KSPP/linux/issues/89\n" . $herecurr);
}
# strncpy uses that should likely be strscpy or strscpy_pad
- if ($line =~ /\bstrncpy\s*\(/) {
+ if ($line =~ /\bstrncpy\s*\(/ && !is_userspace($realfile)) {
WARN("STRNCPY",
"Prefer strscpy, strscpy_pad, or __nonstring over strncpy - see: https://github.com/KSPP/linux/issues/90\n" . $herecurr);
}
diff --git a/scripts/coccinelle/api/platform_no_drv_owner.cocci b/scripts/coccinelle/api/platform_no_drv_owner.cocci
index 8fa050eeb7e5..5e869858bda8 100644
--- a/scripts/coccinelle/api/platform_no_drv_owner.cocci
+++ b/scripts/coccinelle/api/platform_no_drv_owner.cocci
@@ -10,12 +10,21 @@ virtual org
virtual report
@match1@
+declarer name builtin_i2c_driver;
+declarer name builtin_platform_driver;
+declarer name builtin_platform_driver_probe;
declarer name module_i2c_driver;
declarer name module_platform_driver;
declarer name module_platform_driver_probe;
identifier __driver;
@@
(
+ builtin_i2c_driver(__driver);
+|
+ builtin_platform_driver(__driver);
+|
+ builtin_platform_driver_probe(__driver, ...);
+|
module_i2c_driver(__driver);
|
module_platform_driver(__driver);
diff --git a/scripts/coccinelle/misc/of_table.cocci b/scripts/coccinelle/misc/of_table.cocci
index 4693ea744753..17881cb0884b 100644
--- a/scripts/coccinelle/misc/of_table.cocci
+++ b/scripts/coccinelle/misc/of_table.cocci
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/// Make sure (of/i2c/platform)_device_id tables are NULL terminated
+/// Make sure (of/i2c/platform/spi)_device_id tables are NULL terminated
//
// Keywords: of_table i2c_table platform_table
// Confidence: Medium
@@ -15,14 +15,14 @@ identifier var, arr;
expression E;
@@
(
-struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = {
+struct \(of_device_id \| i2c_device_id \| platform_device_id \| spi_device_id\) arr[] = {
...,
{
.var = E,
* }
};
|
-struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = {
+struct \(of_device_id \| i2c_device_id \| platform_device_id \| spi_device_id\) arr[] = {
...,
* { ..., E, ... },
};
@@ -33,7 +33,7 @@ identifier var, arr;
expression E;
@@
(
-struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = {
+struct \(of_device_id \| i2c_device_id \| platform_device_id \| spi_device_id\) arr[] = {
...,
{
.var = E,
@@ -42,7 +42,7 @@ struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = {
+ { }
};
|
-struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = {
+struct \(of_device_id \| i2c_device_id \| platform_device_id \| spi_device_id\) arr[] = {
...,
{ ..., E, ... },
+ { },
@@ -55,7 +55,7 @@ identifier var, arr;
expression E;
@@
(
-struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = {
+struct \(of_device_id \| i2c_device_id \| platform_device_id \| spi_device_id\) arr[] = {
...,
{
.var = E,
@@ -63,7 +63,7 @@ struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = {
@p1
};
|
-struct \(of_device_id \| i2c_device_id \| platform_device_id\) arr[] = {
+struct \(of_device_id \| i2c_device_id \| platform_device_id \| spi_device_id\) arr[] = {
...,
{ ..., E, ... }
@p1
diff --git a/scripts/coccinelle/misc/ptr_err_to_pe.cocci b/scripts/coccinelle/misc/ptr_err_to_pe.cocci
new file mode 100644
index 000000000000..0494c7709245
--- /dev/null
+++ b/scripts/coccinelle/misc/ptr_err_to_pe.cocci
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/// Use %pe format specifier instead of PTR_ERR() for printing error pointers.
+///
+/// For printing error pointers (i.e., a pointer for which IS_ERR() is true)
+/// %pe will print a symbolic error name (e.g., -EINVAL), opposed to the raw
+/// errno (e.g., -22) produced by PTR_ERR().
+/// It also makes the code cleaner by saving a redundant call to PTR_ERR().
+///
+// Confidence: High
+// Copyright: (C) 2025 NVIDIA CORPORATION & AFFILIATES.
+// URL: https://coccinelle.gitlabpages.inria.fr/website
+// Options: --no-includes --include-headers
+
+virtual context
+virtual org
+virtual report
+
+@r@
+expression ptr;
+constant fmt;
+position p;
+identifier print_func;
+@@
+* print_func(..., fmt, ..., PTR_ERR@p(ptr), ...)
+
+@script:python depends on r && report@
+p << r.p;
+@@
+coccilib.report.print_report(p[0], "WARNING: Consider using %pe to print PTR_ERR()")
+
+@script:python depends on r && org@
+p << r.p;
+@@
+coccilib.org.print_todo(p[0], "WARNING: Consider using %pe to print PTR_ERR()")
diff --git a/scripts/crypto/gen-hash-testvecs.py b/scripts/crypto/gen-hash-testvecs.py
index 4ac927d40cf5..fc063f2ee95f 100755
--- a/scripts/crypto/gen-hash-testvecs.py
+++ b/scripts/crypto/gen-hash-testvecs.py
@@ -84,11 +84,16 @@ def print_c_struct_u8_array_field(name, value):
print_bytes('\t\t\t', value, 8)
print('\t\t},')
+def alg_digest_size_const(alg):
+ if alg == 'blake2s':
+ return 'BLAKE2S_HASH_SIZE'
+ return f'{alg.upper()}_DIGEST_SIZE'
+
def gen_unkeyed_testvecs(alg):
print('')
print('static const struct {')
print('\tsize_t data_len;')
- print(f'\tu8 digest[{alg.upper()}_DIGEST_SIZE];')
+ print(f'\tu8 digest[{alg_digest_size_const(alg)}];')
print('} hash_testvecs[] = {')
for data_len in DATA_LENS:
data = rand_bytes(data_len)
@@ -103,7 +108,7 @@ def gen_unkeyed_testvecs(alg):
for data_len in range(len(data) + 1):
hash_update(ctx, compute_hash(alg, data[:data_len]))
print_static_u8_array_definition(
- f'hash_testvec_consolidated[{alg.upper()}_DIGEST_SIZE]',
+ f'hash_testvec_consolidated[{alg_digest_size_const(alg)}]',
hash_final(ctx))
def gen_hmac_testvecs(alg):
@@ -119,6 +124,20 @@ def gen_hmac_testvecs(alg):
f'hmac_testvec_consolidated[{alg.upper()}_DIGEST_SIZE]',
ctx.digest())
+BLAKE2S_KEY_SIZE = 32
+BLAKE2S_HASH_SIZE = 32
+
+def gen_additional_blake2s_testvecs():
+ hashes = b''
+ for key_len in range(BLAKE2S_KEY_SIZE + 1):
+ for out_len in range(1, BLAKE2S_HASH_SIZE + 1):
+ h = hashlib.blake2s(digest_size=out_len, key=rand_bytes(key_len))
+ h.update(rand_bytes(100))
+ hashes += h.digest()
+ print_static_u8_array_definition(
+ 'blake2s_keyed_testvec_consolidated[BLAKE2S_HASH_SIZE]',
+ compute_hash('blake2s', hashes))
+
def gen_additional_poly1305_testvecs():
key = b'\xff' * POLY1305_KEY_SIZE
data = b''
@@ -141,7 +160,9 @@ alg = sys.argv[1]
print('/* SPDX-License-Identifier: GPL-2.0-or-later */')
print(f'/* This file was generated by: {sys.argv[0]} {" ".join(sys.argv[1:])} */')
gen_unkeyed_testvecs(alg)
-if alg == 'poly1305':
+if alg == 'blake2s':
+ gen_additional_blake2s_testvecs()
+elif alg == 'poly1305':
gen_additional_poly1305_testvecs()
else:
gen_hmac_testvecs(alg)
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 17abc4e7a985..c73cb802a0a3 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -242,8 +242,10 @@ debuginfod_get_vmlinux() {
decode_code() {
local scripts=`dirname "${BASH_SOURCE[0]}"`
+ local lim="Code: "
- echo "$1" | $scripts/decodecode
+ echo -n "${1%%${lim}*}"
+ echo "${lim}${1##*${lim}}" | $scripts/decodecode
}
handle_line() {
@@ -255,10 +257,11 @@ handle_line() {
basepath=${basepath%/init/main.c:*)}
fi
- local words
+ local words spaces
- # Tokenize
- read -a words <<<"$1"
+ # Tokenize: words and spaces to preserve the alignment
+ read -ra words <<<"$1"
+ IFS='#' read -ra spaces <<<"$(shopt -s extglob; echo "${1//+([^[:space:]])/#}")"
# Remove hex numbers. Do it ourselves until it happens in the
# kernel
@@ -270,19 +273,13 @@ handle_line() {
for i in "${!words[@]}"; do
# Remove the address
if [[ ${words[$i]} =~ \[\<([^]]+)\>\] ]]; then
- unset words[$i]
- fi
-
- # Format timestamps with tabs
- if [[ ${words[$i]} == \[ && ${words[$i+1]} == *\] ]]; then
- unset words[$i]
- words[$i+1]=$(printf "[%13s\n" "${words[$i+1]}")
+ unset words[$i] spaces[$i]
fi
done
if [[ ${words[$last]} =~ ^[0-9a-f]+\] ]]; then
words[$last-1]="${words[$last-1]} ${words[$last]}"
- unset words[$last]
+ unset words[$last] spaces[$last]
last=$(( $last - 1 ))
fi
@@ -294,7 +291,7 @@ handle_line() {
local info_str=""
if [[ ${words[$last]} =~ \([A-Z]*\) ]]; then
info_str=${words[$last]}
- unset words[$last]
+ unset words[$last] spaces[$last]
last=$(( $last - 1 ))
fi
@@ -311,7 +308,7 @@ handle_line() {
modbuildid=
fi
symbol=${words[$last-1]}
- unset words[$last-1]
+ unset words[$last-1] spaces[$last-1]
else
# The symbol is the last element, process it
symbol=${words[$last]}
@@ -323,12 +320,10 @@ handle_line() {
parse_symbol # modifies $symbol
# Add up the line number to the symbol
- if [[ -z ${module} ]]
- then
- echo "${words[@]}" "$symbol ${info_str}"
- else
- echo "${words[@]}" "$symbol $module ${info_str}"
- fi
+ for i in "${!words[@]}"; do
+ echo -n "${spaces[i]}${words[i]}"
+ done
+ echo "${spaces[$last]}${symbol}${module:+ ${module}}${info_str:+ ${info_str}}"
}
while read line; do
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c
index 6e06aeab5503..7e3fed5005b3 100644
--- a/scripts/dtc/checks.c
+++ b/scripts/dtc/checks.c
@@ -1024,7 +1024,7 @@ static void check_i2c_bus_bridge(struct check *c, struct dt_info *dti, struct no
} else if (strprefixeq(node->name, node->basenamelen, "i2c")) {
struct node *child;
for_each_child(node, child) {
- if (strprefixeq(child->name, node->basenamelen, "i2c-bus"))
+ if (strprefixeq(child->name, child->basenamelen, "i2c-bus"))
return;
}
node->bus = &i2c_bus;
@@ -1217,9 +1217,7 @@ WARNING(avoid_default_addr_size, check_avoid_default_addr_size, NULL,
static void check_avoid_unnecessary_addr_size(struct check *c, struct dt_info *dti,
struct node *node)
{
- struct property *prop;
struct node *child;
- bool has_reg = false;
if (!node->parent || node->addr_cells < 0 || node->size_cells < 0)
return;
@@ -1228,13 +1226,18 @@ static void check_avoid_unnecessary_addr_size(struct check *c, struct dt_info *d
return;
for_each_child(node, child) {
- prop = get_property(child, "reg");
- if (prop)
- has_reg = true;
+ /*
+ * Even if the child devices' address space is not mapped into
+ * the parent bus (no 'ranges' property on node), children can
+ * still have registers on a local bus, or map local addresses
+ * to another subordinate address space. The properties on the
+ * child nodes then make #address-cells/#size-cells necessary:
+ */
+ if (get_property(child, "reg") || get_property(child, "ranges"))
+ return;
}
- if (!has_reg)
- FAIL(c, dti, node, "unnecessary #address-cells/#size-cells without \"ranges\", \"dma-ranges\" or child \"reg\" property");
+ FAIL(c, dti, node, "unnecessary #address-cells/#size-cells without \"ranges\", \"dma-ranges\" or child \"reg\" or \"ranges\" property");
}
WARNING(avoid_unnecessary_addr_size, check_avoid_unnecessary_addr_size, NULL, &avoid_default_addr_size);
@@ -1673,6 +1676,10 @@ static void check_interrupt_map(struct check *c,
cellprop = get_property(provider_node, "#address-cells");
if (cellprop)
parent_cellsize += propval_cell(cellprop);
+ else
+ FAIL_PROP(c, dti, node, irq_map_prop,
+ "Missing property '#address-cells' in node %s, using 0 as fallback",
+ provider_node->fullpath);
cell += 1 + parent_cellsize;
if (cell > map_cells)
diff --git a/scripts/dtc/data.c b/scripts/dtc/data.c
index 14734233ad8b..5b25aa060416 100644
--- a/scripts/dtc/data.c
+++ b/scripts/dtc/data.c
@@ -228,11 +228,7 @@ struct data data_add_marker(struct data d, enum markertype type, char *ref)
{
struct marker *m;
- m = xmalloc(sizeof(*m));
- m->offset = d.len;
- m->type = type;
- m->ref = ref;
- m->next = NULL;
+ m = alloc_marker(d.len, type, ref);
return data_append_markers(d, m);
}
@@ -254,3 +250,44 @@ bool data_is_one_string(struct data d)
return true;
}
+
+struct data data_insert_data(struct data d, struct marker *m, struct data old)
+{
+ unsigned int offset = m->offset;
+ struct marker *next = m->next;
+ struct marker *marker;
+ struct data new_data;
+ char *ref;
+
+ new_data = data_insert_at_marker(d, m, old.val, old.len);
+
+ /* Copy all markers from old value */
+ marker = old.markers;
+ for_each_marker(marker) {
+ ref = NULL;
+
+ if (marker->ref)
+ ref = xstrdup(marker->ref);
+
+ m->next = alloc_marker(marker->offset + offset, marker->type,
+ ref);
+ m = m->next;
+ }
+ m->next = next;
+
+ return new_data;
+}
+
+struct marker *alloc_marker(unsigned int offset, enum markertype type,
+ char *ref)
+{
+ struct marker *m;
+
+ m = xmalloc(sizeof(*m));
+ m->offset = offset;
+ m->type = type;
+ m->ref = ref;
+ m->next = NULL;
+
+ return m;
+}
diff --git a/scripts/dtc/dt_to_config b/scripts/dtc/dt_to_config
index 299d1c2b20d7..70d6d5f06bdc 100755
--- a/scripts/dtc/dt_to_config
+++ b/scripts/dtc/dt_to_config
@@ -51,10 +51,10 @@ $num_pr_flags = $pr_flag_pos_config_test_fail + 1;
"compatible is white listed",
"matching driver and/or kernel config is hard coded",
"kernel config hard coded in Makefile",
- "one or more kernel config file options is not set",
- "one or more kernel config file options is set to 'm'",
- "one or more kernel config file options is set to 'y'",
- "one of more kernel config file options fails to have correct value"
+ "one or more kernel config file options are not set",
+ "one or more kernel config file options are set to 'm'",
+ "one or more kernel config file options are set to 'y'",
+ "one or more kernel config file options fail to have correct value"
);
diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l
index de60a70b6bdb..15d585c80798 100644
--- a/scripts/dtc/dtc-lexer.l
+++ b/scripts/dtc/dtc-lexer.l
@@ -151,6 +151,21 @@ static void PRINTF(1, 2) lexical_error(const char *fmt, ...);
return DT_LABEL;
}
+<V1>{LABEL} {
+ /* Missed includes or macro definitions while
+ * preprocessing can lead to unexpected identifiers in
+ * the input. Report a slightly more informative error
+ * in this case */
+
+ lexical_error("Unexpected '%s'", yytext);
+
+ /* Treat it as a literal which often generates further
+ * useful error messages */
+
+ yylval.integer = 0;
+ return DT_LITERAL;
+ }
+
<V1>([0-9]+|0[xX][0-9a-fA-F]+)(U|L|UL|LL|ULL)? {
char *e;
DPRINT("Integer Literal: '%s'\n", yytext);
diff --git a/scripts/dtc/dtc.c b/scripts/dtc/dtc.c
index 0655c2e2c362..b3445b7d6473 100644
--- a/scripts/dtc/dtc.c
+++ b/scripts/dtc/dtc.c
@@ -15,7 +15,7 @@ int quiet; /* Level of quietness */
unsigned int reservenum;/* Number of memory reservation slots */
int minsize; /* Minimum blob size */
int padsize; /* Additional padding to blob */
-int alignsize; /* Additional padding to blob accroding to the alignsize */
+int alignsize; /* Additional padding to blob according to the alignsize */
int phandle_format = PHANDLE_EPAPR; /* Use linux,phandle or phandle properties */
int generate_symbols; /* enable symbols & fixup support */
int generate_fixups; /* suppress generation of fixups on symbol support */
@@ -289,7 +289,9 @@ int main(int argc, char *argv[])
if (!depfile)
die("Couldn't open dependency file %s: %s\n", depname,
strerror(errno));
- fprintf(depfile, "%s:", outname);
+
+ fprint_path_escaped(depfile, outname);
+ fputc(':', depfile);
}
if (inform == NULL)
diff --git a/scripts/dtc/dtc.h b/scripts/dtc/dtc.h
index 4c4aaca1fc41..3a220b9afc99 100644
--- a/scripts/dtc/dtc.h
+++ b/scripts/dtc/dtc.h
@@ -38,7 +38,7 @@ extern int quiet; /* Level of quietness */
extern unsigned int reservenum; /* Number of memory reservation slots */
extern int minsize; /* Minimum blob size */
extern int padsize; /* Additional padding to blob */
-extern int alignsize; /* Additional padding to blob accroding to the alignsize */
+extern int alignsize; /* Additional padding to blob according to the alignsize */
extern int phandle_format; /* Use linux,phandle or phandle properties */
extern int generate_symbols; /* generate symbols for nodes with labels */
extern int generate_fixups; /* generate fixups */
@@ -182,7 +182,10 @@ struct data data_append_addr(struct data d, uint64_t addr);
struct data data_append_byte(struct data d, uint8_t byte);
struct data data_append_zeroes(struct data d, int len);
struct data data_append_align(struct data d, int align);
+struct data data_insert_data(struct data d, struct marker *m, struct data old);
+struct marker *alloc_marker(unsigned int offset, enum markertype type,
+ char *ref);
struct data data_add_marker(struct data d, enum markertype type, char *ref);
bool data_is_one_string(struct data d);
diff --git a/scripts/dtc/fdtoverlay.c b/scripts/dtc/fdtoverlay.c
index 699b4f616502..ee1eb8f3ad28 100644
--- a/scripts/dtc/fdtoverlay.c
+++ b/scripts/dtc/fdtoverlay.c
@@ -46,6 +46,7 @@ static void *apply_one(char *base, const char *overlay, size_t *buf_len,
char *tmp = NULL;
char *tmpo;
int ret;
+ bool has_symbols;
/*
* We take copies first, because a failed apply can trash
@@ -62,6 +63,8 @@ static void *apply_one(char *base, const char *overlay, size_t *buf_len,
fdt_strerror(ret));
goto fail;
}
+ ret = fdt_path_offset(tmp, "/__symbols__");
+ has_symbols = ret >= 0;
memcpy(tmpo, overlay, fdt_totalsize(overlay));
@@ -74,6 +77,11 @@ static void *apply_one(char *base, const char *overlay, size_t *buf_len,
if (ret) {
fprintf(stderr, "\nFailed to apply '%s': %s\n",
name, fdt_strerror(ret));
+ if (!has_symbols) {
+ fprintf(stderr,
+ "base blob does not have a '/__symbols__' node, "
+ "make sure you have compiled the base blob with '-@' option\n");
+ }
goto fail;
}
diff --git a/scripts/dtc/flattree.c b/scripts/dtc/flattree.c
index 1bcd8089c5b9..30e6de2044b2 100644
--- a/scripts/dtc/flattree.c
+++ b/scripts/dtc/flattree.c
@@ -503,7 +503,7 @@ void dt_to_asm(FILE *f, struct dt_info *dti, int version)
* Reserve map entries.
* Align the reserve map to a doubleword boundary.
* Each entry is an (address, size) pair of u64 values.
- * Always supply a zero-sized temination entry.
+ * Always supply a zero-sized termination entry.
*/
asm_emit_align(f, 8);
emit_label(f, symprefix, "reserve_map");
diff --git a/scripts/dtc/libfdt/fdt.c b/scripts/dtc/libfdt/fdt.c
index 20c6415b9ced..95f644c31f94 100644
--- a/scripts/dtc/libfdt/fdt.c
+++ b/scripts/dtc/libfdt/fdt.c
@@ -312,14 +312,14 @@ int fdt_next_subnode(const void *fdt, int offset)
return offset;
}
-const char *fdt_find_string_(const char *strtab, int tabsize, const char *s)
+const char *fdt_find_string_len_(const char *strtab, int tabsize, const char *s,
+ int slen)
{
- int len = strlen(s) + 1;
- const char *last = strtab + tabsize - len;
+ const char *last = strtab + tabsize - (slen + 1);
const char *p;
for (p = strtab; p <= last; p++)
- if (memcmp(p, s, len) == 0)
+ if (memcmp(p, s, slen) == 0 && p[slen] == '\0')
return p;
return NULL;
}
diff --git a/scripts/dtc/libfdt/fdt.h b/scripts/dtc/libfdt/fdt.h
index 0c91aa7f67b5..a07abfcc7108 100644
--- a/scripts/dtc/libfdt/fdt.h
+++ b/scripts/dtc/libfdt/fdt.h
@@ -7,7 +7,7 @@
* Copyright 2012 Kim Phillips, Freescale Semiconductor.
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct fdt_header {
fdt32_t magic; /* magic word FDT_MAGIC */
@@ -45,7 +45,7 @@ struct fdt_property {
char data[];
};
-#endif /* !__ASSEMBLY */
+#endif /* !__ASSEMBLER__ */
#define FDT_MAGIC 0xd00dfeed /* 4: version, 4: total size */
#define FDT_TAGSIZE sizeof(fdt32_t)
diff --git a/scripts/dtc/libfdt/fdt_overlay.c b/scripts/dtc/libfdt/fdt_overlay.c
index 28b667ffc490..e6b9eb643958 100644
--- a/scripts/dtc/libfdt/fdt_overlay.c
+++ b/scripts/dtc/libfdt/fdt_overlay.c
@@ -307,7 +307,6 @@ static int overlay_update_local_references(void *fdto, uint32_t delta)
/**
* overlay_fixup_one_phandle - Set an overlay phandle to the base one
- * @fdt: Base Device Tree blob
* @fdto: Device tree overlay blob
* @symbols_off: Node offset of the symbols node in the base device tree
* @path: Path to a node holding a phandle in the overlay
@@ -328,8 +327,7 @@ static int overlay_update_local_references(void *fdto, uint32_t delta)
* 0 on success
* Negative error code on failure
*/
-static int overlay_fixup_one_phandle(void *fdt, void *fdto,
- int symbols_off,
+static int overlay_fixup_one_phandle(void *fdto, int symbols_off,
const char *path, uint32_t path_len,
const char *name, uint32_t name_len,
int poffset, uint32_t phandle)
@@ -351,7 +349,7 @@ static int overlay_fixup_one_phandle(void *fdt, void *fdto,
name, name_len, poffset,
&phandle_prop,
sizeof(phandle_prop));
-};
+}
/**
* overlay_fixup_phandle - Set an overlay phandle to the base one
@@ -443,7 +441,7 @@ static int overlay_fixup_phandle(void *fdt, void *fdto, int symbols_off,
if ((*endptr != '\0') || (endptr <= (sep + 1)))
return -FDT_ERR_BADOVERLAY;
- ret = overlay_fixup_one_phandle(fdt, fdto, symbols_off,
+ ret = overlay_fixup_one_phandle(fdto, symbols_off,
path, path_len, name, name_len,
poffset, phandle);
if (ret)
diff --git a/scripts/dtc/libfdt/fdt_rw.c b/scripts/dtc/libfdt/fdt_rw.c
index 3621d3651d3f..7475cafce071 100644
--- a/scripts/dtc/libfdt/fdt_rw.c
+++ b/scripts/dtc/libfdt/fdt_rw.c
@@ -124,31 +124,33 @@ static int fdt_splice_string_(void *fdt, int newlen)
* allocated. Ignored if can_assume(NO_ROLLBACK)
* @return offset of string in the string table (whether found or added)
*/
-static int fdt_find_add_string_(void *fdt, const char *s, int *allocated)
+static int fdt_find_add_string_(void *fdt, const char *s, int slen,
+ int *allocated)
{
char *strtab = (char *)fdt + fdt_off_dt_strings(fdt);
const char *p;
char *new;
- int len = strlen(s) + 1;
int err;
if (!can_assume(NO_ROLLBACK))
*allocated = 0;
- p = fdt_find_string_(strtab, fdt_size_dt_strings(fdt), s);
+ p = fdt_find_string_len_(strtab, fdt_size_dt_strings(fdt), s, slen);
if (p)
/* found it */
return (p - strtab);
new = strtab + fdt_size_dt_strings(fdt);
- err = fdt_splice_string_(fdt, len);
+ err = fdt_splice_string_(fdt, slen + 1);
if (err)
return err;
if (!can_assume(NO_ROLLBACK))
*allocated = 1;
- memcpy(new, s, len);
+ memcpy(new, s, slen);
+ new[slen] = '\0';
+
return (new - strtab);
}
@@ -181,13 +183,15 @@ int fdt_del_mem_rsv(void *fdt, int n)
return fdt_splice_mem_rsv_(fdt, re, 1, 0);
}
-static int fdt_resize_property_(void *fdt, int nodeoffset, const char *name,
+static int fdt_resize_property_(void *fdt, int nodeoffset,
+ const char *name, int namelen,
int len, struct fdt_property **prop)
{
int oldlen;
int err;
- *prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen);
+ *prop = fdt_get_property_namelen_w(fdt, nodeoffset, name, namelen,
+ &oldlen);
if (!*prop)
return oldlen;
@@ -200,7 +204,7 @@ static int fdt_resize_property_(void *fdt, int nodeoffset, const char *name,
}
static int fdt_add_property_(void *fdt, int nodeoffset, const char *name,
- int len, struct fdt_property **prop)
+ int namelen, int len, struct fdt_property **prop)
{
int proplen;
int nextoffset;
@@ -211,7 +215,7 @@ static int fdt_add_property_(void *fdt, int nodeoffset, const char *name,
if ((nextoffset = fdt_check_node_offset_(fdt, nodeoffset)) < 0)
return nextoffset;
- namestroff = fdt_find_add_string_(fdt, name, &allocated);
+ namestroff = fdt_find_add_string_(fdt, name, namelen, &allocated);
if (namestroff < 0)
return namestroff;
@@ -255,17 +259,18 @@ int fdt_set_name(void *fdt, int nodeoffset, const char *name)
return 0;
}
-int fdt_setprop_placeholder(void *fdt, int nodeoffset, const char *name,
- int len, void **prop_data)
+int fdt_setprop_placeholder_namelen(void *fdt, int nodeoffset, const char *name,
+ int namelen, int len, void **prop_data)
{
struct fdt_property *prop;
int err;
FDT_RW_PROBE(fdt);
- err = fdt_resize_property_(fdt, nodeoffset, name, len, &prop);
+ err = fdt_resize_property_(fdt, nodeoffset, name, namelen, len, &prop);
if (err == -FDT_ERR_NOTFOUND)
- err = fdt_add_property_(fdt, nodeoffset, name, len, &prop);
+ err = fdt_add_property_(fdt, nodeoffset, name, namelen, len,
+ &prop);
if (err)
return err;
@@ -273,13 +278,14 @@ int fdt_setprop_placeholder(void *fdt, int nodeoffset, const char *name,
return 0;
}
-int fdt_setprop(void *fdt, int nodeoffset, const char *name,
- const void *val, int len)
+int fdt_setprop_namelen(void *fdt, int nodeoffset, const char *name,
+ int namelen, const void *val, int len)
{
void *prop_data;
int err;
- err = fdt_setprop_placeholder(fdt, nodeoffset, name, len, &prop_data);
+ err = fdt_setprop_placeholder_namelen(fdt, nodeoffset, name, namelen,
+ len, &prop_data);
if (err)
return err;
@@ -307,7 +313,8 @@ int fdt_appendprop(void *fdt, int nodeoffset, const char *name,
prop->len = cpu_to_fdt32(newlen);
memcpy(prop->data + oldlen, val, len);
} else {
- err = fdt_add_property_(fdt, nodeoffset, name, len, &prop);
+ err = fdt_add_property_(fdt, nodeoffset, name, strlen(name),
+ len, &prop);
if (err)
return err;
memcpy(prop->data, val, len);
diff --git a/scripts/dtc/libfdt/libfdt.h b/scripts/dtc/libfdt/libfdt.h
index 2d409d8e829b..914bf90785ab 100644
--- a/scripts/dtc/libfdt/libfdt.h
+++ b/scripts/dtc/libfdt/libfdt.h
@@ -14,7 +14,7 @@ extern "C" {
#endif
#define FDT_FIRST_SUPPORTED_VERSION 0x02
-#define FDT_LAST_COMPATIBLE_VERSION 0x10
+#define FDT_LAST_COMPATIBLE_VERSION 0x10
#define FDT_LAST_SUPPORTED_VERSION 0x11
/* Error codes: informative error codes */
@@ -263,16 +263,16 @@ int fdt_next_subnode(const void *fdt, int offset);
struct fdt_header *fdth = (struct fdt_header *)fdt; \
fdth->name = cpu_to_fdt32(val); \
}
-fdt_set_hdr_(magic);
-fdt_set_hdr_(totalsize);
-fdt_set_hdr_(off_dt_struct);
-fdt_set_hdr_(off_dt_strings);
-fdt_set_hdr_(off_mem_rsvmap);
-fdt_set_hdr_(version);
-fdt_set_hdr_(last_comp_version);
-fdt_set_hdr_(boot_cpuid_phys);
-fdt_set_hdr_(size_dt_strings);
-fdt_set_hdr_(size_dt_struct);
+fdt_set_hdr_(magic)
+fdt_set_hdr_(totalsize)
+fdt_set_hdr_(off_dt_struct)
+fdt_set_hdr_(off_dt_strings)
+fdt_set_hdr_(off_mem_rsvmap)
+fdt_set_hdr_(version)
+fdt_set_hdr_(last_comp_version)
+fdt_set_hdr_(boot_cpuid_phys)
+fdt_set_hdr_(size_dt_strings)
+fdt_set_hdr_(size_dt_struct)
#undef fdt_set_hdr_
/**
@@ -285,7 +285,7 @@ size_t fdt_header_size(const void *fdt);
/**
* fdt_header_size_ - internal function to get header size from a version number
- * @version: devicetree version number
+ * @version: device tree version number
*
* Return: size of DTB header in bytes
*/
@@ -554,7 +554,7 @@ int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen);
* -FDT_ERR_BADPATH, given path does not begin with '/' and the first
* component is not a valid alias
* -FDT_ERR_NOTFOUND, if the requested node does not exist
- * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADMAGIC,
* -FDT_ERR_BADVERSION,
* -FDT_ERR_BADSTATE,
* -FDT_ERR_BADSTRUCTURE,
@@ -599,7 +599,7 @@ const char *fdt_get_name(const void *fdt, int nodeoffset, int *lenp);
* structure block offset of the property (>=0), on success
* -FDT_ERR_NOTFOUND, if the requested node has no properties
* -FDT_ERR_BADOFFSET, if nodeoffset did not point to an FDT_BEGIN_NODE tag
- * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADMAGIC,
* -FDT_ERR_BADVERSION,
* -FDT_ERR_BADSTATE,
* -FDT_ERR_BADSTRUCTURE,
@@ -620,7 +620,7 @@ int fdt_first_property_offset(const void *fdt, int nodeoffset);
* structure block offset of the next property (>=0), on success
* -FDT_ERR_NOTFOUND, if the given property is the last in its node
* -FDT_ERR_BADOFFSET, if nodeoffset did not point to an FDT_PROP tag
- * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADMAGIC,
* -FDT_ERR_BADVERSION,
* -FDT_ERR_BADSTATE,
* -FDT_ERR_BADSTRUCTURE,
@@ -712,6 +712,13 @@ const struct fdt_property *fdt_get_property_namelen(const void *fdt,
int nodeoffset,
const char *name,
int namelen, int *lenp);
+static inline struct fdt_property *
+fdt_get_property_namelen_w(void *fdt, int nodeoffset, const char *name,
+ int namelen, int *lenp)
+{
+ return (struct fdt_property *)(uintptr_t)fdt_get_property_namelen(
+ fdt, nodeoffset, name, namelen, lenp);
+}
#endif
/**
@@ -764,7 +771,7 @@ static inline struct fdt_property *fdt_get_property_w(void *fdt, int nodeoffset,
* to within the device blob itself, not a copy of the value). If
* lenp is non-NULL, the length of the property value is also
* returned, in the integer pointed to by lenp. If namep is non-NULL,
- * the property's namne will also be returned in the char * pointed to
+ * the property's name will also be returned in the char * pointed to
* by namep (this will be a pointer to within the device tree's string
* block, not a new copy of the name).
*
@@ -772,7 +779,7 @@ static inline struct fdt_property *fdt_get_property_w(void *fdt, int nodeoffset,
* pointer to the property's value
* if lenp is non-NULL, *lenp contains the length of the property
* value (>=0)
- * if namep is non-NULL *namep contiains a pointer to the property
+ * if namep is non-NULL *namep contains a pointer to the property
* name.
* NULL, on error
* if lenp is non-NULL, *lenp contains an error code (<0):
@@ -866,7 +873,7 @@ uint32_t fdt_get_phandle(const void *fdt, int nodeoffset);
/**
* fdt_get_alias_namelen - get alias based on substring
* @fdt: pointer to the device tree blob
- * @name: name of the alias th look up
+ * @name: name of the alias to look up
* @namelen: number of characters of name to consider
*
* Identical to fdt_get_alias(), but only examine the first @namelen
@@ -883,7 +890,7 @@ const char *fdt_get_alias_namelen(const void *fdt,
/**
* fdt_get_alias - retrieve the path referenced by a given alias
* @fdt: pointer to the device tree blob
- * @name: name of the alias th look up
+ * @name: name of the alias to look up
*
* fdt_get_alias() retrieves the value of a given alias. That is, the
* value of the property named @name in the node /aliases.
@@ -1259,8 +1266,8 @@ const char *fdt_stringlist_get(const void *fdt, int nodeoffset,
*
* returns:
* 0 <= n < FDT_MAX_NCELLS, on success
- * 2, if the node has no #address-cells property
- * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid
+ * 2, if the node has no #address-cells property
+ * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid
* #address-cells property
* -FDT_ERR_BADMAGIC,
* -FDT_ERR_BADVERSION,
@@ -1280,8 +1287,8 @@ int fdt_address_cells(const void *fdt, int nodeoffset);
*
* returns:
* 0 <= n < FDT_MAX_NCELLS, on success
- * 1, if the node has no #size-cells property
- * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid
+ * 1, if the node has no #size-cells property
+ * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid
* #size-cells property
* -FDT_ERR_BADMAGIC,
* -FDT_ERR_BADVERSION,
@@ -1562,7 +1569,7 @@ static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val)
* @fdt: pointer to the device tree blob
* @name: name of property to add
* @len: length of property value in bytes
- * @valp: returns a pointer to where where the value should be placed
+ * @valp: returns a pointer to where the value should be placed
*
* returns:
* 0, on success
@@ -1660,6 +1667,38 @@ int fdt_del_mem_rsv(void *fdt, int n);
int fdt_set_name(void *fdt, int nodeoffset, const char *name);
/**
+ * fdt_setprop_namelen - create or change a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @namelen: length of the name
+ * @val: pointer to data to set the property value to
+ * @len: length of the property value
+ *
+ * fdt_setprop_namelen() sets the value of the named property in the given
+ * node to the given value and length, creating the property if it
+ * does not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_setprop_namelen(void *fdt, int nodeoffset, const char *name,
+ int namelen, const void *val, int len);
+
+/**
* fdt_setprop - create or change a property
* @fdt: pointer to the device tree blob
* @nodeoffset: offset of the node whose property to change
@@ -1687,8 +1726,44 @@ int fdt_set_name(void *fdt, int nodeoffset, const char *name);
* -FDT_ERR_BADLAYOUT,
* -FDT_ERR_TRUNCATED, standard meanings
*/
-int fdt_setprop(void *fdt, int nodeoffset, const char *name,
- const void *val, int len);
+static inline int fdt_setprop(void *fdt, int nodeoffset, const char *name,
+ const void *val, int len)
+{
+ return fdt_setprop_namelen(fdt, nodeoffset, name, strlen(name), val,
+ len);
+}
+
+/**
+ * fdt_setprop_placeholder_namelen - allocate space for a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @namelen: length of the name
+ * @len: length of the property value
+ * @prop_data: return pointer to property data
+ *
+ * fdt_setprop_placeholder_namelen() allocates the named property in the given node.
+ * If the property exists it is resized. In either case a pointer to the
+ * property data is returned.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_setprop_placeholder_namelen(void *fdt, int nodeoffset, const char *name,
+ int namelen, int len, void **prop_data);
/**
* fdt_setprop_placeholder - allocate space for a property
@@ -1698,7 +1773,7 @@ int fdt_setprop(void *fdt, int nodeoffset, const char *name,
* @len: length of the property value
* @prop_data: return pointer to property data
*
- * fdt_setprop_placeholer() allocates the named property in the given node.
+ * fdt_setprop_placeholder() allocates the named property in the given node.
* If the property exists it is resized. In either case a pointer to the
* property data is returned.
*
@@ -1718,8 +1793,13 @@ int fdt_setprop(void *fdt, int nodeoffset, const char *name,
* -FDT_ERR_BADLAYOUT,
* -FDT_ERR_TRUNCATED, standard meanings
*/
-int fdt_setprop_placeholder(void *fdt, int nodeoffset, const char *name,
- int len, void **prop_data);
+static inline int fdt_setprop_placeholder(void *fdt, int nodeoffset,
+ const char *name, int len,
+ void **prop_data)
+{
+ return fdt_setprop_placeholder_namelen(fdt, nodeoffset, name,
+ strlen(name), len, prop_data);
+}
/**
* fdt_setprop_u32 - set a property to a 32-bit integer
@@ -1839,6 +1919,38 @@ static inline int fdt_setprop_cell(void *fdt, int nodeoffset, const char *name,
#define fdt_setprop_string(fdt, nodeoffset, name, str) \
fdt_setprop((fdt), (nodeoffset), (name), (str), strlen(str)+1)
+/**
+ * fdt_setprop_namelen_string - set a property to a string value
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @namelen: number of characters of name to consider
+ * @str: string value for the property
+ *
+ * fdt_setprop_namelen_string() sets the value of the named property in the
+ * given node to the given string value (using the length of the
+ * string to determine the new length of the property), or creates a
+ * new property with that value if it does not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+#define fdt_setprop_namelen_string(fdt, nodeoffset, name, namelen, str) \
+ fdt_setprop_namelen((fdt), (nodeoffset), (name), (namelen), (str), \
+ strlen(str) + 1)
/**
* fdt_setprop_empty - set a property to an empty value
@@ -2059,7 +2171,7 @@ int fdt_appendprop_addrrange(void *fdt, int parent, int nodeoffset,
* @nodeoffset: offset of the node whose property to nop
* @name: name of the property to nop
*
- * fdt_del_property() will delete the given property.
+ * fdt_delprop() will delete the given property.
*
* This function will delete data from the blob, and will therefore
* change the offsets of some existing nodes.
@@ -2111,8 +2223,7 @@ int fdt_add_subnode_namelen(void *fdt, int parentoffset,
* change the offsets of some existing nodes.
*
* returns:
- * structure block offset of the created nodeequested subnode (>=0), on
- * success
+ * structure block offset of the created subnode (>=0), on success
* -FDT_ERR_NOTFOUND, if the requested subnode does not exist
* -FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE
* tag
@@ -2122,7 +2233,7 @@ int fdt_add_subnode_namelen(void *fdt, int parentoffset,
* blob to contain the new node
* -FDT_ERR_NOSPACE
* -FDT_ERR_BADLAYOUT
- * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADMAGIC,
* -FDT_ERR_BADVERSION,
* -FDT_ERR_BADSTATE,
* -FDT_ERR_BADSTRUCTURE,
@@ -2167,7 +2278,7 @@ int fdt_del_node(void *fdt, int nodeoffset);
* returns:
* 0, on success
* -FDT_ERR_NOSPACE, there's not enough space in the base device tree
- * -FDT_ERR_NOTFOUND, the overlay points to some inexistant nodes or
+ * -FDT_ERR_NOTFOUND, the overlay points to some nonexistent nodes or
* properties in the base DT
* -FDT_ERR_BADPHANDLE,
* -FDT_ERR_BADOVERLAY,
diff --git a/scripts/dtc/libfdt/libfdt_internal.h b/scripts/dtc/libfdt/libfdt_internal.h
index 16bda1906a7b..b60b5456f596 100644
--- a/scripts/dtc/libfdt/libfdt_internal.h
+++ b/scripts/dtc/libfdt/libfdt_internal.h
@@ -20,7 +20,15 @@ int32_t fdt_ro_probe_(const void *fdt);
int fdt_check_node_offset_(const void *fdt, int offset);
int fdt_check_prop_offset_(const void *fdt, int offset);
-const char *fdt_find_string_(const char *strtab, int tabsize, const char *s);
+
+const char *fdt_find_string_len_(const char *strtab, int tabsize, const char *s,
+ int s_len);
+static inline const char *fdt_find_string_(const char *strtab, int tabsize,
+ const char *s)
+{
+ return fdt_find_string_len_(strtab, tabsize, s, strlen(s));
+}
+
int fdt_node_end_offset_(void *fdt, int nodeoffset);
static inline const void *fdt_offset_ptr_(const void *fdt, int offset)
@@ -47,8 +55,8 @@ static inline struct fdt_reserve_entry *fdt_mem_rsv_w_(void *fdt, int n)
}
/*
- * Internal helpers to access tructural elements of the device tree
- * blob (rather than for exaple reading integers from within property
+ * Internal helpers to access structural elements of the device tree
+ * blob (rather than for example reading integers from within property
* values). We assume that we are either given a naturally aligned
* address for the platform or if we are not, we are on a platform
* where unaligned memory reads will be handled in a graceful manner.
diff --git a/scripts/dtc/livetree.c b/scripts/dtc/livetree.c
index 49f723002f85..d51d05830b18 100644
--- a/scripts/dtc/livetree.c
+++ b/scripts/dtc/livetree.c
@@ -174,7 +174,7 @@ struct node *merge_nodes(struct node *old_node, struct node *new_node)
old_prop->val = new_prop->val;
old_prop->deleted = 0;
- free(old_prop->srcpos);
+ srcpos_free(old_prop->srcpos);
old_prop->srcpos = new_prop->srcpos;
free(new_prop);
new_prop = NULL;
@@ -504,7 +504,7 @@ struct node *get_subnode(struct node *node, const char *nodename)
struct node *child;
for_each_child(node, child)
- if (streq(child->name, nodename))
+ if (streq(child->name, nodename) && !child->deleted)
return child;
return NULL;
@@ -1014,9 +1014,7 @@ static void add_local_fixup_entry(struct dt_info *dti,
/* walk the path components creating nodes if they don't exist */
for (wn = lfn, i = 1; i < depth; i++, wn = nwn) {
/* if no node exists, create it */
- nwn = get_subnode(wn, compp[i]);
- if (!nwn)
- nwn = build_and_name_child_node(wn, compp[i]);
+ nwn = build_root_node(wn, compp[i]);
}
free(compp);
@@ -1058,16 +1056,29 @@ void generate_label_tree(struct dt_info *dti, const char *name, bool allocph)
void generate_fixups_tree(struct dt_info *dti, const char *name)
{
+ struct node *n = get_subnode(dti->dt, name);
+
+ /* Start with an empty __fixups__ node to not get duplicates */
+ if (n)
+ n->deleted = true;
+
if (!any_fixup_tree(dti, dti->dt))
return;
- generate_fixups_tree_internal(dti, build_root_node(dti->dt, name),
+ generate_fixups_tree_internal(dti,
+ build_and_name_child_node(dti->dt, name),
dti->dt);
}
void generate_local_fixups_tree(struct dt_info *dti, const char *name)
{
+ struct node *n = get_subnode(dti->dt, name);
+
+ /* Start with an empty __local_fixups__ node to not get duplicates */
+ if (n)
+ n->deleted = true;
if (!any_local_fixup_tree(dti, dti->dt))
return;
- generate_local_fixups_tree_internal(dti, build_root_node(dti->dt, name),
+ generate_local_fixups_tree_internal(dti,
+ build_and_name_child_node(dti->dt, name),
dti->dt);
}
diff --git a/scripts/dtc/srcpos.c b/scripts/dtc/srcpos.c
index 8e4d18a90b47..5bb57bf6856c 100644
--- a/scripts/dtc/srcpos.c
+++ b/scripts/dtc/srcpos.c
@@ -160,8 +160,10 @@ FILE *srcfile_relative_open(const char *fname, char **fullnamep)
strerror(errno));
}
- if (depfile)
- fprintf(depfile, " %s", fullname);
+ if (depfile) {
+ fputc(' ', depfile);
+ fprint_path_escaped(depfile, fullname);
+ }
if (fullnamep)
*fullnamep = fullname;
@@ -285,6 +287,17 @@ struct srcpos *srcpos_extend(struct srcpos *pos, struct srcpos *newtail)
return pos;
}
+void srcpos_free(struct srcpos *pos)
+{
+ struct srcpos *p_next;
+
+ while (pos) {
+ p_next = pos->next;
+ free(pos);
+ pos = p_next;
+ }
+}
+
char *
srcpos_string(struct srcpos *pos)
{
diff --git a/scripts/dtc/srcpos.h b/scripts/dtc/srcpos.h
index 4318d7ad34d9..4d60b50e3119 100644
--- a/scripts/dtc/srcpos.h
+++ b/scripts/dtc/srcpos.h
@@ -88,6 +88,7 @@ extern void srcpos_update(struct srcpos *pos, const char *text, int len);
extern struct srcpos *srcpos_copy(struct srcpos *pos);
extern struct srcpos *srcpos_extend(struct srcpos *new_srcpos,
struct srcpos *old_srcpos);
+extern void srcpos_free(struct srcpos *pos);
extern char *srcpos_string(struct srcpos *pos);
extern char *srcpos_string_first(struct srcpos *pos, int level);
extern char *srcpos_string_last(struct srcpos *pos, int level);
diff --git a/scripts/dtc/treesource.c b/scripts/dtc/treesource.c
index ae15839ba6a5..d25f01fc6937 100644
--- a/scripts/dtc/treesource.c
+++ b/scripts/dtc/treesource.c
@@ -139,26 +139,48 @@ static const char *delim_end[] = {
[TYPE_STRING] = "",
};
+/*
+ * The invariants in the marker list are:
+ * - offsets are non-strictly monotonically increasing
+ * - for a single offset there is at most one type marker
+ * - for a single offset that has both a type marker and non-type markers, the
+ * type marker appears before the others.
+ */
+static struct marker **add_marker(struct marker **mi,
+ enum markertype type, unsigned int offset, char *ref)
+{
+ struct marker *nm;
+
+ while (*mi && (*mi)->offset < offset)
+ mi = &(*mi)->next;
+
+ if (*mi && (*mi)->offset == offset && is_type_marker((*mi)->type)) {
+ if (is_type_marker(type))
+ return mi;
+ mi = &(*mi)->next;
+ }
+
+ if (*mi && (*mi)->offset == offset && type == (*mi)->type)
+ return mi;
+
+ nm = xmalloc(sizeof(*nm));
+ nm->type = type;
+ nm->offset = offset;
+ nm->ref = ref;
+ nm->next = *mi;
+ *mi = nm;
+
+ return &nm->next;
+}
+
static void add_string_markers(struct property *prop)
{
int l, len = prop->val.len;
const char *p = prop->val.val;
+ struct marker **mi = &prop->val.markers;
- for (l = strlen(p) + 1; l < len; l += strlen(p + l) + 1) {
- struct marker *m, **nextp;
-
- m = xmalloc(sizeof(*m));
- m->offset = l;
- m->type = TYPE_STRING;
- m->ref = NULL;
- m->next = NULL;
-
- /* Find the end of the markerlist */
- nextp = &prop->val.markers;
- while (*nextp)
- nextp = &((*nextp)->next);
- *nextp = m;
- }
+ for (l = strlen(p) + 1; l < len; l += strlen(p + l) + 1)
+ mi = add_marker(mi, TYPE_STRING, l, NULL);
}
static enum markertype guess_value_type(struct property *prop)
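
The invariant comment above describes an ordered insert: markers stay sorted by offset, each offset carries at most one type marker, and that type marker sorts ahead of any non-type markers at the same offset. A rough Python model of add_marker() under those rules, with the TYPE_*/REF_PHANDLE values and is_type_marker() as stand-ins for the dtc definitions:

    TYPE_STRING, TYPE_UINT32, REF_PHANDLE = 'string', 'uint32', 'ref'

    def is_type_marker(t):
        return t in (TYPE_STRING, TYPE_UINT32)

    def add_marker(markers, mtype, offset, ref=None):
        i = 0
        while i < len(markers) and markers[i][1] < offset:
            i += 1
        # A type marker sorts first at its offset; keep at most one of them.
        if i < len(markers) and markers[i][1] == offset and \
                is_type_marker(markers[i][0]):
            if is_type_marker(mtype):
                return markers
            i += 1
        if i < len(markers) and markers[i][1] == offset and markers[i][0] == mtype:
            return markers                    # exact duplicate
        markers.insert(i, (mtype, offset, ref))
        return markers

    m = []
    add_marker(m, REF_PHANDLE, 4, '&clk')
    add_marker(m, TYPE_UINT32, 4)   # type marker lands ahead of the ref
    add_marker(m, TYPE_STRING, 4)   # second type marker at offset 4: dropped
    assert m == [(TYPE_UINT32, 4, None), (REF_PHANDLE, 4, '&clk')]
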
diff --git a/scripts/dtc/util.c b/scripts/dtc/util.c
index 507f0120cd13..412592320265 100644
--- a/scripts/dtc/util.c
+++ b/scripts/dtc/util.c
@@ -23,6 +23,22 @@
#include "util.h"
#include "version_gen.h"
+void fprint_path_escaped(FILE *fp, const char *path)
+{
+ const char *p = path;
+
+ while (*p) {
+ if (*p == ' ') {
+ fputc('\\', fp);
+ fputc(' ', fp);
+ } else {
+ fputc(*p, fp);
+ }
+
+ p++;
+ }
+}
+
char *xstrdup(const char *s)
{
int len = strlen(s) + 1;
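
fprint_path_escaped() exists so the dependency file written from srcfile_relative_open() stays parseable by make when a source path contains spaces. The transformation itself is a one-liner; a Python equivalent:

    def escape_path(path):
        # make expects "foo\ bar.dts" in a depfile for a file "foo bar.dts"
        return path.replace(' ', '\\ ')

    assert escape_path('boards/my board.dts') == 'boards/my\\ board.dts'
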
diff --git a/scripts/dtc/util.h b/scripts/dtc/util.h
index b448cd79efd3..800f2e2c55b1 100644
--- a/scripts/dtc/util.h
+++ b/scripts/dtc/util.h
@@ -42,6 +42,11 @@ static inline void NORETURN PRINTF(1, 2) die(const char *str, ...)
exit(1);
}
+/**
+ * Writes path to fp, escaping spaces with a backslash.
+ */
+void fprint_path_escaped(FILE *fp, const char *path);
+
static inline void *xmalloc(size_t len)
{
void *new = malloc(len);
diff --git a/scripts/dtc/version_gen.h b/scripts/dtc/version_gen.h
index bf81ce593685..226c48bf75dc 100644
--- a/scripts/dtc/version_gen.h
+++ b/scripts/dtc/version_gen.h
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.7.0-gbcd02b52"
+#define DTC_VERSION "DTC 1.7.2-g52f07dcc"
diff --git a/scripts/extract-vmlinux b/scripts/extract-vmlinux
index 189956b5a5c8..266df9bc7a48 100755
--- a/scripts/extract-vmlinux
+++ b/scripts/extract-vmlinux
@@ -10,12 +10,15 @@
#
# ----------------------------------------------------------------------
+me=${0##*/}
+
check_vmlinux()
{
if file "$1" | grep -q 'Linux kernel.*boot executable' ||
readelf -h "$1" > /dev/null 2>&1
then
cat "$1"
+ echo "$me: Extracted vmlinux using '$2' from offset $3" >&2
exit 0
fi
}
@@ -30,12 +33,11 @@ try_decompress()
do
pos=${pos%%:*}
tail -c+$pos "$img" | $3 > $tmp 2> /dev/null
- check_vmlinux $tmp
+ check_vmlinux $tmp "$3" $pos
done
}
# Check invocation:
-me=${0##*/}
img=$1
if [ $# -ne 1 -o ! -s "$img" ]
then
@@ -57,7 +59,7 @@ try_decompress '\002!L\030' xxx 'lz4 -d'
try_decompress '(\265/\375' xxx unzstd
# Finally check for uncompressed images or objects:
-check_vmlinux $img
+check_vmlinux "$img" cat 0
# Bail out:
echo "$me: Cannot find vmlinux." >&2
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 6cb6d1051815..8f1b3500f8e2 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -173,10 +173,17 @@ static inline opt_pass *get_pass_for_id(int id)
return g->get_passes()->get_pass_for_id(id);
}
+#if BUILDING_GCC_VERSION < 16000
#define TODO_verify_ssa TODO_verify_il
#define TODO_verify_flow TODO_verify_il
#define TODO_verify_stmts TODO_verify_il
#define TODO_verify_rtl_sharing TODO_verify_il
+#else
+#define TODO_verify_ssa 0
+#define TODO_verify_flow 0
+#define TODO_verify_stmts 0
+#define TODO_verify_rtl_sharing 0
+#endif
#define INSN_DELETED_P(insn) (insn)->deleted()
diff --git a/scripts/gdb/linux/timerlist.py b/scripts/gdb/linux/timerlist.py
index 98445671fe83..ccc24d30de80 100644
--- a/scripts/gdb/linux/timerlist.py
+++ b/scripts/gdb/linux/timerlist.py
@@ -56,8 +56,6 @@ def print_base(base):
text += " .index: {}\n".format(base['index'])
text += " .resolution: {} nsecs\n".format(constants.LX_hrtimer_resolution)
-
- text += " .get_time: {}\n".format(base['get_time'])
if constants.LX_CONFIG_HIGH_RES_TIMERS:
text += " .offset: {} nsecs\n".format(base['offset'])
text += "active timers:\n"
diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py
index 7c3ea2b55041..fc27f0cca752 100755
--- a/scripts/generate_rust_analyzer.py
+++ b/scripts/generate_rust_analyzer.py
@@ -139,8 +139,8 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs, core_edit
"exclude_dirs": [],
}
- append_crate_with_generated("bindings", ["core", "ffi"])
- append_crate_with_generated("uapi", ["core", "ffi"])
+ append_crate_with_generated("bindings", ["core", "ffi", "pin_init"])
+ append_crate_with_generated("uapi", ["core", "ffi", "pin_init"])
append_crate_with_generated("kernel", ["core", "macros", "build_error", "pin_init", "ffi", "bindings", "uapi"])
def is_root_crate(build_file, target):
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
index 6bbccb43f7e7..4c20c62c4faf 100755
--- a/scripts/headers_install.sh
+++ b/scripts/headers_install.sh
@@ -32,7 +32,7 @@ fi
sed -E -e '
s/([[:space:](])(__user|__force|__iomem)[[:space:]]/\1/g
s/__attribute_const__([[:space:]]|$)/\1/g
- s@^#include <linux/compiler(|_types).h>@@
+ s@^#include <linux/compiler.h>@@
s/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g
s/(^|[[:space:](])(inline|asm|volatile)([[:space:](]|$)/\1__\2__\3/g
s@#(ifndef|define|endif[[:space:]]*/[*])[[:space:]]*_UAPI@#\1 @
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index fe2231e0e6a4..5f900d18dae0 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -145,6 +145,7 @@ struct symbol {
#define SYMBOL_CONST 0x0001 /* symbol is const */
#define SYMBOL_CHECK 0x0008 /* used during dependency checking */
#define SYMBOL_VALID 0x0080 /* set when symbol.curr is calculated */
+#define SYMBOL_TRANS      0x0100  /* symbol is transitional only (not visible) */
#define SYMBOL_WRITE 0x0200 /* write symbol to file (KCONFIG_CONFIG) */
#define SYMBOL_WRITTEN 0x0800 /* track info to avoid double-write to .config */
#define SYMBOL_CHECKED 0x2000 /* used during dependency checking */
diff --git a/scripts/kconfig/lexer.l b/scripts/kconfig/lexer.l
index 9c2cdfc33c6f..6d2c92c6095d 100644
--- a/scripts/kconfig/lexer.l
+++ b/scripts/kconfig/lexer.l
@@ -126,6 +126,7 @@ n [A-Za-z0-9_-]
"select" return T_SELECT;
"source" return T_SOURCE;
"string" return T_STRING;
+"transitional" return T_TRANSITIONAL;
"tristate" return T_TRISTATE;
"visible" return T_VISIBLE;
"||" return T_OR;
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 7206437e784a..2d097bc7ef1a 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -173,12 +173,10 @@ void fill_window(WINDOW *win, const char *text)
/* do not go over end of line */
total_lines = min(total_lines, y);
for (i = 0; i < total_lines; i++) {
- char tmp[x+10];
const char *line = get_line(text, i);
- int len = get_line_length(line);
- strncpy(tmp, line, min(len, x));
- tmp[len] = '\0';
- mvwprintw(win, i, 0, "%s", tmp);
+ int len = min(get_line_length(line), x);
+
+ mvwprintw(win, i, 0, "%.*s", len, line);
}
}
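
The fix above drops the on-stack copy entirely: the "%.*s" conversion takes the maximum length as a separate int argument and truncates during output, with no intermediate buffer to overrun. Python's %-formatting accepts the same notation, which makes for a quick demonstration:

    line = 'a very long line of menu text'
    width = 10
    assert '%.*s' % (width, line) == 'a very lon'
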
diff --git a/scripts/kconfig/parser.y b/scripts/kconfig/parser.y
index e9c3c664e925..49b79dde1725 100644
--- a/scripts/kconfig/parser.y
+++ b/scripts/kconfig/parser.y
@@ -75,6 +75,7 @@ struct menu *current_menu, *current_entry, *current_choice;
%token T_SELECT
%token T_SOURCE
%token T_STRING
+%token T_TRANSITIONAL
%token T_TRISTATE
%token T_VISIBLE
%token T_EOL
@@ -205,6 +206,12 @@ config_option: T_PROMPT T_WORD_QUOTE if_expr T_EOL
printd(DEBUG_PARSE, "%s:%d:prompt\n", cur_filename, cur_lineno);
};
+config_option: T_TRANSITIONAL T_EOL
+{
+ current_entry->sym->flags |= SYMBOL_TRANS;
+ printd(DEBUG_PARSE, "%s:%d:transitional\n", cur_filename, cur_lineno);
+};
+
config_option: default expr if_expr T_EOL
{
menu_add_expr(P_DEFAULT, $2, $3);
@@ -483,6 +490,43 @@ assign_val:
%%
/**
+ * transitional_check_sanity - check transitional symbols have no other
+ * properties
+ *
+ * @menu: menu of the potentially transitional symbol
+ *
+ * Return: -1 if an error is found, 0 otherwise.
+ */
+static int transitional_check_sanity(const struct menu *menu)
+{
+ struct property *prop;
+
+ if (!menu->sym || !(menu->sym->flags & SYMBOL_TRANS))
+ return 0;
+
+ /* Check for depends and visible conditions. */
+ if ((menu->dep && !expr_is_yes(menu->dep)) ||
+ (menu->visibility && !expr_is_yes(menu->visibility))) {
+ fprintf(stderr, "%s:%d: error: %s",
+ menu->filename, menu->lineno,
+ "transitional symbols can only have help sections\n");
+ return -1;
+ }
+
+ /* Check for any property other than "help". */
+ for (prop = menu->sym->prop; prop; prop = prop->next) {
+ if (prop->type != P_COMMENT) {
+ fprintf(stderr, "%s:%d: error: %s",
+ prop->filename, prop->lineno,
+ "transitional symbols can only have help sections\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
* choice_check_sanity - check sanity of a choice member
*
* @menu: menu of the choice member
@@ -558,6 +602,9 @@ void conf_parse(const char *name)
if (menu->sym && sym_check_deps(menu->sym))
yynerrs++;
+ if (transitional_check_sanity(menu))
+ yynerrs++;
+
if (menu->sym && sym_is_choice(menu->sym)) {
menu_for_each_sub_entry(child, menu)
if (child->sym && choice_check_sanity(child))
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index f8992db1870a..b84c9f2485d1 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -1377,6 +1377,19 @@ ConfigMainWindow::ConfigMainWindow(void)
ConfigList::showPromptAction = new QAction("Show Prompt Options", optGroup);
ConfigList::showPromptAction->setCheckable(true);
+ switch (configList->optMode) {
+ case allOpt:
+ ConfigList::showAllAction->setChecked(true);
+ break;
+ case promptOpt:
+ ConfigList::showPromptAction->setChecked(true);
+ break;
+ case normalOpt:
+ default:
+ ConfigList::showNormalAction->setChecked(true);
+ break;
+ }
+
QAction *showDebugAction = new QAction("Show Debug Info", this);
showDebugAction->setCheckable(true);
connect(showDebugAction, &QAction::toggled,
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 26ab10c0fd76..760cac998381 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -214,6 +214,11 @@ static void sym_calc_visibility(struct symbol *sym)
struct property *prop;
tristate tri;
+ if (sym->flags & SYMBOL_TRANS) {
+ sym->visible = yes;
+ return;
+ }
+
/* any prompt visible? */
tri = no;
for_all_prompts(sym, prop) {
@@ -526,7 +531,7 @@ void sym_calc_value(struct symbol *sym)
}
}
- if (sym_is_choice(sym))
+ if (sym_is_choice(sym) || sym->flags & SYMBOL_TRANS)
sym->flags &= ~SYMBOL_WRITE;
}
diff --git a/scripts/kconfig/tests/conftest.py b/scripts/kconfig/tests/conftest.py
index 2a2a7e2da060..d94b79e012c0 100644
--- a/scripts/kconfig/tests/conftest.py
+++ b/scripts/kconfig/tests/conftest.py
@@ -81,7 +81,22 @@ class Conf:
# For interactive modes such as oldaskconfig, oldconfig,
# send 'Enter' key until the program finishes.
if interactive:
- ps.stdin.write(b'\n')
+ try:
+ ps.stdin.write(b'\n')
+ ps.stdin.flush()
+ except (BrokenPipeError, OSError):
+ # Process has exited, stop sending input
+ break
+
+ # Close stdin gracefully
+ try:
+ ps.stdin.close()
+ except (BrokenPipeError, OSError):
+ # Ignore broken pipe on close
+ pass
+
+ # Wait for process to complete
+ ps.wait()
self.retcode = ps.returncode
self.stdout = ps.stdout.read().decode()
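
The harness change guards each write against the child racing to exit, then closes stdin and reaps the process before reading its output. A standalone sketch of the same pattern; the head -n 3 target is purely illustrative:

    import subprocess

    ps = subprocess.Popen(['head', '-n', '3'], stdin=subprocess.PIPE,
                          stdout=subprocess.DEVNULL)
    while ps.poll() is None:
        try:
            ps.stdin.write(b'\n')
            ps.stdin.flush()
        except (BrokenPipeError, OSError):
            break           # child exited between poll() and write()
    try:
        ps.stdin.close()    # EOF lets a still-running child finish
    except (BrokenPipeError, OSError):
        pass
    ps.wait()
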
diff --git a/scripts/kconfig/tests/err_transitional/Kconfig b/scripts/kconfig/tests/err_transitional/Kconfig
new file mode 100644
index 000000000000..a75ed3b2fe5e
--- /dev/null
+++ b/scripts/kconfig/tests/err_transitional/Kconfig
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+# Test that transitional symbols cannot have properties other than help
+
+config BAD_DEFAULT
+ bool
+ transitional
+ default y
+ help
+ This transitional symbol illegally has a default property.
+
+config BAD_PROMPT
+ bool
+ transitional
+ prompt "Bad prompt"
+ help
+ This transitional symbol illegally has a prompt.
+
+config BAD_SELECT
+ bool
+ transitional
+ select OTHER_SYMBOL
+ help
+ This transitional symbol illegally has a select.
+
+config BAD_IMPLY
+ bool
+ transitional
+ imply OTHER_SYMBOL
+ help
+ This transitional symbol illegally has an imply.
+
+config BAD_DEPENDS
+ bool
+ transitional
+ depends on OTHER_SYMBOL
+ help
+ This transitional symbol illegally has a depends.
+
+config BAD_RANGE
+ int
+ transitional
+ range 1 10
+ help
+ This transitional symbol illegally has a range.
+
+config BAD_NO_TYPE
+ transitional
+ help
+ This transitional symbol illegally has no type specified.
+
+config OTHER_SYMBOL
+ bool
diff --git a/scripts/kconfig/tests/err_transitional/__init__.py b/scripts/kconfig/tests/err_transitional/__init__.py
new file mode 100644
index 000000000000..7dffb5b0833f
--- /dev/null
+++ b/scripts/kconfig/tests/err_transitional/__init__.py
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+"""
+Test that transitional symbols with invalid properties are rejected.
+
+Transitional symbols can only have help sections. Any other properties
+(default, select, depends, etc.) should cause a parser error.
+"""
+
+def test(conf):
+ # This should fail with exit code 1 due to invalid transitional symbol
+ assert conf.olddefconfig() == 1
+
+ # Check that the error message is about transitional symbols
+ assert conf.stderr_contains('expected_stderr')
diff --git a/scripts/kconfig/tests/err_transitional/expected_stderr b/scripts/kconfig/tests/err_transitional/expected_stderr
new file mode 100644
index 000000000000..b52db4f680f4
--- /dev/null
+++ b/scripts/kconfig/tests/err_transitional/expected_stderr
@@ -0,0 +1,7 @@
+Kconfig:46:warning: config symbol defined without type
+Kconfig:7: error: transitional symbols can only have help sections
+Kconfig:14: error: transitional symbols can only have help sections
+Kconfig:21: error: transitional symbols can only have help sections
+Kconfig:28: error: transitional symbols can only have help sections
+Kconfig:32: error: transitional symbols can only have help sections
+Kconfig:42: error: transitional symbols can only have help sections
diff --git a/scripts/kconfig/tests/transitional/Kconfig b/scripts/kconfig/tests/transitional/Kconfig
new file mode 100644
index 000000000000..62c3b24665b9
--- /dev/null
+++ b/scripts/kconfig/tests/transitional/Kconfig
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0
+# Test transitional symbols for config migration with all Kconfig types
+
+# Enable module support for tristate testing
+config MODULES
+ bool "Enable loadable module support"
+ modules
+ default y
+
+# Basic migration tests for all types
+config NEW_BOOL
+ bool "New bool option"
+ default OLD_BOOL
+
+config OLD_BOOL
+ bool
+ transitional
+
+config NEW_TRISTATE
+ tristate "New tristate option"
+ default OLD_TRISTATE
+
+config OLD_TRISTATE
+ tristate
+ transitional
+
+config NEW_STRING
+ string "New string option"
+ default OLD_STRING
+
+config OLD_STRING
+ string
+ transitional
+
+config NEW_HEX
+ hex "New hex option"
+ default OLD_HEX
+
+config OLD_HEX
+ hex
+ transitional
+
+config NEW_INT
+ int "New int option"
+ default OLD_INT
+
+config OLD_INT
+ int
+ transitional
+
+# Precedence tests for all types
+config NEW_BOOL_PRECEDENCE
+ bool "New bool option with precedence"
+ default OLD_BOOL_PRECEDENCE
+
+config OLD_BOOL_PRECEDENCE
+ bool
+ transitional
+
+config NEW_STRING_PRECEDENCE
+ string "New string option with precedence"
+ default OLD_STRING_PRECEDENCE
+
+config OLD_STRING_PRECEDENCE
+ string
+ transitional
+
+config NEW_TRISTATE_PRECEDENCE
+ tristate "New tristate option with precedence"
+ default OLD_TRISTATE_PRECEDENCE
+
+config OLD_TRISTATE_PRECEDENCE
+ tristate
+ transitional
+
+config NEW_HEX_PRECEDENCE
+ hex "New hex option with precedence"
+ default OLD_HEX_PRECEDENCE
+
+config OLD_HEX_PRECEDENCE
+ hex
+ transitional
+
+config NEW_INT_PRECEDENCE
+ int "New int option with precedence"
+ default OLD_INT_PRECEDENCE
+
+config OLD_INT_PRECEDENCE
+ int
+ transitional
+
+# Test that help sections are allowed for transitional symbols
+config OLD_WITH_HELP
+ bool
+ transitional
+ help
+ This transitional symbol has a help section to validate that help is allowed.
+
+config REGULAR_OPTION
+ bool "Regular option"
diff --git a/scripts/kconfig/tests/transitional/__init__.py b/scripts/kconfig/tests/transitional/__init__.py
new file mode 100644
index 000000000000..61937d10edf1
--- /dev/null
+++ b/scripts/kconfig/tests/transitional/__init__.py
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+"""
+Test transitional symbol migration functionality for all Kconfig types.
+
+This tests that:
+- OLD_* options in existing .config cause NEW_* options to be set
+- OLD_* options are not written to the new .config file
+- NEW_* options appear in the new .config file with correct values
+- All Kconfig types work correctly: bool, tristate, string, hex, int
+- User-set NEW values take precedence over conflicting OLD transitional values
+"""
+
+def test(conf):
+ # Run olddefconfig to process the migration with the initial config
+ assert conf.olddefconfig(dot_config='initial_config') == 0
+
+ # Check that the configuration matches expected output
+ assert conf.config_contains('expected_config')
diff --git a/scripts/kconfig/tests/transitional/expected_config b/scripts/kconfig/tests/transitional/expected_config
new file mode 100644
index 000000000000..846e9ddcab91
--- /dev/null
+++ b/scripts/kconfig/tests/transitional/expected_config
@@ -0,0 +1,12 @@
+CONFIG_MODULES=y
+CONFIG_NEW_BOOL=y
+CONFIG_NEW_TRISTATE=m
+CONFIG_NEW_STRING="test string"
+CONFIG_NEW_HEX=0x1234
+CONFIG_NEW_INT=42
+# CONFIG_NEW_BOOL_PRECEDENCE is not set
+CONFIG_NEW_STRING_PRECEDENCE="user value"
+CONFIG_NEW_TRISTATE_PRECEDENCE=y
+CONFIG_NEW_HEX_PRECEDENCE=0xABCD
+CONFIG_NEW_INT_PRECEDENCE=100
+# CONFIG_REGULAR_OPTION is not set
diff --git a/scripts/kconfig/tests/transitional/initial_config b/scripts/kconfig/tests/transitional/initial_config
new file mode 100644
index 000000000000..e648a65e504c
--- /dev/null
+++ b/scripts/kconfig/tests/transitional/initial_config
@@ -0,0 +1,16 @@
+CONFIG_MODULES=y
+CONFIG_OLD_BOOL=y
+CONFIG_OLD_TRISTATE=m
+CONFIG_OLD_STRING="test string"
+CONFIG_OLD_HEX=0x1234
+CONFIG_OLD_INT=42
+# CONFIG_NEW_BOOL_PRECEDENCE is not set
+CONFIG_OLD_BOOL_PRECEDENCE=y
+CONFIG_NEW_STRING_PRECEDENCE="user value"
+CONFIG_OLD_STRING_PRECEDENCE="old value"
+CONFIG_NEW_TRISTATE_PRECEDENCE=y
+CONFIG_OLD_TRISTATE_PRECEDENCE=m
+CONFIG_NEW_HEX_PRECEDENCE=0xABCD
+CONFIG_OLD_HEX_PRECEDENCE=0x5678
+CONFIG_NEW_INT_PRECEDENCE=100
+CONFIG_OLD_INT_PRECEDENCE=200
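
Taken together, initial_config and expected_config encode the migration rule: a NEW_* value already present in the old .config wins, otherwise NEW_* inherits the OLD_* value through its default, and the OLD_* symbol itself is never written back. A toy Python model of that precedence (not kconfig code):

    def migrate(config, pairs):
        olds = {old for old, _ in pairs}
        out = {k: v for k, v in config.items() if k not in olds}
        for old, new in pairs:
            if new not in out and old in config:
                out[new] = config[old]      # NEW_x defaults to OLD_x
        return out

    cfg = {'OLD_BOOL': 'y',
           'NEW_INT_PRECEDENCE': '100', 'OLD_INT_PRECEDENCE': '200'}
    assert migrate(cfg, [('OLD_BOOL', 'NEW_BOOL'),
                         ('OLD_INT_PRECEDENCE', 'NEW_INT_PRECEDENCE')]) == \
        {'NEW_BOOL': 'y', 'NEW_INT_PRECEDENCE': '100'}
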
diff --git a/scripts/kernel-doc.py b/scripts/kernel-doc.py
index fc3d46ef519f..d9fe2bcbd39c 100755
--- a/scripts/kernel-doc.py
+++ b/scripts/kernel-doc.py
@@ -2,8 +2,17 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
#
-# pylint: disable=C0103,R0915
-#
+# pylint: disable=C0103,R0912,R0914,R0915
+
+# NOTE: While kernel-doc requires at least version 3.6 to run, the
+# command line should work with Python 3.2+ (tested with 3.4).
+# The rationale is that it shall fail gracefully during kernel
+# compilation with older Python versions. Due to that:
+# - the encoding line is needed here;
+# - no f-strings can be used in this file;
+# - the libraries that require newer versions can only be imported
+#   after the Python version is checked.
+
# Converted from the kernel-doc script originally written in Perl
# under GPLv2, copyrighted since 1998 by the following authors:
#
@@ -107,9 +116,6 @@ SRC_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR))
-from kdoc_files import KernelFiles # pylint: disable=C0413
-from kdoc_output import RestFormat, ManFormat # pylint: disable=C0413
-
DESC = """
Read C language source or header FILEs, extract embedded documentation comments,
and print formatted documentation to standard output.
@@ -273,14 +279,22 @@ def main():
python_ver = sys.version_info[:2]
if python_ver < (3,6):
- logger.warning("Python 3.6 or later is required by kernel-doc")
+ # Depending on Kernel configuration, kernel-doc --none is called at
+ # build time. As we don't want to break compilation due to the
+ # usage of an old Python version, return 0 here.
+ if args.none:
+            logger.error("Python 3.6 or later is required by kernel-doc. Skipping checks")
+ sys.exit(0)
- # Return 0 here to avoid breaking compilation
- sys.exit(0)
+ sys.exit("Python 3.6 or later is required by kernel-doc. Aborting.")
if python_ver < (3,7):
logger.warning("Python 3.7 or later is required for correct results")
+ # Import kernel-doc libraries only after checking Python version
+ from kdoc_files import KernelFiles # pylint: disable=C0415
+ from kdoc_output import RestFormat, ManFormat # pylint: disable=C0415
+
if args.man:
out_style = ManFormat(modulename=args.modulename)
elif args.none:
@@ -308,11 +322,11 @@ def main():
sys.exit(0)
if args.werror:
- print(f"{error_count} warnings as errors")
+ print("%s warnings as errors" % error_count) # pylint: disable=C0209
sys.exit(error_count)
if args.verbose:
- print(f"{error_count} errors")
+ print("%s errors" % error_count) # pylint: disable=C0209
if args.none:
sys.exit(0)
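
The pattern adopted above is worth spelling out: anything that needs a modern interpreter is imported only after the version gate, and the code before the gate avoids f-strings, since those break at parse time rather than at run time. A minimal sketch of the shape, mirroring the real kdoc_files import:

    import sys

    def main():
        if sys.version_info[:2] < (3, 6):
            sys.exit("Python 3.6 or later is required. Aborting.")
        # Only now is it safe to pull in version-sensitive modules.
        from kdoc_files import KernelFiles    # pylint: disable=C0415
        return KernelFiles
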
diff --git a/scripts/lib/kdoc/kdoc_parser.py b/scripts/lib/kdoc/kdoc_parser.py
index fe730099eca8..2376f180b1fa 100644
--- a/scripts/lib/kdoc/kdoc_parser.py
+++ b/scripts/lib/kdoc/kdoc_parser.py
@@ -46,7 +46,7 @@ doc_decl = doc_com + KernRe(r'(\w+)', cache=False)
known_section_names = 'description|context|returns?|notes?|examples?'
known_sections = KernRe(known_section_names, flags = re.I)
doc_sect = doc_com + \
- KernRe(r'\s*(\@[.\w]+|\@\.\.\.|' + known_section_names + r')\s*:([^:].*)?$',
+ KernRe(r'\s*(@[.\w]+|@\.\.\.|' + known_section_names + r')\s*:([^:].*)?$',
flags=re.I, cache=False)
doc_content = doc_com_body + KernRe(r'(.*)', cache=False)
@@ -54,13 +54,11 @@ doc_inline_start = KernRe(r'^\s*/\*\*\s*$', cache=False)
doc_inline_sect = KernRe(r'\s*\*\s*(@\s*[\w][\w\.]*\s*):(.*)', cache=False)
doc_inline_end = KernRe(r'^\s*\*/\s*$', cache=False)
doc_inline_oneline = KernRe(r'^\s*/\*\*\s*(@[\w\s]+):\s*(.*)\s*\*/\s*$', cache=False)
-attribute = KernRe(r"__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)",
- flags=re.I | re.S, cache=False)
export_symbol = KernRe(r'^\s*EXPORT_SYMBOL(_GPL)?\s*\(\s*(\w+)\s*\)\s*', cache=False)
export_symbol_ns = KernRe(r'^\s*EXPORT_SYMBOL_NS(_GPL)?\s*\(\s*(\w+)\s*,\s*"\S+"\)\s*', cache=False)
-type_param = KernRe(r"\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)", cache=False)
+type_param = KernRe(r"@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)", cache=False)
#
# Tests for the beginning of a kerneldoc block in its various forms.
@@ -75,12 +73,154 @@ doc_begin_func = KernRe(str(doc_com) + # initial " * '
cache = False)
#
+# Here begins a long set of transformations to turn structure member prefixes
+# and macro invocations into something we can parse and generate kdoc for.
+#
+struct_args_pattern = r'([^,)]+)'
+
+struct_xforms = [
+ # Strip attributes
+ (KernRe(r"__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)", flags=re.I | re.S, cache=False), ' '),
+ (KernRe(r'\s*__aligned\s*\([^;]*\)', re.S), ' '),
+ (KernRe(r'\s*__counted_by\s*\([^;]*\)', re.S), ' '),
+ (KernRe(r'\s*__counted_by_(le|be)\s*\([^;]*\)', re.S), ' '),
+ (KernRe(r'\s*__packed\s*', re.S), ' '),
+ (KernRe(r'\s*CRYPTO_MINALIGN_ATTR', re.S), ' '),
+ (KernRe(r'\s*____cacheline_aligned_in_smp', re.S), ' '),
+ (KernRe(r'\s*____cacheline_aligned', re.S), ' '),
+ (KernRe(r'\s*__cacheline_group_(begin|end)\([^\)]+\);'), ''),
+ #
+ # Unwrap struct_group macros based on this definition:
+ # __struct_group(TAG, NAME, ATTRS, MEMBERS...)
+ # which has variants like: struct_group(NAME, MEMBERS...)
+ # Only MEMBERS arguments require documentation.
+ #
+ # Parsing them happens on two steps:
+ #
+ # 1. drop struct group arguments that aren't at MEMBERS,
+ # storing them as STRUCT_GROUP(MEMBERS)
+ #
+ # 2. remove STRUCT_GROUP() ancillary macro.
+ #
+ # The original logic used to remove STRUCT_GROUP() using an
+ # advanced regex:
+ #
+ # \bSTRUCT_GROUP(\(((?:(?>[^)(]+)|(?1))*)\))[^;]*;
+ #
+ # with two patterns that are incompatible with
+ # Python re module, as it has:
+ #
+ # - a recursive pattern: (?1)
+ # - an atomic grouping: (?>...)
+ #
+    # I tried a simpler version, but it didn't work either:
+ # \bSTRUCT_GROUP\(([^\)]+)\)[^;]*;
+ #
+    # It doesn't properly match the closing parenthesis in some cases.
+ #
+    # So, a better solution was crafted: there's now a NestedMatch
+    # class that ensures that delimiters after a search are properly
+    # matched. The removal of STRUCT_GROUP() is therefore handled
+    # separately.
+ #
+ (KernRe(r'\bstruct_group\s*\(([^,]*,)', re.S), r'STRUCT_GROUP('),
+ (KernRe(r'\bstruct_group_attr\s*\(([^,]*,){2}', re.S), r'STRUCT_GROUP('),
+ (KernRe(r'\bstruct_group_tagged\s*\(([^,]*),([^,]*),', re.S), r'struct \1 \2; STRUCT_GROUP('),
+ (KernRe(r'\b__struct_group\s*\(([^,]*,){3}', re.S), r'STRUCT_GROUP('),
+ #
+ # Replace macros
+ #
+ # TODO: use NestedMatch for FOO($1, $2, ...) matches
+ #
+    # It is better to also move those to the NestedMatch logic,
+    # to ensure that parentheses will be properly matched.
+ #
+ (KernRe(r'__ETHTOOL_DECLARE_LINK_MODE_MASK\s*\(([^\)]+)\)', re.S),
+ r'DECLARE_BITMAP(\1, __ETHTOOL_LINK_MODE_MASK_NBITS)'),
+ (KernRe(r'DECLARE_PHY_INTERFACE_MASK\s*\(([^\)]+)\)', re.S),
+ r'DECLARE_BITMAP(\1, PHY_INTERFACE_MODE_MAX)'),
+ (KernRe(r'DECLARE_BITMAP\s*\(' + struct_args_pattern + r',\s*' + struct_args_pattern + r'\)',
+ re.S), r'unsigned long \1[BITS_TO_LONGS(\2)]'),
+ (KernRe(r'DECLARE_HASHTABLE\s*\(' + struct_args_pattern + r',\s*' + struct_args_pattern + r'\)',
+ re.S), r'unsigned long \1[1 << ((\2) - 1)]'),
+ (KernRe(r'DECLARE_KFIFO\s*\(' + struct_args_pattern + r',\s*' + struct_args_pattern +
+ r',\s*' + struct_args_pattern + r'\)', re.S), r'\2 *\1'),
+ (KernRe(r'DECLARE_KFIFO_PTR\s*\(' + struct_args_pattern + r',\s*' +
+ struct_args_pattern + r'\)', re.S), r'\2 *\1'),
+ (KernRe(r'(?:__)?DECLARE_FLEX_ARRAY\s*\(' + struct_args_pattern + r',\s*' +
+ struct_args_pattern + r'\)', re.S), r'\1 \2[]'),
+ (KernRe(r'DEFINE_DMA_UNMAP_ADDR\s*\(' + struct_args_pattern + r'\)', re.S), r'dma_addr_t \1'),
+ (KernRe(r'DEFINE_DMA_UNMAP_LEN\s*\(' + struct_args_pattern + r'\)', re.S), r'__u32 \1'),
+]
+#
+# Regexes here are guaranteed to have the end delimiter matching
+# the start delimiter. Yet, right now, only one replace group
+# is allowed.
+#
+struct_nested_prefixes = [
+ (re.compile(r'\bSTRUCT_GROUP\('), r'\1'),
+]
+
+#
+# Transforms for function prototypes
+#
+function_xforms = [
+ (KernRe(r"^static +"), ""),
+ (KernRe(r"^extern +"), ""),
+ (KernRe(r"^asmlinkage +"), ""),
+ (KernRe(r"^inline +"), ""),
+ (KernRe(r"^__inline__ +"), ""),
+ (KernRe(r"^__inline +"), ""),
+ (KernRe(r"^__always_inline +"), ""),
+ (KernRe(r"^noinline +"), ""),
+ (KernRe(r"^__FORTIFY_INLINE +"), ""),
+ (KernRe(r"__init +"), ""),
+ (KernRe(r"__init_or_module +"), ""),
+ (KernRe(r"__deprecated +"), ""),
+ (KernRe(r"__flatten +"), ""),
+ (KernRe(r"__meminit +"), ""),
+ (KernRe(r"__must_check +"), ""),
+ (KernRe(r"__weak +"), ""),
+ (KernRe(r"__sched +"), ""),
+ (KernRe(r"_noprof"), ""),
+ (KernRe(r"__printf\s*\(\s*\d*\s*,\s*\d*\s*\) +"), ""),
+ (KernRe(r"__(?:re)?alloc_size\s*\(\s*\d+\s*(?:,\s*\d+\s*)?\) +"), ""),
+ (KernRe(r"__diagnose_as\s*\(\s*\S+\s*(?:,\s*\d+\s*)*\) +"), ""),
+ (KernRe(r"DECL_BUCKET_PARAMS\s*\(\s*(\S+)\s*,\s*(\S+)\s*\)"), r"\1, \2"),
+ (KernRe(r"__attribute_const__ +"), ""),
+ (KernRe(r"__attribute__\s*\(\((?:[\w\s]+(?:\([^)]*\))?\s*,?)+\)\)\s+"), ""),
+]
+
+#
+# Apply a set of transforms to a block of text.
+#
+def apply_transforms(xforms, text):
+ for search, subst in xforms:
+ text = search.sub(subst, text)
+ return text
+
+#
# A little helper to get rid of excess white space
#
multi_space = KernRe(r'\s\s+')
def trim_whitespace(s):
return multi_space.sub(' ', s.strip())
+#
+# Remove struct/enum members that have been marked "private".
+#
+def trim_private_members(text):
+ #
+ # First look for a "public:" block that ends a private region, then
+ # handle the "private until the end" case.
+ #
+ text = KernRe(r'/\*\s*private:.*?/\*\s*public:.*?\*/', flags=re.S).sub('', text)
+ text = KernRe(r'/\*\s*private:.*', flags=re.S).sub('', text)
+ #
+ # We needed the comments to do the above, but now we can take them out.
+ #
+ return KernRe(r'\s*/\*.*?\*/\s*', flags=re.S).sub('', text).strip()
+
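
Since trim_private_members() is new, a standalone re-only equivalent (illustrative, without the KernRe wrapper) shows its effect on a small struct body:

    import re

    def trim_private(text):
        text = re.sub(r'/\*\s*private:.*?/\*\s*public:.*?\*/', '', text,
                      flags=re.S)
        text = re.sub(r'/\*\s*private:.*', '', text, flags=re.S)
        return re.sub(r'\s*/\*.*?\*/\s*', '', text, flags=re.S).strip()

    body = 'int a; /* doc */ /* private: */ int h; /* public: */ int b;'
    assert trim_private(body) == 'int a;int b;'
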
class state:
"""
State machine enums
@@ -318,36 +458,26 @@ class KernelDoc:
param = KernRe(r'[\[\)].*').sub('', param, count=1)
- if dtype == "" and param.endswith("..."):
- if KernRe(r'\w\.\.\.$').search(param):
- # For named variable parameters of the form `x...`,
- # remove the dots
- param = param[:-3]
- else:
- # Handles unnamed variable parameters
- param = "..."
-
- if param not in self.entry.parameterdescs or \
- not self.entry.parameterdescs[param]:
-
- self.entry.parameterdescs[param] = "variable arguments"
-
- elif dtype == "" and (not param or param == "void"):
- param = "void"
- self.entry.parameterdescs[param] = "no arguments"
-
- elif dtype == "" and param in ["struct", "union"]:
- # Handle unnamed (anonymous) union or struct
- dtype = param
- param = "{unnamed_" + param + "}"
- self.entry.parameterdescs[param] = "anonymous\n"
- self.entry.anon_struct_union = True
-
- # Handle cache group enforcing variables: they do not need
- # to be described in header files
- elif "__cacheline_group" in param:
- # Ignore __cacheline_group_begin and __cacheline_group_end
- return
+ #
+ # Look at various "anonymous type" cases.
+ #
+ if dtype == '':
+ if param.endswith("..."):
+ if len(param) > 3: # there is a name provided, use that
+ param = param[:-3]
+ if not self.entry.parameterdescs.get(param):
+ self.entry.parameterdescs[param] = "variable arguments"
+
+ elif (not param) or param == "void":
+ param = "void"
+ self.entry.parameterdescs[param] = "no arguments"
+
+ elif param in ["struct", "union"]:
+ # Handle unnamed (anonymous) union or struct
+ dtype = param
+ param = "{unnamed_" + param + "}"
+ self.entry.parameterdescs[param] = "anonymous\n"
+ self.entry.anon_struct_union = True
# Warn if parameter has no description
# (but ignore ones starting with # as these are not parameters
@@ -389,9 +519,6 @@ class KernelDoc:
args = arg_expr.sub(r"\1#", args)
for arg in args.split(splitter):
- # Strip comments
- arg = KernRe(r'\/\*.*\*\/').sub('', arg)
-
# Ignore argument attributes
arg = KernRe(r'\sPOS0?\s').sub(' ', arg)
@@ -407,81 +534,76 @@ class KernelDoc:
# Treat preprocessor directive as a typeless variable
self.push_parameter(ln, decl_type, arg, "",
"", declaration_name)
-
+ #
+ # The pointer-to-function case.
+ #
elif KernRe(r'\(.+\)\s*\(').search(arg):
- # Pointer-to-function
-
arg = arg.replace('#', ',')
-
- r = KernRe(r'[^\(]+\(\*?\s*([\w\[\]\.]*)\s*\)')
+ r = KernRe(r'[^\(]+\(\*?\s*' # Everything up to "(*"
+ r'([\w\[\].]*)' # Capture the name and possible [array]
+ r'\s*\)') # Make sure the trailing ")" is there
if r.match(arg):
param = r.group(1)
else:
self.emit_msg(ln, f"Invalid param: {arg}")
param = arg
-
- dtype = KernRe(r'([^\(]+\(\*?)\s*' + re.escape(param)).sub(r'\1', arg)
- self.push_parameter(ln, decl_type, param, dtype,
- arg, declaration_name)
-
+ dtype = arg.replace(param, '')
+ self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name)
+ #
+ # The array-of-pointers case. Dig the parameter name out from the middle
+ # of the declaration.
+ #
elif KernRe(r'\(.+\)\s*\[').search(arg):
- # Array-of-pointers
-
- arg = arg.replace('#', ',')
- r = KernRe(r'[^\(]+\(\s*\*\s*([\w\[\]\.]*?)\s*(\s*\[\s*[\w]+\s*\]\s*)*\)')
+ r = KernRe(r'[^\(]+\(\s*\*\s*' # Up to "(" and maybe "*"
+ r'([\w.]*?)' # The actual pointer name
+ r'\s*(\[\s*\w+\s*\]\s*)*\)') # The [array portion]
if r.match(arg):
param = r.group(1)
else:
self.emit_msg(ln, f"Invalid param: {arg}")
param = arg
-
- dtype = KernRe(r'([^\(]+\(\*?)\s*' + re.escape(param)).sub(r'\1', arg)
-
- self.push_parameter(ln, decl_type, param, dtype,
- arg, declaration_name)
-
+ dtype = arg.replace(param, '')
+ self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name)
elif arg:
+ #
+ # Clean up extraneous spaces and split the string at commas; the first
+ # element of the resulting list will also include the type information.
+ #
arg = KernRe(r'\s*:\s*').sub(":", arg)
arg = KernRe(r'\s*\[').sub('[', arg)
-
args = KernRe(r'\s*,\s*').split(arg)
- if args[0] and '*' in args[0]:
- args[0] = re.sub(r'(\*+)\s*', r' \1', args[0])
-
- first_arg = []
- r = KernRe(r'^(.*\s+)(.*?\[.*\].*)$')
- if args[0] and r.match(args[0]):
- args.pop(0)
- first_arg.extend(r.group(1))
- first_arg.append(r.group(2))
+ args[0] = re.sub(r'(\*+)\s*', r' \1', args[0])
+ #
+ # args[0] has a string of "type a". If "a" includes an [array]
+            # declaration, we do not want to be fooled by any white space inside
+ # the brackets, so detect and handle that case specially.
+ #
+ r = KernRe(r'^([^[\]]*\s+)(.*)$')
+ if r.match(args[0]):
+ args[0] = r.group(2)
+ dtype = r.group(1)
else:
- first_arg = KernRe(r'\s+').split(args.pop(0))
-
- args.insert(0, first_arg.pop())
- dtype = ' '.join(first_arg)
+ # No space in args[0]; this seems wrong but preserves previous behavior
+ dtype = ''
+ bitfield_re = KernRe(r'(.*?):(\w+)')
for param in args:
- if KernRe(r'^(\*+)\s*(.*)').match(param):
- r = KernRe(r'^(\*+)\s*(.*)')
- if not r.match(param):
- self.emit_msg(ln, f"Invalid param: {param}")
- continue
-
- param = r.group(1)
-
+ #
+ # For pointers, shift the star(s) from the variable name to the
+ # type declaration.
+ #
+ r = KernRe(r'^(\*+)\s*(.*)')
+ if r.match(param):
self.push_parameter(ln, decl_type, r.group(2),
f"{dtype} {r.group(1)}",
arg, declaration_name)
-
- elif KernRe(r'(.*?):(\w+)').search(param):
- r = KernRe(r'(.*?):(\w+)')
- if not r.match(param):
- self.emit_msg(ln, f"Invalid param: {param}")
- continue
-
+ #
+ # Perform a similar shift for bitfields.
+ #
+ elif bitfield_re.search(param):
if dtype != "": # Skip unnamed bit-fields
- self.push_parameter(ln, decl_type, r.group(1),
- f"{dtype}:{r.group(2)}",
+ self.push_parameter(ln, decl_type, bitfield_re.group(1),
+ f"{dtype}:{bitfield_re.group(2)}",
arg, declaration_name)
else:
self.push_parameter(ln, decl_type, param, dtype,
@@ -520,13 +642,11 @@ class KernelDoc:
self.emit_msg(ln,
f"No description found for return value of '{declaration_name}'")
- def dump_struct(self, ln, proto):
- """
- Store an entry for an struct or union
- """
-
+ #
+ # Split apart a structure prototype; returns (struct|union, name, members) or None
+ #
+ def split_struct_proto(self, proto):
type_pattern = r'(struct|union)'
-
qualifiers = [
"__attribute__",
"__packed",
@@ -534,288 +654,202 @@ class KernelDoc:
"____cacheline_aligned_in_smp",
"____cacheline_aligned",
]
-
definition_body = r'\{(.*)\}\s*' + "(?:" + '|'.join(qualifiers) + ")?"
- struct_members = KernRe(type_pattern + r'([^\{\};]+)(\{)([^\{\}]*)(\})([^\{\}\;]*)(\;)')
-
- # Extract struct/union definition
- members = None
- declaration_name = None
- decl_type = None
r = KernRe(type_pattern + r'\s+(\w+)\s*' + definition_body)
if r.search(proto):
- decl_type = r.group(1)
- declaration_name = r.group(2)
- members = r.group(3)
+ return (r.group(1), r.group(2), r.group(3))
else:
r = KernRe(r'typedef\s+' + type_pattern + r'\s*' + definition_body + r'\s*(\w+)\s*;')
-
if r.search(proto):
- decl_type = r.group(1)
- declaration_name = r.group(3)
- members = r.group(2)
-
- if not members:
- self.emit_msg(ln, f"{proto} error: Cannot parse struct or union!")
- return
-
- if self.entry.identifier != declaration_name:
- self.emit_msg(ln,
- f"expecting prototype for {decl_type} {self.entry.identifier}. Prototype was for {decl_type} {declaration_name} instead\n")
- return
-
- args_pattern = r'([^,)]+)'
-
- sub_prefixes = [
- (KernRe(r'\/\*\s*private:.*?\/\*\s*public:.*?\*\/', re.S | re.I), ''),
- (KernRe(r'\/\*\s*private:.*', re.S | re.I), ''),
-
- # Strip comments
- (KernRe(r'\/\*.*?\*\/', re.S), ''),
-
- # Strip attributes
- (attribute, ' '),
- (KernRe(r'\s*__aligned\s*\([^;]*\)', re.S), ' '),
- (KernRe(r'\s*__counted_by\s*\([^;]*\)', re.S), ' '),
- (KernRe(r'\s*__counted_by_(le|be)\s*\([^;]*\)', re.S), ' '),
- (KernRe(r'\s*__packed\s*', re.S), ' '),
- (KernRe(r'\s*CRYPTO_MINALIGN_ATTR', re.S), ' '),
- (KernRe(r'\s*____cacheline_aligned_in_smp', re.S), ' '),
- (KernRe(r'\s*____cacheline_aligned', re.S), ' '),
-
- # Unwrap struct_group macros based on this definition:
- # __struct_group(TAG, NAME, ATTRS, MEMBERS...)
- # which has variants like: struct_group(NAME, MEMBERS...)
- # Only MEMBERS arguments require documentation.
- #
- # Parsing them happens on two steps:
- #
- # 1. drop struct group arguments that aren't at MEMBERS,
- # storing them as STRUCT_GROUP(MEMBERS)
- #
- # 2. remove STRUCT_GROUP() ancillary macro.
- #
- # The original logic used to remove STRUCT_GROUP() using an
- # advanced regex:
- #
- # \bSTRUCT_GROUP(\(((?:(?>[^)(]+)|(?1))*)\))[^;]*;
- #
- # with two patterns that are incompatible with
- # Python re module, as it has:
- #
- # - a recursive pattern: (?1)
- # - an atomic grouping: (?>...)
- #
- # I tried a simpler version: but it didn't work either:
- # \bSTRUCT_GROUP\(([^\)]+)\)[^;]*;
- #
- # As it doesn't properly match the end parenthesis on some cases.
- #
- # So, a better solution was crafted: there's now a NestedMatch
- # class that ensures that delimiters after a search are properly
- # matched. So, the implementation to drop STRUCT_GROUP() will be
- # handled in separate.
-
- (KernRe(r'\bstruct_group\s*\(([^,]*,)', re.S), r'STRUCT_GROUP('),
- (KernRe(r'\bstruct_group_attr\s*\(([^,]*,){2}', re.S), r'STRUCT_GROUP('),
- (KernRe(r'\bstruct_group_tagged\s*\(([^,]*),([^,]*),', re.S), r'struct \1 \2; STRUCT_GROUP('),
- (KernRe(r'\b__struct_group\s*\(([^,]*,){3}', re.S), r'STRUCT_GROUP('),
-
- # Replace macros
- #
- # TODO: use NestedMatch for FOO($1, $2, ...) matches
- #
- # it is better to also move those to the NestedMatch logic,
- # to ensure that parenthesis will be properly matched.
-
- (KernRe(r'__ETHTOOL_DECLARE_LINK_MODE_MASK\s*\(([^\)]+)\)', re.S), r'DECLARE_BITMAP(\1, __ETHTOOL_LINK_MODE_MASK_NBITS)'),
- (KernRe(r'DECLARE_PHY_INTERFACE_MASK\s*\(([^\)]+)\)', re.S), r'DECLARE_BITMAP(\1, PHY_INTERFACE_MODE_MAX)'),
- (KernRe(r'DECLARE_BITMAP\s*\(' + args_pattern + r',\s*' + args_pattern + r'\)', re.S), r'unsigned long \1[BITS_TO_LONGS(\2)]'),
- (KernRe(r'DECLARE_HASHTABLE\s*\(' + args_pattern + r',\s*' + args_pattern + r'\)', re.S), r'unsigned long \1[1 << ((\2) - 1)]'),
- (KernRe(r'DECLARE_KFIFO\s*\(' + args_pattern + r',\s*' + args_pattern + r',\s*' + args_pattern + r'\)', re.S), r'\2 *\1'),
- (KernRe(r'DECLARE_KFIFO_PTR\s*\(' + args_pattern + r',\s*' + args_pattern + r'\)', re.S), r'\2 *\1'),
- (KernRe(r'(?:__)?DECLARE_FLEX_ARRAY\s*\(' + args_pattern + r',\s*' + args_pattern + r'\)', re.S), r'\1 \2[]'),
- (KernRe(r'DEFINE_DMA_UNMAP_ADDR\s*\(' + args_pattern + r'\)', re.S), r'dma_addr_t \1'),
- (KernRe(r'DEFINE_DMA_UNMAP_LEN\s*\(' + args_pattern + r'\)', re.S), r'__u32 \1'),
- (KernRe(r'VIRTIO_DECLARE_FEATURES\s*\(' + args_pattern + r'\)', re.S), r'u64 \1; u64 \1_array[VIRTIO_FEATURES_DWORDS]'),
- ]
-
- # Regexes here are guaranteed to have the end limiter matching
- # the start delimiter. Yet, right now, only one replace group
- # is allowed.
-
- sub_nested_prefixes = [
- (re.compile(r'\bSTRUCT_GROUP\('), r'\1'),
- ]
-
- for search, sub in sub_prefixes:
- members = search.sub(sub, members)
-
- nested = NestedMatch()
-
- for search, sub in sub_nested_prefixes:
- members = nested.sub(search, sub, members)
-
- # Keeps the original declaration as-is
- declaration = members
-
- # Split nested struct/union elements
- #
- # This loop was simpler at the original kernel-doc perl version, as
- # while ($members =~ m/$struct_members/) { ... }
- # reads 'members' string on each interaction.
- #
- # Python behavior is different: it parses 'members' only once,
- # creating a list of tuples from the first interaction.
+ return (r.group(1), r.group(3), r.group(2))
+ return None
+ #
+ # Rewrite the members of a structure or union for easier formatting later on.
+ # Among other things, this function will turn a member like:
+ #
+ # struct { inner_members; } foo;
+ #
+ # into:
+ #
+ # struct foo; inner_members;
+ #
+ def rewrite_struct_members(self, members):
#
- # On other words, this won't get nested structs.
+ # Process struct/union members from the most deeply nested outward. The
+ # trick is in the ^{ below - it prevents a match of an outer struct/union
+ # until the inner one has been munged (removing the "{" in the process).
#
- # So, we need to have an extra loop on Python to override such
- # re limitation.
-
- while True:
- tuples = struct_members.findall(members)
- if not tuples:
- break
-
+ struct_members = KernRe(r'(struct|union)' # 0: declaration type
+ r'([^\{\};]+)' # 1: possible name
+ r'(\{)'
+ r'([^\{\}]*)' # 3: Contents of declaration
+ r'(\})'
+ r'([^\{\};]*)(;)') # 5: Remaining stuff after declaration
+ tuples = struct_members.findall(members)
+ while tuples:
for t in tuples:
newmember = ""
- maintype = t[0]
- s_ids = t[5]
- content = t[3]
-
- oldmember = "".join(t)
-
- for s_id in s_ids.split(','):
+ oldmember = "".join(t) # Reconstruct the original formatting
+ dtype, name, lbr, content, rbr, rest, semi = t
+ #
+ # Pass through each field name, normalizing the form and formatting.
+ #
+ for s_id in rest.split(','):
s_id = s_id.strip()
-
- newmember += f"{maintype} {s_id}; "
+ newmember += f"{dtype} {s_id}; "
+ #
+ # Remove bitfield/array/pointer info, getting the bare name.
+ #
s_id = KernRe(r'[:\[].*').sub('', s_id)
s_id = KernRe(r'^\s*\**(\S+)\s*').sub(r'\1', s_id)
-
+ #
+ # Pass through the members of this inner structure/union.
+ #
for arg in content.split(';'):
arg = arg.strip()
-
- if not arg:
- continue
-
- r = KernRe(r'^([^\(]+\(\*?\s*)([\w\.]*)(\s*\).*)')
+ #
+ # Look for (type)(*name)(args) - pointer to function
+ #
+ r = KernRe(r'^([^\(]+\(\*?\s*)([\w.]*)(\s*\).*)')
if r.match(arg):
+ dtype, name, extra = r.group(1), r.group(2), r.group(3)
# Pointer-to-function
- dtype = r.group(1)
- name = r.group(2)
- extra = r.group(3)
-
- if not name:
- continue
-
if not s_id:
# Anonymous struct/union
newmember += f"{dtype}{name}{extra}; "
else:
newmember += f"{dtype}{s_id}.{name}{extra}; "
-
+ #
+ # Otherwise a non-function member.
+ #
else:
- arg = arg.strip()
- # Handle bitmaps
+ #
+ # Remove bitmap and array portions and spaces around commas
+ #
arg = KernRe(r':\s*\d+\s*').sub('', arg)
-
- # Handle arrays
arg = KernRe(r'\[.*\]').sub('', arg)
-
- # Handle multiple IDs
arg = KernRe(r'\s*,\s*').sub(',', arg)
-
+ #
+ # Look for a normal decl - "type name[,name...]"
+ #
r = KernRe(r'(.*)\s+([\S+,]+)')
-
if r.search(arg):
- dtype = r.group(1)
- names = r.group(2)
+ for name in r.group(2).split(','):
+ name = KernRe(r'^\s*\**(\S+)\s*').sub(r'\1', name)
+ if not s_id:
+ # Anonymous struct/union
+ newmember += f"{r.group(1)} {name}; "
+ else:
+ newmember += f"{r.group(1)} {s_id}.{name}; "
else:
newmember += f"{arg}; "
- continue
-
- for name in names.split(','):
- name = KernRe(r'^\s*\**(\S+)\s*').sub(r'\1', name).strip()
-
- if not name:
- continue
-
- if not s_id:
- # Anonymous struct/union
- newmember += f"{dtype} {name}; "
- else:
- newmember += f"{dtype} {s_id}.{name}; "
-
+ #
+ # At the end of the s_id loop, replace the original declaration with
+ # the munged version.
+ #
members = members.replace(oldmember, newmember)
+ #
+ # End of the tuple loop - search again and see if there are outer members
+ # that now turn up.
+ #
+ tuples = struct_members.findall(members)
+ return members
- # Ignore other nested elements, like enums
- members = re.sub(r'(\{[^\{\}]*\})', '', members)
-
- self.create_parameter_list(ln, decl_type, members, ';',
- declaration_name)
- self.check_sections(ln, declaration_name, decl_type)
-
- # Adjust declaration for better display
+ #
+ # Format the struct declaration into a standard form for inclusion in the
+ # resulting docs.
+ #
+ def format_struct_decl(self, declaration):
+ #
+ # Insert newlines, get rid of extra spaces.
+ #
declaration = KernRe(r'([\{;])').sub(r'\1\n', declaration)
declaration = KernRe(r'\}\s+;').sub('};', declaration)
-
- # Better handle inlined enums
- while True:
- r = KernRe(r'(enum\s+\{[^\}]+),([^\n])')
- if not r.search(declaration):
- break
-
+ #
+ # Format inline enums with each member on its own line.
+ #
+ r = KernRe(r'(enum\s+\{[^\}]+),([^\n])')
+ while r.search(declaration):
declaration = r.sub(r'\1,\n\2', declaration)
-
+ #
+ # Now go through and supply the right number of tabs
+ # for each line.
+ #
def_args = declaration.split('\n')
level = 1
declaration = ""
for clause in def_args:
+ clause = KernRe(r'\s+').sub(' ', clause.strip(), count=1)
+ if clause:
+ if '}' in clause and level > 1:
+ level -= 1
+ if not clause.startswith('#'):
+ declaration += "\t" * level
+ declaration += "\t" + clause + "\n"
+ if "{" in clause and "}" not in clause:
+ level += 1
+ return declaration
- clause = clause.strip()
- clause = KernRe(r'\s+').sub(' ', clause, count=1)
-
- if not clause:
- continue
-
- if '}' in clause and level > 1:
- level -= 1
- if not KernRe(r'^\s*#').match(clause):
- declaration += "\t" * level
+ def dump_struct(self, ln, proto):
+ """
+        Store an entry for a struct or union
+ """
+ #
+ # Do the basic parse to get the pieces of the declaration.
+ #
+ struct_parts = self.split_struct_proto(proto)
+ if not struct_parts:
+ self.emit_msg(ln, f"{proto} error: Cannot parse struct or union!")
+ return
+ decl_type, declaration_name, members = struct_parts
- declaration += "\t" + clause + "\n"
- if "{" in clause and "}" not in clause:
- level += 1
+ if self.entry.identifier != declaration_name:
+ self.emit_msg(ln, f"expecting prototype for {decl_type} {self.entry.identifier}. "
+ f"Prototype was for {decl_type} {declaration_name} instead\n")
+ return
+ #
+ # Go through the list of members applying all of our transformations.
+ #
+ members = trim_private_members(members)
+ members = apply_transforms(struct_xforms, members)
+ nested = NestedMatch()
+ for search, sub in struct_nested_prefixes:
+ members = nested.sub(search, sub, members)
+ #
+ # Deal with embedded struct and union members, and drop enums entirely.
+ #
+ declaration = members
+ members = self.rewrite_struct_members(members)
+ members = re.sub(r'(\{[^\{\}]*\})', '', members)
+ #
+ # Output the result and we are done.
+ #
+ self.create_parameter_list(ln, decl_type, members, ';',
+ declaration_name)
+ self.check_sections(ln, declaration_name, decl_type)
self.output_declaration(decl_type, declaration_name,
- definition=declaration,
+ definition=self.format_struct_decl(declaration),
purpose=self.entry.declaration_purpose)
def dump_enum(self, ln, proto):
"""
Stores an enum inside self.entries array.
"""
-
- # Ignore members marked private
- proto = KernRe(r'\/\*\s*private:.*?\/\*\s*public:.*?\*\/', flags=re.S).sub('', proto)
- proto = KernRe(r'\/\*\s*private:.*}', flags=re.S).sub('}', proto)
-
- # Strip comments
- proto = KernRe(r'\/\*.*?\*\/', flags=re.S).sub('', proto)
-
- # Strip #define macros inside enums
+ #
+ # Strip preprocessor directives. Note that this depends on the
+ # trailing semicolon we added in process_proto_type().
+ #
proto = KernRe(r'#\s*((define|ifdef|if)\s+|endif)[^;]*;', flags=re.S).sub('', proto)
-
#
# Parse out the name and members of the enum. Typedef form first.
#
r = KernRe(r'typedef\s+enum\s*\{(.*)\}\s*(\w*)\s*;')
if r.search(proto):
declaration_name = r.group(2)
- members = r.group(1).rstrip()
+ members = trim_private_members(r.group(1))
#
# Failing that, look for a straight enum
#
@@ -823,7 +857,7 @@ class KernelDoc:
r = KernRe(r'enum\s+(\w*)\s*\{(.*)\}')
if r.match(proto):
declaration_name = r.group(1)
- members = r.group(2).rstrip()
+ members = trim_private_members(r.group(2))
#
# OK, this isn't going to work.
#
@@ -892,62 +926,31 @@ class KernelDoc:
        Stores a function or function macro inside self.entries array.
"""
- func_macro = False
+ found = func_macro = False
return_type = ''
decl_type = 'function'
-
- # Prefixes that would be removed
- sub_prefixes = [
- (r"^static +", "", 0),
- (r"^extern +", "", 0),
- (r"^asmlinkage +", "", 0),
- (r"^inline +", "", 0),
- (r"^__inline__ +", "", 0),
- (r"^__inline +", "", 0),
- (r"^__always_inline +", "", 0),
- (r"^noinline +", "", 0),
- (r"^__FORTIFY_INLINE +", "", 0),
- (r"__init +", "", 0),
- (r"__init_or_module +", "", 0),
- (r"__deprecated +", "", 0),
- (r"__flatten +", "", 0),
- (r"__meminit +", "", 0),
- (r"__must_check +", "", 0),
- (r"__weak +", "", 0),
- (r"__sched +", "", 0),
- (r"_noprof", "", 0),
- (r"__printf\s*\(\s*\d*\s*,\s*\d*\s*\) +", "", 0),
- (r"__(?:re)?alloc_size\s*\(\s*\d+\s*(?:,\s*\d+\s*)?\) +", "", 0),
- (r"__diagnose_as\s*\(\s*\S+\s*(?:,\s*\d+\s*)*\) +", "", 0),
- (r"DECL_BUCKET_PARAMS\s*\(\s*(\S+)\s*,\s*(\S+)\s*\)", r"\1, \2", 0),
- (r"__attribute_const__ +", "", 0),
-
- # It seems that Python support for re.X is broken:
- # At least for me (Python 3.13), this didn't work
-# (r"""
-# __attribute__\s*\(\(
-# (?:
-# [\w\s]+ # attribute name
-# (?:\([^)]*\))? # attribute arguments
-# \s*,? # optional comma at the end
-# )+
-# \)\)\s+
-# """, "", re.X),
-
- # So, remove whitespaces and comments from it
- (r"__attribute__\s*\(\((?:[\w\s]+(?:\([^)]*\))?\s*,?)+\)\)\s+", "", 0),
- ]
-
- for search, sub, flags in sub_prefixes:
- prototype = KernRe(search, flags).sub(sub, prototype)
-
- # Macros are a special case, as they change the prototype format
+ #
+ # Apply the initial transformations.
+ #
+ prototype = apply_transforms(function_xforms, prototype)
+ #
+ # If we have a macro, remove the "#define" at the front.
+ #
new_proto = KernRe(r"^#\s*define\s+").sub("", prototype)
if new_proto != prototype:
- is_define_proto = True
prototype = new_proto
- else:
- is_define_proto = False
+ #
+ # Dispense with the simple "#define A B" case here; the key
+ # is the space after the name of the symbol being defined.
+ # NOTE that the seemingly misnamed "func_macro" indicates a
+ # macro *without* arguments.
+ #
+ r = KernRe(r'^(\w+)\s+')
+ if r.search(prototype):
+ return_type = ''
+ declaration_name = r.group(1)
+ func_macro = True
+ found = True
# Yes, this truly is vile. We are looking for:
# 1. Return type (may be nothing if we're looking at a macro)
@@ -965,91 +968,73 @@ class KernelDoc:
# - atomic_set (macro)
# - pci_match_device, __copy_to_user (long return type)
- name = r'[a-zA-Z0-9_~:]+'
- prototype_end1 = r'[^\(]*'
- prototype_end2 = r'[^\{]*'
- prototype_end = fr'\(({prototype_end1}|{prototype_end2})\)'
-
- # Besides compiling, Perl qr{[\w\s]+} works as a non-capturing group.
- # So, this needs to be mapped in Python with (?:...)? or (?:...)+
-
+ name = r'\w+'
type1 = r'(?:[\w\s]+)?'
type2 = r'(?:[\w\s]+\*+)+'
-
- found = False
-
- if is_define_proto:
- r = KernRe(r'^()(' + name + r')\s+')
-
- if r.search(prototype):
- return_type = ''
- declaration_name = r.group(2)
- func_macro = True
-
- found = True
-
+ #
+ # Attempt to match first on (args) with no internal parentheses; this
+ # lets us easily filter out __acquires() and other post-args stuff. If
+ # that fails, just grab the rest of the line to the last closing
+ # parenthesis.
+ #
+ proto_args = r'\(([^\(]*|.*)\)'
+ #
+ # (Except for the simple macro case) attempt to split up the prototype
+ # in the various ways we understand.
+ #
if not found:
patterns = [
- rf'^()({name})\s*{prototype_end}',
- rf'^({type1})\s+({name})\s*{prototype_end}',
- rf'^({type2})\s*({name})\s*{prototype_end}',
+ rf'^()({name})\s*{proto_args}',
+ rf'^({type1})\s+({name})\s*{proto_args}',
+ rf'^({type2})\s*({name})\s*{proto_args}',
]
for p in patterns:
r = KernRe(p)
-
if r.match(prototype):
-
return_type = r.group(1)
declaration_name = r.group(2)
args = r.group(3)
-
self.create_parameter_list(ln, decl_type, args, ',',
declaration_name)
-
found = True
break
+ #
+ # Parsing done; make sure that things are as we expect.
+ #
if not found:
self.emit_msg(ln,
f"cannot understand function prototype: '{prototype}'")
return
-
if self.entry.identifier != declaration_name:
- self.emit_msg(ln,
- f"expecting prototype for {self.entry.identifier}(). Prototype was for {declaration_name}() instead")
+ self.emit_msg(ln, f"expecting prototype for {self.entry.identifier}(). "
+ f"Prototype was for {declaration_name}() instead")
return
-
self.check_sections(ln, declaration_name, "function")
-
self.check_return_section(ln, declaration_name, return_type)
+ #
+ # Store the result.
+ #
+ self.output_declaration(decl_type, declaration_name,
+ typedef=('typedef' in return_type),
+ functiontype=return_type,
+ purpose=self.entry.declaration_purpose,
+ func_macro=func_macro)
- if 'typedef' in return_type:
- self.output_declaration(decl_type, declaration_name,
- typedef=True,
- functiontype=return_type,
- purpose=self.entry.declaration_purpose,
- func_macro=func_macro)
- else:
- self.output_declaration(decl_type, declaration_name,
- typedef=False,
- functiontype=return_type,
- purpose=self.entry.declaration_purpose,
- func_macro=func_macro)
def dump_typedef(self, ln, proto):
"""
Stores a typedef inside self.entries array.
"""
-
- typedef_type = r'((?:\s+[\w\*]+\b){0,7}\s+(?:\w+\b|\*+))\s*'
+ #
+ # We start by looking for function typedefs.
+ #
+ typedef_type = r'typedef((?:\s+[\w*]+\b){0,7}\s+(?:\w+\b|\*+))\s*'
typedef_ident = r'\*?\s*(\w\S+)\s*'
typedef_args = r'\s*\((.*)\);'
- typedef1 = KernRe(r'typedef' + typedef_type + r'\(' + typedef_ident + r'\)' + typedef_args)
- typedef2 = KernRe(r'typedef' + typedef_type + typedef_ident + typedef_args)
-
- # Strip comments
- proto = KernRe(r'/\*.*?\*/', flags=re.S).sub('', proto)
+ typedef1 = KernRe(typedef_type + r'\(' + typedef_ident + r'\)' + typedef_args)
+ typedef2 = KernRe(typedef_type + typedef_ident + typedef_args)
# Parse function typedef prototypes
for r in [typedef1, typedef2]:
@@ -1065,21 +1050,16 @@ class KernelDoc:
f"expecting prototype for typedef {self.entry.identifier}. Prototype was for typedef {declaration_name} instead\n")
return
- decl_type = 'function'
- self.create_parameter_list(ln, decl_type, args, ',', declaration_name)
+ self.create_parameter_list(ln, 'function', args, ',', declaration_name)
- self.output_declaration(decl_type, declaration_name,
+ self.output_declaration('function', declaration_name,
typedef=True,
functiontype=return_type,
purpose=self.entry.declaration_purpose)
return
-
- # Handle nested parentheses or brackets
- r = KernRe(r'(\(*.\)\s*|\[*.\]\s*);$')
- while r.search(proto):
- proto = r.sub('', proto)
-
- # Parse simple typedefs
+ #
+ # Not a function, try to parse a simple typedef.
+ #
r = KernRe(r'typedef.*\s+(\w+)\s*;')
if r.match(proto):
declaration_name = r.group(1)
@@ -1262,7 +1242,7 @@ class KernelDoc:
self.dump_section()
# Look for doc_com + <text> + doc_end:
- r = KernRe(r'\s*\*\s*[a-zA-Z_0-9:\.]+\*/')
+ r = KernRe(r'\s*\*\s*[a-zA-Z_0-9:.]+\*/')
if r.match(line):
self.emit_msg(ln, f"suspicious ending line: {line}")
@@ -1473,7 +1453,7 @@ class KernelDoc:
"""Ancillary routine to process a function prototype"""
# strip C99-style comments to end of line
- line = KernRe(r"\/\/.*$", re.S).sub('', line)
+ line = KernRe(r"//.*$", re.S).sub('', line)
#
# Soak up the line's worth of prototype text, stopping at { or ; if present.
#
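
The STRUCT_GROUP discussion earlier in this file boils down to one limitation: Python's re has no recursive (?1) patterns, so balanced parentheses need a scanner like NestedMatch. A minimal depth-counting sketch of the technique (not the NestedMatch implementation; trailing-semicolon handling is left aside):

    def strip_struct_group(text):
        key = 'STRUCT_GROUP('
        while True:
            start = text.find(key)
            if start < 0:
                return text
            depth, i = 1, start + len(key)
            while i < len(text) and depth:
                depth += {'(': 1, ')': -1}.get(text[i], 0)
                i += 1
            # Keep the MEMBERS arguments, drop the macro wrapper itself.
            inner = text[start + len(key):i - 1]
            text = text[:start] + inner + text[i:]

    s = 'STRUCT_GROUP(u8 a; struct { int x; } b;);'
    print(strip_struct_group(s))   # -> u8 a; struct { int x; } b;;
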
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 51367c2bfc21..433849ff7529 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -73,10 +73,7 @@ vmlinux_link()
objs="${objs} .builtin-dtbs.o"
fi
- if is_enabled CONFIG_MODULES; then
- objs="${objs} .vmlinux.export.o"
- fi
-
+ objs="${objs} .vmlinux.export.o"
objs="${objs} init/version-timestamp.o"
if [ "${SRCARCH}" = "um" ]; then
diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh
index 0d223b4a9445..99b5575c1ef7 100755
--- a/scripts/min-tool-version.sh
+++ b/scripts/min-tool-version.sh
@@ -24,12 +24,10 @@ gcc)
fi
;;
llvm)
- if [ "$SRCARCH" = s390 -o "$SRCARCH" = x86 ]; then
- echo 15.0.0
- elif [ "$SRCARCH" = loongarch ]; then
+ if [ "$SRCARCH" = loongarch ]; then
echo 18.0.0
else
- echo 13.0.1
+ echo 15.0.0
fi
;;
rustc)
diff --git a/scripts/misc-check b/scripts/misc-check
index 84f08da17b2c..40e5a4b01ff4 100755
--- a/scripts/misc-check
+++ b/scripts/misc-check
@@ -45,7 +45,7 @@ check_tracked_ignored_files () {
# does not automatically fix it.
check_missing_include_linux_export_h () {
- git -C "${srctree:-.}" grep --files-with-matches -E 'EXPORT_SYMBOL((_NS)?(_GPL)?|_GPL_FOR_MODULES)\(.*\)' \
+ git -C "${srctree:-.}" grep --files-with-matches -E 'EXPORT_SYMBOL((_NS)?(_GPL)?|_FOR_MODULES)\(.*\)' \
-- '*.[ch]' :^tools/ :^include/linux/export.h |
xargs -r git -C "${srctree:-.}" grep --files-without-match '#include[[:space:]]*<linux/export\.h>' |
xargs -r printf "%s: warning: EXPORT_SYMBOL() is used, but #include <linux/export.h> is missing\n" >&2
@@ -58,7 +58,7 @@ check_unnecessary_include_linux_export_h () {
git -C "${srctree:-.}" grep --files-with-matches '#include[[:space:]]*<linux/export\.h>' \
-- '*.[c]' :^tools/ |
- xargs -r git -C "${srctree:-.}" grep --files-without-match -E 'EXPORT_SYMBOL((_NS)?(_GPL)?|_GPL_FOR_MODULES)\(.*\)' |
+ xargs -r git -C "${srctree:-.}" grep --files-without-match -E 'EXPORT_SYMBOL((_NS)?(_GPL)?|_FOR_MODULES)\(.*\)' |
xargs -r printf "%s: warning: EXPORT_SYMBOL() is not used, but #include <linux/export.h> is present\n" >&2
}
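The misc-check change above presumably tracks a macro rename, matching EXPORT_SYMBOL_FOR_MODULES() rather than EXPORT_SYMBOL_GPL_FOR_MODULES(). A quick self-contained check of which variants the updated extended regex accepts (Python's re engine handles this ERE unchanged; the symbol names are illustrative):

    import re

    pat = re.compile(r'EXPORT_SYMBOL((_NS)?(_GPL)?|_FOR_MODULES)\(.*\)')

    for line in (
        'EXPORT_SYMBOL(foo);',
        'EXPORT_SYMBOL_GPL(foo);',
        'EXPORT_SYMBOL_NS(foo, "NS");',
        'EXPORT_SYMBOL_NS_GPL(foo, "NS");',
        'EXPORT_SYMBOL_FOR_MODULES(foo, "mod");',
    ):
        assert pat.search(line), line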
diff --git a/scripts/mksysmap b/scripts/mksysmap
index 3accbdb269ac..c4531eacde20 100755
--- a/scripts/mksysmap
+++ b/scripts/mksysmap
@@ -59,6 +59,9 @@
# EXPORT_SYMBOL (namespace)
/ __kstrtabns_/d
+# MODULE_DEVICE_TABLE (symbol name)
+/ __mod_device_table__/d
+
# ---------------------------------------------------------------------------
# Ignored suffixes
# (do not forget '$' after each pattern)
@@ -79,6 +82,9 @@
/ _SDA_BASE_$/d
/ _SDA2_BASE_$/d
+# MODULE_INFO()
+/ __UNIQUE_ID_modinfo[0-9]*$/d
+
# ---------------------------------------------------------------------------
# Ignored patterns
# (symbols that contain the pattern are ignored)
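The two new mksysmap rules drop generated symbols from System.map: MODULE_DEVICE_TABLE() entries (now prefixed __mod_device_table__) and the __UNIQUE_ID_modinfo strings emitted by MODULE_INFO(). A rough Python rendering of just these two filters, assuming nm-style "address type name" input (the real script is a sed program; the sample symbols are made up):

    import re

    def keep(nm_line):
        sym = nm_line.rsplit(' ', 1)[-1]
        if sym.startswith('__mod_device_table__'):
            return False                      # MODULE_DEVICE_TABLE entries
        if re.fullmatch(r'__UNIQUE_ID_modinfo[0-9]*', sym):
            return False                      # MODULE_INFO() strings
        return True

    lines = [
        'ffffffff81000000 T _text',
        'ffffffff82000000 d __mod_device_table__kmod_foo__usb__tbl',
        'ffffffff82000010 d __UNIQUE_ID_modinfo42',
    ]
    assert [l for l in lines if keep(l)] == lines[:1]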
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 00586119a25b..b3333560b95e 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -94,6 +94,7 @@ module_alias_printf(struct module *mod, bool append_wildcard,
}
}
+ new->builtin_modname = NULL;
list_add_tail(&new->node, &mod->aliases);
}
@@ -1476,8 +1477,8 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
{
void *symval;
char *zeros = NULL;
- const char *type, *name;
- size_t typelen;
+ const char *type, *name, *modname;
+ size_t typelen, modnamelen;
static const char *prefix = "__mod_device_table__";
/* We're looking for a section relative symbol */
@@ -1488,10 +1489,20 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT)
return;
- /* All our symbols are of form __mod_device_table__<type>__<name>. */
+ /* All our symbols are of form __mod_device_table__kmod_<modname>__<type>__<name>. */
if (!strstarts(symname, prefix))
return;
- type = symname + strlen(prefix);
+
+ modname = strstr(symname, "__kmod_");
+ if (!modname)
+ return;
+ modname += strlen("__kmod_");
+
+ type = strstr(modname, "__");
+ if (!type)
+ return;
+ modnamelen = type - modname;
+ type += strlen("__");
name = strstr(type, "__");
if (!name)
@@ -1517,5 +1528,21 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
}
}
+ if (mod->is_vmlinux) {
+ struct module_alias *alias;
+
+ /*
+ * If this is vmlinux, record the name of the builtin module.
+ * Traverse the linked list in the reverse order, and set the
+ * builtin_modname unless it has already been set in the
+ * previous call.
+ */
+ list_for_each_entry_reverse(alias, &mod->aliases, node) {
+ if (alias->builtin_modname)
+ break;
+ alias->builtin_modname = xstrndup(modname, modnamelen);
+ }
+ }
+
free(zeros);
}
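handle_moddevtable() now expects device-table symbols to carry the owning module's name: __mod_device_table__kmod_<modname>__<type>__<name>. A hedged Python sketch of the same split-at-first-"__" parsing (the C code above does this with strstr(); the sample symbol is invented):

    def parse_devtable_symbol(symname):
        # Mirrors the C parsing: prefix, then "__kmod_", then two
        # "__"-separated fields (modname ends at the first "__").
        if not symname.startswith('__mod_device_table__'):
            return None
        parts = symname.split('__kmod_', 1)
        if len(parts) != 2:
            return None
        modname, sep, tail = parts[1].partition('__')
        if not sep:
            return None
        dtype, sep, name = tail.partition('__')
        if not sep:
            return None
        return modname, dtype, name

    assert parse_devtable_symbol(
        '__mod_device_table__kmod_e1000__pci__dev42'
    ) == ('e1000', 'pci', 'dev42')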
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 5ca7c268294e..47c8aa2a6939 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -2067,11 +2067,26 @@ static void write_if_changed(struct buffer *b, const char *fname)
static void write_vmlinux_export_c_file(struct module *mod)
{
struct buffer buf = { };
+ struct module_alias *alias, *next;
buf_printf(&buf,
"#include <linux/export-internal.h>\n");
add_exported_symbols(&buf, mod);
+
+ buf_printf(&buf,
+ "#include <linux/module.h>\n"
+ "#undef __MODULE_INFO_PREFIX\n"
+ "#define __MODULE_INFO_PREFIX\n");
+
+ list_for_each_entry_safe(alias, next, &mod->aliases, node) {
+ buf_printf(&buf, "MODULE_INFO(%s.alias, \"%s\");\n",
+ alias->builtin_modname, alias->str);
+ list_del(&alias->node);
+ free(alias->builtin_modname);
+ free(alias);
+ }
+
write_if_changed(&buf, ".vmlinux.export.c");
free(buf.p);
}
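With builtin_modname recorded per alias, modpost can now emit the builtin modules' aliases into .vmlinux.export.c as MODULE_INFO() lines. A small Python sketch of the output produced by the buf_printf() loop above (the alias data is made up):

    aliases = [
        ('e1000', 'pci:v00008086d0000100Esv*sd*bc*sc*i*'),
        ('loop', 'devname:loop-control'),
    ]

    # One MODULE_INFO(<modname>.alias, "<alias>") line per builtin alias,
    # matching the format string passed to buf_printf() above.
    for modname, alias in aliases:
        print(f'MODULE_INFO({modname}.alias, "{alias}");')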
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 9133e4c3803f..2aecb8f25c87 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -99,10 +99,12 @@ buf_write(struct buffer *buf, const char *s, int len);
* struct module_alias - auto-generated MODULE_ALIAS()
*
* @node: linked to module::aliases
+ * @builtin_modname: name of the builtin module (only for vmlinux)
* @str: a string for MODULE_ALIAS()
*/
struct module_alias {
struct list_head node;
+ char *builtin_modname;
char str[];
};
diff --git a/scripts/rustdoc_test_gen.rs b/scripts/rustdoc_test_gen.rs
index abb34ada2508..c8f9dc2ab976 100644
--- a/scripts/rustdoc_test_gen.rs
+++ b/scripts/rustdoc_test_gen.rs
@@ -202,7 +202,7 @@ pub extern "C" fn {kunit_name}(__kunit_test: *mut ::kernel::bindings::kunit) {{
// This follows the syntax for declaring test metadata in the proposed KTAP v2 spec, which may
// be used for the proposed KUnit test attributes API. Thus hopefully this will make migration
// easier later on.
- ::kernel::kunit::info(format_args!(" # {kunit_name}.location: {real_path}:{line}\n"));
+ ::kernel::kunit::info(fmt!(" # {kunit_name}.location: {real_path}:{line}\n"));
/// The anchor where the test code body starts.
#[allow(unused)]
diff --git a/scripts/selinux/install_policy.sh b/scripts/selinux/install_policy.sh
index db40237e60ce..77368a73f111 100755
--- a/scripts/selinux/install_policy.sh
+++ b/scripts/selinux/install_policy.sh
@@ -74,7 +74,7 @@ cd /etc/selinux/dummy/contexts/files
$SF -F file_contexts /
mounts=`cat /proc/$$/mounts | \
- grep -E "ext[234]|jfs|xfs|reiserfs|jffs2|gfs2|btrfs|f2fs|ocfs2" | \
+ grep -E "ext[234]|jfs|xfs|jffs2|gfs2|btrfs|f2fs|ocfs2" | \
awk '{ print $2 '}`
$SF -F file_contexts $mounts
diff --git a/scripts/sphinx-build-wrapper b/scripts/sphinx-build-wrapper
new file mode 100755
index 000000000000..abe8c26ae137
--- /dev/null
+++ b/scripts/sphinx-build-wrapper
@@ -0,0 +1,719 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2025 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+#
+# pylint: disable=R0902, R0912, R0913, R0914, R0915, R0917, C0103
+#
+# Converted from docs Makefile and parallel-wrapper.sh, both under
+# GPLv2, copyrighted since 2008 by the following authors:
+#
+# Akira Yokosawa <akiyks@gmail.com>
+# Arnd Bergmann <arnd@arndb.de>
+# Breno Leitao <leitao@debian.org>
+# Carlos Bilbao <carlos.bilbao@amd.com>
+# Dave Young <dyoung@redhat.com>
+# Donald Hunter <donald.hunter@gmail.com>
+# Geert Uytterhoeven <geert+renesas@glider.be>
+# Jani Nikula <jani.nikula@intel.com>
+# Jan Stancek <jstancek@redhat.com>
+# Jonathan Corbet <corbet@lwn.net>
+# Joshua Clayton <stillcompiling@gmail.com>
+# Kees Cook <keescook@chromium.org>
+# Linus Torvalds <torvalds@linux-foundation.org>
+# Magnus Damm <damm+renesas@opensource.se>
+# Masahiro Yamada <masahiroy@kernel.org>
+# Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+# Maxim Cournoyer <maxim.cournoyer@gmail.com>
+# Peter Foley <pefoley2@pefoley.com>
+# Randy Dunlap <rdunlap@infradead.org>
+# Rob Herring <robh@kernel.org>
+# Shuah Khan <shuahkh@osg.samsung.com>
+# Thorsten Blum <thorsten.blum@toblux.com>
+# Tomas Winkler <tomas.winkler@intel.com>
+
+
+"""
+Sphinx build wrapper that handles Kernel-specific business rules:
+
+- it picks up the Kernel build environment variables;
+- it determines the best parallelism level;
+- it handles SPHINXDIRS.
+
+This tool also ensures that MIN_PYTHON_VERSION is satisfied. If the running
+version is below that, it looks for a newer Python version and, if found,
+re-runs itself using it.
+"""
+
+import argparse
+import locale
+import os
+import re
+import shlex
+import shutil
+import subprocess
+import sys
+
+from concurrent import futures
+from glob import glob
+
+LIB_DIR = "lib"
+SRC_DIR = os.path.dirname(os.path.realpath(__file__))
+
+sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR))
+
+from jobserver import JobserverExec # pylint: disable=C0413
+
+
+def parse_version(version):
+ """Convert a major.minor.patch version into a tuple"""
+ return tuple(int(x) for x in version.split("."))
+
+def ver_str(version):
+ """Returns a version tuple as major.minor.patch"""
+
+ return ".".join([str(x) for x in version])
+
+# Minimal supported Python version needed by Sphinx and its extensions
+MIN_PYTHON_VERSION = parse_version("3.7")
+
+# Default value for --venv parameter
+VENV_DEFAULT = "sphinx_latest"
+
+# List of make targets and their corresponding builders and output directories
+TARGETS = {
+ "cleandocs": {
+ "builder": "clean",
+ },
+ "htmldocs": {
+ "builder": "html",
+ },
+ "epubdocs": {
+ "builder": "epub",
+ "out_dir": "epub",
+ },
+ "texinfodocs": {
+ "builder": "texinfo",
+ "out_dir": "texinfo",
+ },
+ "infodocs": {
+ "builder": "texinfo",
+ "out_dir": "texinfo",
+ },
+ "latexdocs": {
+ "builder": "latex",
+ "out_dir": "latex",
+ },
+ "pdfdocs": {
+ "builder": "latex",
+ "out_dir": "latex",
+ },
+ "xmldocs": {
+ "builder": "xml",
+ "out_dir": "xml",
+ },
+ "linkcheckdocs": {
+ "builder": "linkcheck"
+ },
+}
+
+# Paper sizes. An empty value will pick the default
+PAPER = ["", "a4", "letter"]
+
+class SphinxBuilder:
+ """
+ Handles a sphinx-build target, adding the arguments needed to build
+ the Kernel documentation.
+ """
+
+ def is_rust_enabled(self):
+ """Check if rust is enabled at .config"""
+ config_path = os.path.join(self.srctree, ".config")
+ if os.path.isfile(config_path):
+ with open(config_path, "r", encoding="utf-8") as f:
+ return "CONFIG_RUST=y" in f.read()
+ return False
+
+ def get_path(self, path, abs_path=False):
+ """
+ Ancillary routine to handle paths the way a shell would.
+
+ It first expands "~" and "~user". Then, if the path is not absolute,
+ it joins self.srctree. Finally, if requested, it converts to abspath.
+ """
+
+ path = os.path.expanduser(path)
+ if not path.startswith("/"):
+ path = os.path.join(self.srctree, path)
+
+ if abs_path:
+ return os.path.abspath(path)
+
+ return path
+
+ def __init__(self, venv=None, verbose=False, n_jobs=None, interactive=None):
+ """Initialize internal variables"""
+ self.venv = venv
+ self.verbose = None
+
+ # Normal variables passed from Kernel's makefile
+ self.kernelversion = os.environ.get("KERNELVERSION", "unknown")
+ self.kernelrelease = os.environ.get("KERNELRELEASE", "unknown")
+ self.pdflatex = os.environ.get("PDFLATEX", "xelatex")
+
+ if not interactive:
+ self.latexopts = os.environ.get("LATEXOPTS", "-interaction=batchmode -no-shell-escape")
+ else:
+ self.latexopts = os.environ.get("LATEXOPTS", "")
+
+ if not verbose:
+ verbose = bool(os.environ.get("KBUILD_VERBOSE", "") != "")
+
+ # Handle the SPHINXOPTS environment variable
+ sphinxopts = shlex.split(os.environ.get("SPHINXOPTS", ""))
+
+ # As we handle the number of jobs and quiet separately, we need to
+ # pick them the same way sphinx-build would, so let's use argparse
+ # to do the right argument expansion
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-j', '--jobs', type=int)
+ parser.add_argument('-q', '--quiet', type=int)
+
+ # Other sphinx-build arguments go as-is, so place them
+ # at self.sphinxopts
+ sphinx_args, self.sphinxopts = parser.parse_known_args(sphinxopts)
+ if sphinx_args.quiet == True:
+ self.verbose = False
+
+ # SPHINXOPTS -j value, if any (may be overridden below)
+ self.n_jobs = sphinx_args.jobs
+
+ # Command line arguments were passed, overriding SPHINXOPTS
+ if verbose is not None:
+ self.verbose = verbose
+
+ if n_jobs:
+ self.n_jobs = n_jobs
+
+ # Source tree directory. This needs to be in os.environ, as
+ # Sphinx extensions and the media uAPI makefile need it
+ self.srctree = os.environ.get("srctree")
+ if not self.srctree:
+ self.srctree = "."
+ os.environ["srctree"] = self.srctree
+
+ # Now that we can expand srctree, get other directories as well
+ self.sphinxbuild = os.environ.get("SPHINXBUILD", "sphinx-build")
+ self.kerneldoc = self.get_path(os.environ.get("KERNELDOC",
+ "scripts/kernel-doc.py"))
+ self.obj = os.environ.get("obj", "Documentation")
+ self.builddir = self.get_path(os.path.join(self.obj, "output"),
+ abs_path=True)
+
+ # Media uAPI needs it
+ os.environ["BUILDDIR"] = self.builddir
+
+ # Detect if rust is enabled
+ self.config_rust = self.is_rust_enabled()
+
+ # Get directory locations for LaTeX build toolchain
+ self.pdflatex_cmd = shutil.which(self.pdflatex)
+ self.latexmk_cmd = shutil.which("latexmk")
+
+ self.env = os.environ.copy()
+
+ # If venv parameter is specified, run Sphinx from venv
+ if venv:
+ bin_dir = os.path.join(venv, "bin")
+ if os.path.isfile(os.path.join(bin_dir, "activate")):
+ # "activate" virtual env
+ self.env["PATH"] = bin_dir + ":" + self.env["PATH"]
+ self.env["VIRTUAL_ENV"] = venv
+ if "PYTHONHOME" in self.env:
+ del self.env["PYTHONHOME"]
+ print(f"Setting venv to {venv}")
+ else:
+ sys.exit(f"Venv {venv} not found.")
+
+ def run_sphinx(self, sphinx_build, build_args, *args, **pwargs):
+ """
+ Executes sphinx-build using the current python3 command, setting
+ the -j parameter, where possible, to run the build in parallel.
+ """
+
+ with JobserverExec() as jobserver:
+ if jobserver.claim:
+ n_jobs = str(jobserver.claim)
+ else:
+ n_jobs = "auto" # Supported since Sphinx 1.7
+
+ cmd = []
+
+ if self.venv:
+ cmd.append("python")
+ else:
+ cmd.append(sys.executable)
+
+ cmd.append(sphinx_build)
+
+ # if present, SPHINXOPTS or command line --jobs overrides default
+ if self.n_jobs:
+ n_jobs = str(self.n_jobs)
+
+ if n_jobs:
+ cmd += [f"-j{n_jobs}"]
+
+ if not self.verbose:
+ cmd.append("-q")
+
+ cmd += self.sphinxopts
+
+ cmd += build_args
+
+ if self.verbose:
+ print(" ".join(cmd))
+
+ return subprocess.call(cmd, *args, **pwargs)
+
+ def handle_html(self, css, output_dir):
+ """
+ Extra steps for HTML and epub output.
+
+ For such targets, we need to ensure that CSS will be properly
+ copied to the output _static directory
+ """
+
+ if not css:
+ return
+
+ css = os.path.expanduser(css)
+ if not css.startswith("/"):
+ css = os.path.join(self.srctree, css)
+
+ static_dir = os.path.join(output_dir, "_static")
+ os.makedirs(static_dir, exist_ok=True)
+
+ try:
+ shutil.copy2(css, static_dir)
+ except (OSError, IOError) as e:
+ print(f"Warning: Failed to copy CSS: {e}", file=sys.stderr)
+
+ def build_pdf_file(self, latex_cmd, from_dir, path):
+ """Builds a single pdf file using latex_cmd"""
+ try:
+ subprocess.run(latex_cmd + [path],
+ cwd=from_dir, check=True)
+
+ return True
+ except subprocess.CalledProcessError:
+ # LaTeX PDF error code is almost useless: it returns
+ # error codes even when build succeeds but has warnings.
+ # So, we'll ignore the results
+ return False
+
+ def pdf_parallel_build(self, tex_suffix, latex_cmd, tex_files, n_jobs):
+ """Build PDF files in parallel if possible"""
+ builds = {}
+ build_failed = False
+ max_len = 0
+ has_tex = False
+
+ # Process files in parallel
+ with futures.ThreadPoolExecutor(max_workers=n_jobs) as executor:
+ jobs = {}
+
+ for from_dir, pdf_dir, entry in tex_files:
+ name = entry.name
+
+ if not name.endswith(tex_suffix):
+ continue
+
+ name = name[:-len(tex_suffix)]
+
+ max_len = max(max_len, len(name))
+
+ has_tex = True
+
+ future = executor.submit(self.build_pdf_file, latex_cmd,
+ from_dir, entry.path)
+ jobs[future] = (from_dir, name, entry.path)
+
+ for future in futures.as_completed(jobs):
+ from_dir, name, path = jobs[future]
+
+ pdf_name = name + ".pdf"
+ pdf_from = os.path.join(from_dir, pdf_name)
+
+ try:
+ success = future.result()
+
+ if success and os.path.exists(pdf_from):
+ pdf_to = os.path.join(pdf_dir, pdf_name)
+
+ os.rename(pdf_from, pdf_to)
+ builds[name] = os.path.relpath(pdf_to, self.builddir)
+ else:
+ builds[name] = "FAILED"
+ build_failed = True
+ except Exception as e:
+ builds[name] = f"FAILED ({str(e)})"
+ build_failed = True
+
+ # Handle case where no .tex files were found
+ if not has_tex:
+ name = "Sphinx LaTeX builder"
+ max_len = max(max_len, len(name))
+ builds[name] = "FAILED (no .tex file was generated)"
+ build_failed = True
+
+ return builds, build_failed, max_len
+
+ def handle_pdf(self, output_dirs):
+ """
+ Extra steps for PDF output.
+
+ As PDF is handled via a LaTeX output, after building the .tex file,
+ a new build is needed to create the PDF output from the latex
+ directory.
+ """
+ builds = {}
+ max_len = 0
+ tex_suffix = ".tex"
+
+ # Get all tex files that will be used for PDF build
+ tex_files = []
+ for from_dir in output_dirs:
+ pdf_dir = os.path.join(from_dir, "../pdf")
+ os.makedirs(pdf_dir, exist_ok=True)
+
+ if self.latexmk_cmd:
+ latex_cmd = [self.latexmk_cmd, f"-{self.pdflatex}"]
+ else:
+ latex_cmd = [self.pdflatex]
+
+ latex_cmd.extend(shlex.split(self.latexopts))
+
+ # Get a list of tex files to process
+ with os.scandir(from_dir) as it:
+ for entry in it:
+ if entry.name.endswith(tex_suffix):
+ tex_files.append((from_dir, pdf_dir, entry))
+
+ # When using make, this won't be used, as the number of jobs comes
+ # from the POSIX jobserver. So, this covers the case where the build
+ # comes from the command line. In that case, serialize by default,
+ # unless the user explicitly sets the number of jobs.
+ n_jobs = 1
+
+ # n_jobs is either an integer or "auto". Only use it if it is a number
+ if self.n_jobs:
+ try:
+ n_jobs = int(self.n_jobs)
+ except ValueError:
+ pass
+
+ # When using make, jobserver.claim is the number of jobs that were
+ # used with "-j" and that aren't used by other make targets
+ with JobserverExec() as jobserver:
+ n_jobs = 1
+
+ # Handle the case when a parameter is passed via command line,
+ # using it as default, if jobserver doesn't claim anything
+ if self.n_jobs:
+ try:
+ n_jobs = int(self.n_jobs)
+ except ValueError:
+ pass
+
+ if jobserver.claim:
+ n_jobs = jobserver.claim
+
+ # Build files in parallel
+ builds, build_failed, max_len = self.pdf_parallel_build(tex_suffix,
+ latex_cmd,
+ tex_files,
+ n_jobs)
+
+ msg = "Summary"
+ msg += "\n" + "=" * len(msg)
+ print()
+ print(msg)
+
+ for pdf_name, pdf_file in builds.items():
+ print(f"{pdf_name:<{max_len}}: {pdf_file}")
+
+ print()
+
+ # return an error if a PDF file is missing
+
+ if build_failed:
+ sys.exit(f"PDF build failed: not all PDF files were created.")
+ else:
+ print("All PDF files were built.")
+
+ def handle_info(self, output_dirs):
+ """
+ Extra steps for Info output.
+
+ For texinfo generation, an additional make is needed from the
+ texinfo directory.
+ """
+
+ for output_dir in output_dirs:
+ try:
+ subprocess.run(["make", "info"], cwd=output_dir, check=True)
+ except subprocess.CalledProcessError as e:
+ sys.exit(f"Error generating info docs: {e}")
+
+ def cleandocs(self, builder):
+
+ shutil.rmtree(self.builddir, ignore_errors=True)
+
+ def build(self, target, sphinxdirs=None, conf="conf.py",
+ theme=None, css=None, paper=None):
+ """
+ Build documentation using Sphinx. This is the core function of this
+ module. It prepares all arguments required by sphinx-build.
+ """
+
+ builder = TARGETS[target]["builder"]
+ out_dir = TARGETS[target].get("out_dir", "")
+
+ # Cleandocs doesn't require sphinx-build
+ if target == "cleandocs":
+ self.cleandocs(builder)
+ return
+
+ # Other targets require sphinx-build
+ sphinxbuild = shutil.which(self.sphinxbuild, path=self.env["PATH"])
+ if not sphinxbuild:
+ sys.exit(f"Error: {self.sphinxbuild} not found in PATH.\n")
+
+ if builder == "latex":
+ if not self.pdflatex_cmd and not self.latexmk_cmd:
+ sys.exit("Error: pdflatex or latexmk required for PDF generation")
+
+ docs_dir = os.path.abspath(os.path.join(self.srctree, "Documentation"))
+
+ # Prepare base arguments for Sphinx build
+ kerneldoc = self.kerneldoc
+ if kerneldoc.startswith(self.srctree):
+ kerneldoc = os.path.relpath(kerneldoc, self.srctree)
+
+ # Prepare common Sphinx options
+ args = [
+ "-b", builder,
+ "-c", docs_dir,
+ ]
+
+ if builder == "latex":
+ if not paper:
+ paper = PAPER[1]
+
+ args.extend(["-D", f"latex_elements.papersize={paper}paper"])
+
+ if self.config_rust:
+ args.extend(["-t", "rustdoc"])
+
+ if conf:
+ self.env["SPHINX_CONF"] = self.get_path(conf, abs_path=True)
+
+ if not sphinxdirs:
+ sphinxdirs = os.environ.get("SPHINXDIRS", ".")
+
+ # The sphinx-build tool has a bug: internally, it tries to set
+ # locale with locale.setlocale(locale.LC_ALL, ''). This causes a
+ # crash if the locale is not properly set. Detect and work around it.
+ try:
+ locale.setlocale(locale.LC_ALL, '')
+ except Exception:
+ self.env["LC_ALL"] = "C"
+ self.env["LANG"] = "C"
+
+ # sphinxdirs can be a list or a whitespace-separated string
+ if isinstance(sphinxdirs, str):
+ sphinxdirs = [sphinxdirs]
+
+ sphinxdirs_list = []
+ for sphinxdir in sphinxdirs:
+ for name in sphinxdir.split():
+ sphinxdirs_list.append(name)
+
+ # Build each directory
+ output_dirs = []
+ for sphinxdir in sphinxdirs_list:
+ src_dir = os.path.join(docs_dir, sphinxdir)
+ doctree_dir = os.path.join(self.builddir, ".doctrees")
+ output_dir = os.path.join(self.builddir, sphinxdir, out_dir)
+
+ # Make directory names canonical
+ src_dir = os.path.normpath(src_dir)
+ doctree_dir = os.path.normpath(doctree_dir)
+ output_dir = os.path.normpath(output_dir)
+
+ os.makedirs(doctree_dir, exist_ok=True)
+ os.makedirs(output_dir, exist_ok=True)
+
+ output_dirs.append(output_dir)
+
+ build_args = args + [
+ "-d", doctree_dir,
+ "-D", f"kerneldoc_bin={kerneldoc}",
+ "-D", f"version={self.kernelversion}",
+ "-D", f"release={self.kernelrelease}",
+ "-D", f"kerneldoc_srctree={self.srctree}",
+ src_dir,
+ output_dir,
+ ]
+
+ # Execute sphinx-build
+ try:
+ self.run_sphinx(sphinxbuild, build_args, env=self.env)
+ except Exception as e:
+ sys.exit(f"Build failed: {e}")
+
+ # Ensure that html/epub will have needed static files
+ if target in ["htmldocs", "epubdocs"]:
+ self.handle_html(css, output_dir)
+
+ # PDF and Info require a second build step
+ if target == "pdfdocs":
+ self.handle_pdf(output_dirs)
+ elif target == "infodocs":
+ self.handle_info(output_dirs)
+
+ @staticmethod
+ def get_python_version(cmd):
+ """
+ Get the version from a Python binary. As we need to detect whether newer
+ python binaries are available, we can't rely on sys.version_info here.
+ """
+
+ result = subprocess.run([cmd, "--version"], check=True,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ universal_newlines=True)
+ version = result.stdout.strip()
+
+ match = re.search(r"(\d+\.\d+\.\d+)", version)
+ if match:
+ return parse_version(match.group(1))
+
+ print(f"Can't parse version {version}")
+ return (0, 0, 0)
+
+ @staticmethod
+ def find_python():
+ """
+ Detect whether any python 3.xy version newer than the current
+ one is available.
+
+ Note: this routine is limited to up to 2 minor-version digits for
+ python3. We may need to update it one day, hopefully in a distant future.
+ """
+ patterns = [
+ "python3.[0-9]",
+ "python3.[0-9][0-9]",
+ ]
+
+ # Seek for a python binary newer than MIN_PYTHON_VERSION
+ for path in os.getenv("PATH", "").split(":"):
+ for pattern in patterns:
+ for cmd in glob(os.path.join(path, pattern)):
+ if os.path.isfile(cmd) and os.access(cmd, os.X_OK):
+ version = SphinxBuilder.get_python_version(cmd)
+ if version >= MIN_PYTHON_VERSION:
+ return cmd
+
+ return None
+
+ @staticmethod
+ def check_python():
+ """
+ Check if the current python binary satisfies our minimal requirement
+ for Sphinx build. If not, re-run with a newer version if found.
+ """
+ cur_ver = sys.version_info[:3]
+ if cur_ver >= MIN_PYTHON_VERSION:
+ return
+
+ python_ver = ver_str(cur_ver)
+
+ new_python_cmd = SphinxBuilder.find_python()
+ if not new_python_cmd:
+ sys.exit(f"Python version {python_ver} is not supported anymore.")
+
+ # Restart script using the newer version
+ script_path = os.path.abspath(sys.argv[0])
+ args = [new_python_cmd, script_path] + sys.argv[1:]
+
+ print(f"Python {python_ver} not supported. Changing to {new_python_cmd}")
+
+ try:
+ os.execv(new_python_cmd, args)
+ except OSError as e:
+ sys.exit(f"Failed to restart with {new_python_cmd}: {e}")
+
+def jobs_type(value):
+ """
+ Handle valid values for -j. Accepts Sphinx "-jauto", plus any integer
+ greater than or equal to one.
+ """
+ if value is None:
+ return None
+
+ if value.lower() == 'auto':
+ return value.lower()
+
+ try:
+ if int(value) >= 1:
+ return value
+
+ raise argparse.ArgumentTypeError(f"Minimum jobs is 1, got {value}")
+ except ValueError:
+ raise argparse.ArgumentTypeError(f"Must be 'auto' or positive integer, got {value}")
+
+def main():
+ """
+ Main function. The only mandatory argument is the target. The other
+ arguments use default values, unless overridden by settings in
+ os.environ.
+ """
+ parser = argparse.ArgumentParser(description="Kernel documentation builder")
+
+ parser.add_argument("target", choices=list(TARGETS.keys()),
+ help="Documentation target to build")
+ parser.add_argument("--sphinxdirs", nargs="+",
+ help="Specific directories to build")
+ parser.add_argument("--conf", default="conf.py",
+ help="Sphinx configuration file")
+
+ parser.add_argument("--theme", help="Sphinx theme to use")
+
+ parser.add_argument("--css", help="Custom CSS file for HTML/EPUB")
+
+ parser.add_argument("--paper", choices=PAPER, default=PAPER[0],
+ help="Paper size for LaTeX/PDF output")
+
+ parser.add_argument("-v", "--verbose", action='store_true',
+ help="place build in verbose mode")
+
+ parser.add_argument('-j', '--jobs', type=jobs_type,
+ help="Sets number of jobs to use with sphinx-build")
+
+ parser.add_argument('-i', '--interactive', action='store_true',
+ help="Change latex default to run in interactive mode")
+
+ parser.add_argument("-V", "--venv", nargs='?', const=f'{VENV_DEFAULT}',
+ default=None,
+ help=f'If used, run Sphinx from a venv dir (default dir: {VENV_DEFAULT})')
+
+ args = parser.parse_args()
+
+ SphinxBuilder.check_python()
+
+ builder = SphinxBuilder(venv=args.venv, verbose=args.verbose,
+ n_jobs=args.jobs, interactive=args.interactive)
+
+ builder.build(args.target, sphinxdirs=args.sphinxdirs, conf=args.conf,
+ theme=args.theme, css=args.css, paper=args.paper)
+
+if __name__ == "__main__":
+ main()
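Both new Python tools in this series share the same bootstrap pattern in check_python(): when the running interpreter predates MIN_PYTHON_VERSION, scan PATH for a newer python3.x and re-exec the script under it. Reduced to its core as a standalone sketch (the version floor and the skipped per-binary version probe are simplifications of what the wrapper actually does):

    import os
    import sys
    from glob import glob

    MIN_PYTHON_VERSION = (3, 7)   # illustrative floor

    def find_python():
        # Scan PATH for python3.N / python3.NN binaries; the real code
        # also runs each candidate with --version before accepting it.
        for path in os.getenv('PATH', '').split(':'):
            for pattern in ('python3.[0-9]', 'python3.[0-9][0-9]'):
                for cmd in sorted(glob(os.path.join(path, pattern))):
                    if os.access(cmd, os.X_OK):
                        return cmd
        return None

    if sys.version_info[:2] < MIN_PYTHON_VERSION:
        cmd = find_python()
        if not cmd:
            sys.exit('No suitable Python interpreter found.')
        # os.execv() replaces the current process; execution never returns.
        os.execv(cmd, [cmd, os.path.abspath(sys.argv[0])] + sys.argv[1:])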
diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
index 3f8d6925e896..954ed3dc0645 100755
--- a/scripts/sphinx-pre-install
+++ b/scripts/sphinx-pre-install
@@ -1,1056 +1,1621 @@
-#!/usr/bin/env perl
+#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
-use strict;
-
-# Copyright (c) 2017-2020 Mauro Carvalho Chehab <mchehab@kernel.org>
-#
-
-my $prefix = "./";
-$prefix = "$ENV{'srctree'}/" if ($ENV{'srctree'});
-
-my $conf = $prefix . "Documentation/conf.py";
-my $requirement_file = $prefix . "Documentation/sphinx/requirements.txt";
-my $virtenv_prefix = "sphinx_";
-
-#
-# Static vars
-#
-
-my %missing;
-my $system_release;
-my $need = 0;
-my $optional = 0;
-my $need_symlink = 0;
-my $need_sphinx = 0;
-my $need_pip = 0;
-my $need_virtualenv = 0;
-my $rec_sphinx_upgrade = 0;
-my $verbose_warn_install = 1;
-my $install = "";
-my $virtenv_dir = "";
-my $python_cmd = "";
-my $activate_cmd;
-my $min_version;
-my $cur_version;
-my $rec_version = "3.4.3";
-my $latest_avail_ver;
-
-#
-# Command line arguments
-#
-
-my $pdf = 1;
-my $virtualenv = 1;
-my $version_check = 0;
-
-#
-# List of required texlive packages on Fedora and OpenSuse
-#
-
-my %texlive = (
- 'amsfonts.sty' => 'texlive-amsfonts',
- 'amsmath.sty' => 'texlive-amsmath',
- 'amssymb.sty' => 'texlive-amsfonts',
- 'amsthm.sty' => 'texlive-amscls',
- 'anyfontsize.sty' => 'texlive-anyfontsize',
- 'atbegshi.sty' => 'texlive-oberdiek',
- 'bm.sty' => 'texlive-tools',
- 'capt-of.sty' => 'texlive-capt-of',
- 'cmap.sty' => 'texlive-cmap',
- 'ecrm1000.tfm' => 'texlive-ec',
- 'eqparbox.sty' => 'texlive-eqparbox',
- 'eu1enc.def' => 'texlive-euenc',
- 'fancybox.sty' => 'texlive-fancybox',
- 'fancyvrb.sty' => 'texlive-fancyvrb',
- 'float.sty' => 'texlive-float',
- 'fncychap.sty' => 'texlive-fncychap',
- 'footnote.sty' => 'texlive-mdwtools',
- 'framed.sty' => 'texlive-framed',
- 'luatex85.sty' => 'texlive-luatex85',
- 'multirow.sty' => 'texlive-multirow',
- 'needspace.sty' => 'texlive-needspace',
- 'palatino.sty' => 'texlive-psnfss',
- 'parskip.sty' => 'texlive-parskip',
- 'polyglossia.sty' => 'texlive-polyglossia',
- 'tabulary.sty' => 'texlive-tabulary',
- 'threeparttable.sty' => 'texlive-threeparttable',
- 'titlesec.sty' => 'texlive-titlesec',
- 'ucs.sty' => 'texlive-ucs',
- 'upquote.sty' => 'texlive-upquote',
- 'wrapfig.sty' => 'texlive-wrapfig',
- 'ctexhook.sty' => 'texlive-ctex',
-);
-
-#
-# Subroutines that checks if a feature exists
-#
-
-sub check_missing(%)
-{
- my %map = %{$_[0]};
-
- foreach my $prog (sort keys %missing) {
- my $is_optional = $missing{$prog};
-
- # At least on some LTS distros like CentOS 7, texlive doesn't
- # provide all packages we need. When such distros are
- # detected, we have to disable PDF output.
- #
- # So, we need to ignore the packages that distros would
- # need for LaTeX to work
- if ($is_optional == 2 && !$pdf) {
- $optional--;
- next;
- }
-
- if ($verbose_warn_install) {
- if ($is_optional) {
- print "Warning: better to also install \"$prog\".\n";
- } else {
- print "ERROR: please install \"$prog\", otherwise, build won't work.\n";
- }
- }
- if (defined($map{$prog})) {
- $install .= " " . $map{$prog};
- } else {
- $install .= " " . $prog;
- }
- }
-
- $install =~ s/^\s//;
-}
-
-sub add_package($$)
-{
- my $package = shift;
- my $is_optional = shift;
-
- $missing{$package} = $is_optional;
- if ($is_optional) {
- $optional++;
- } else {
- $need++;
- }
-}
-
-sub check_missing_file($$$)
-{
- my $files = shift;
- my $package = shift;
- my $is_optional = shift;
-
- for (@$files) {
- return if(-e $_);
- }
-
- add_package($package, $is_optional);
-}
-
-sub findprog($)
-{
- foreach(split(/:/, $ENV{PATH})) {
- return "$_/$_[0]" if(-x "$_/$_[0]");
- }
-}
-
-sub find_python_no_venv()
-{
- my $prog = shift;
-
- my $cur_dir = qx(pwd);
- $cur_dir =~ s/\s+$//;
-
- foreach my $dir (split(/:/, $ENV{PATH})) {
- next if ($dir =~ m,($cur_dir)/sphinx,);
- return "$dir/python3" if(-x "$dir/python3");
- }
- foreach my $dir (split(/:/, $ENV{PATH})) {
- next if ($dir =~ m,($cur_dir)/sphinx,);
- return "$dir/python" if(-x "$dir/python");
- }
- return "python";
-}
-
-sub check_program($$)
-{
- my $prog = shift;
- my $is_optional = shift;
-
- return $prog if findprog($prog);
-
- add_package($prog, $is_optional);
-}
-
-sub check_perl_module($$)
-{
- my $prog = shift;
- my $is_optional = shift;
-
- my $err = system("perl -M$prog -e 1 2>/dev/null /dev/null");
- return if ($err == 0);
-
- add_package($prog, $is_optional);
-}
-
-sub check_python_module($$)
-{
- my $prog = shift;
- my $is_optional = shift;
-
- return if (!$python_cmd);
-
- my $err = system("$python_cmd -c 'import $prog' 2>/dev/null /dev/null");
- return if ($err == 0);
-
- add_package($prog, $is_optional);
-}
-
-sub check_rpm_missing($$)
-{
- my @pkgs = @{$_[0]};
- my $is_optional = $_[1];
-
- foreach my $prog(@pkgs) {
- my $err = system("rpm -q '$prog' 2>/dev/null >/dev/null");
- add_package($prog, $is_optional) if ($err);
- }
-}
-
-sub check_pacman_missing($$)
-{
- my @pkgs = @{$_[0]};
- my $is_optional = $_[1];
-
- foreach my $prog(@pkgs) {
- my $err = system("pacman -Q '$prog' 2>/dev/null >/dev/null");
- add_package($prog, $is_optional) if ($err);
- }
-}
-
-sub check_missing_tex($)
-{
- my $is_optional = shift;
- my $kpsewhich = findprog("kpsewhich");
-
- foreach my $prog(keys %texlive) {
- my $package = $texlive{$prog};
- if (!$kpsewhich) {
- add_package($package, $is_optional);
- next;
- }
- my $file = qx($kpsewhich $prog);
- add_package($package, $is_optional) if ($file =~ /^\s*$/);
- }
-}
-
-sub get_sphinx_fname()
-{
- if ($ENV{'SPHINXBUILD'}) {
- return $ENV{'SPHINXBUILD'};
- }
-
- my $fname = "sphinx-build";
- return $fname if findprog($fname);
-
- $fname = "sphinx-build-3";
- if (findprog($fname)) {
- $need_symlink = 1;
- return $fname;
- }
-
- return "";
-}
-
-sub get_sphinx_version($)
-{
- my $cmd = shift;
- my $ver;
-
- open IN, "$cmd --version 2>&1 |";
- while (<IN>) {
- if (m/^\s*sphinx-build\s+([\d\.]+)((\+\/[\da-f]+)|(b\d+))?$/) {
- $ver=$1;
- last;
- }
- # Sphinx 1.2.x uses a different format
- if (m/^\s*Sphinx.*\s+([\d\.]+)$/) {
- $ver=$1;
- last;
- }
- }
- close IN;
- return $ver;
-}
-
-sub check_sphinx()
-{
- open IN, $conf or die "Can't open $conf";
- while (<IN>) {
- if (m/^\s*needs_sphinx\s*=\s*[\'\"]([\d\.]+)[\'\"]/) {
- $min_version=$1;
- last;
- }
- }
- close IN;
-
- die "Can't get needs_sphinx version from $conf" if (!$min_version);
-
- $virtenv_dir = $virtenv_prefix . "latest";
-
- my $sphinx = get_sphinx_fname();
- if ($sphinx eq "") {
- $need_sphinx = 1;
- return;
- }
-
- $cur_version = get_sphinx_version($sphinx);
- die "$sphinx didn't return its version" if (!$cur_version);
-
- if ($cur_version lt $min_version) {
- printf "ERROR: Sphinx version is %s. It should be >= %s\n",
- $cur_version, $min_version;
- $need_sphinx = 1;
- return;
- }
-
- return if ($cur_version lt $rec_version);
-
- # On version check mode, just assume Sphinx has all mandatory deps
- exit (0) if ($version_check);
-}
-
-#
-# Ancillary subroutines
-#
-
-sub catcheck($)
-{
- my $res = "";
- $res = qx(cat $_[0]) if (-r $_[0]);
- return $res;
-}
-
-sub which($)
-{
- my $file = shift;
- my @path = split ":", $ENV{PATH};
-
- foreach my $dir(@path) {
- my $name = $dir.'/'.$file;
- return $name if (-x $name );
- }
- return undef;
-}
-
-#
-# Subroutines that check distro-specific hints
-#
-
-sub give_debian_hints()
-{
- my %map = (
- "python-sphinx" => "python3-sphinx",
- "yaml" => "python3-yaml",
- "ensurepip" => "python3-venv",
- "virtualenv" => "virtualenv",
- "dot" => "graphviz",
- "convert" => "imagemagick",
- "Pod::Usage" => "perl-modules",
- "xelatex" => "texlive-xetex",
- "rsvg-convert" => "librsvg2-bin",
- );
-
- if ($pdf) {
- check_missing_file(["/usr/share/texlive/texmf-dist/tex/latex/ctex/ctexhook.sty"],
- "texlive-lang-chinese", 2);
-
- check_missing_file(["/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"],
- "fonts-dejavu", 2);
-
- check_missing_file(["/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc",
- "/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc",
- "/usr/share/fonts/opentype/noto/NotoSerifCJK-Regular.ttc"],
- "fonts-noto-cjk", 2);
- }
-
- check_program("dvipng", 2) if ($pdf);
- check_missing(\%map);
-
- return if (!$need && !$optional);
- printf("You should run:\n") if ($verbose_warn_install);
- printf("\n\tsudo apt-get install $install\n");
-}
-
-sub give_redhat_hints()
-{
- my %map = (
- "python-sphinx" => "python3-sphinx",
- "yaml" => "python3-pyyaml",
- "virtualenv" => "python3-virtualenv",
- "dot" => "graphviz",
- "convert" => "ImageMagick",
- "Pod::Usage" => "perl-Pod-Usage",
- "xelatex" => "texlive-xetex-bin",
- "rsvg-convert" => "librsvg2-tools",
- );
-
- my @fedora26_opt_pkgs = (
- "graphviz-gd", # Fedora 26: needed for PDF support
- );
-
- my @fedora_tex_pkgs = (
- "texlive-collection-fontsrecommended",
- "texlive-collection-latex",
- "texlive-xecjk",
- "dejavu-sans-fonts",
- "dejavu-serif-fonts",
- "dejavu-sans-mono-fonts",
- );
-
- #
- # Checks valid for RHEL/CentOS version 7.x.
- #
- my $old = 0;
- my $rel;
- my $noto_sans_redhat = "google-noto-sans-cjk-ttc-fonts";
- $rel = $1 if ($system_release =~ /(release|Linux)\s+(\d+)/);
-
- if (!($system_release =~ /Fedora/)) {
- $map{"virtualenv"} = "python-virtualenv";
-
- if ($rel && $rel < 8) {
- $old = 1;
- $pdf = 0;
-
- printf("Note: texlive packages on RHEL/CENTOS <= 7 are incomplete. Can't support PDF output\n");
- printf("If you want to build PDF, please read:\n");
- printf("\thttps://www.systutorials.com/241660/how-to-install-tex-live-on-centos-7-linux/\n");
- }
- } else {
- if ($rel && $rel < 26) {
- $old = 1;
- }
- if ($rel && $rel >= 38) {
- $noto_sans_redhat = "google-noto-sans-cjk-fonts";
- }
- }
- if (!$rel) {
- printf("Couldn't identify release number\n");
- $old = 1;
- $pdf = 0;
- }
-
- if ($pdf) {
- check_missing_file(["/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
- "/usr/share/fonts/google-noto-sans-cjk-fonts/NotoSansCJK-Regular.ttc"],
- $noto_sans_redhat, 2);
- }
-
- check_rpm_missing(\@fedora26_opt_pkgs, 2) if ($pdf && !$old);
- check_rpm_missing(\@fedora_tex_pkgs, 2) if ($pdf);
- check_missing_tex(2) if ($pdf);
- check_missing(\%map);
-
- return if (!$need && !$optional);
-
- if (!$old) {
- # dnf, for Fedora 18+
- printf("You should run:\n") if ($verbose_warn_install);
- printf("\n\tsudo dnf install -y $install\n");
- } else {
- # yum, for RHEL (and clones) or Fedora version < 18
- printf("You should run:\n") if ($verbose_warn_install);
- printf("\n\tsudo yum install -y $install\n");
- }
-}
-
-sub give_opensuse_hints()
-{
- my %map = (
- "python-sphinx" => "python3-sphinx",
- "yaml" => "python3-pyyaml",
- "virtualenv" => "python3-virtualenv",
- "dot" => "graphviz",
- "convert" => "ImageMagick",
- "Pod::Usage" => "perl-Pod-Usage",
- "xelatex" => "texlive-xetex-bin",
- );
-
- # On Tumbleweed, this package is also named rsvg-convert
- $map{"rsvg-convert"} = "rsvg-view" if (!($system_release =~ /Tumbleweed/));
-
- my @suse_tex_pkgs = (
- "texlive-babel-english",
- "texlive-caption",
- "texlive-colortbl",
- "texlive-courier",
- "texlive-dvips",
- "texlive-helvetic",
- "texlive-makeindex",
- "texlive-metafont",
- "texlive-metapost",
- "texlive-palatino",
- "texlive-preview",
- "texlive-times",
- "texlive-zapfchan",
- "texlive-zapfding",
- );
-
- $map{"latexmk"} = "texlive-latexmk-bin";
-
- # FIXME: add support for installing CJK fonts
- #
- # I tried hard, but was unable to find a way to install
- # "Noto Sans CJK SC" on openSUSE
-
- check_rpm_missing(\@suse_tex_pkgs, 2) if ($pdf);
- check_missing_tex(2) if ($pdf);
- check_missing(\%map);
-
- return if (!$need && !$optional);
- printf("You should run:\n") if ($verbose_warn_install);
- printf("\n\tsudo zypper install --no-recommends $install\n");
-}
-
-sub give_mageia_hints()
-{
- my %map = (
- "python-sphinx" => "python3-sphinx",
- "yaml" => "python3-yaml",
- "virtualenv" => "python3-virtualenv",
- "dot" => "graphviz",
- "convert" => "ImageMagick",
- "Pod::Usage" => "perl-Pod-Usage",
- "xelatex" => "texlive",
- "rsvg-convert" => "librsvg2",
- );
-
- my @tex_pkgs = (
- "texlive-fontsextra",
- );
-
- $map{"latexmk"} = "texlive-collection-basic";
-
- my $packager_cmd;
- my $noto_sans;
- if ($system_release =~ /OpenMandriva/) {
- $packager_cmd = "dnf install";
- $noto_sans = "noto-sans-cjk-fonts";
- @tex_pkgs = ( "texlive-collection-fontsextra" );
- } else {
- $packager_cmd = "urpmi";
- $noto_sans = "google-noto-sans-cjk-ttc-fonts";
- }
-
-
- if ($pdf) {
- check_missing_file(["/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
- "/usr/share/fonts/TTF/NotoSans-Regular.ttf"],
- $noto_sans, 2);
- }
-
- check_rpm_missing(\@tex_pkgs, 2) if ($pdf);
- check_missing(\%map);
-
- return if (!$need && !$optional);
- printf("You should run:\n") if ($verbose_warn_install);
- printf("\n\tsudo $packager_cmd $install\n");
-}
-
-sub give_arch_linux_hints()
-{
- my %map = (
- "yaml" => "python-yaml",
- "virtualenv" => "python-virtualenv",
- "dot" => "graphviz",
- "convert" => "imagemagick",
- "xelatex" => "texlive-xetex",
- "latexmk" => "texlive-core",
- "rsvg-convert" => "extra/librsvg",
- );
-
- my @archlinux_tex_pkgs = (
- "texlive-core",
- "texlive-latexextra",
- "ttf-dejavu",
- );
- check_pacman_missing(\@archlinux_tex_pkgs, 2) if ($pdf);
-
- if ($pdf) {
- check_missing_file(["/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc"],
- "noto-fonts-cjk", 2);
- }
-
- check_missing(\%map);
-
- return if (!$need && !$optional);
- printf("You should run:\n") if ($verbose_warn_install);
- printf("\n\tsudo pacman -S $install\n");
-}
-
-sub give_gentoo_hints()
-{
- my %map = (
- "yaml" => "dev-python/pyyaml",
- "virtualenv" => "dev-python/virtualenv",
- "dot" => "media-gfx/graphviz",
- "convert" => "media-gfx/imagemagick",
- "xelatex" => "dev-texlive/texlive-xetex media-fonts/dejavu",
- "rsvg-convert" => "gnome-base/librsvg",
- );
-
- check_missing_file(["/usr/share/fonts/dejavu/DejaVuSans.ttf"],
- "media-fonts/dejavu", 2) if ($pdf);
-
- if ($pdf) {
- check_missing_file(["/usr/share/fonts/noto-cjk/NotoSansCJKsc-Regular.otf",
- "/usr/share/fonts/noto-cjk/NotoSerifCJK-Regular.ttc"],
- "media-fonts/noto-cjk", 2);
- }
-
- check_missing(\%map);
-
- return if (!$need && !$optional);
-
- printf("You should run:\n") if ($verbose_warn_install);
- printf("\n");
-
- my $imagemagick = "media-gfx/imagemagick svg png";
- my $cairo = "media-gfx/graphviz cairo pdf";
- my $portage_imagemagick = "/etc/portage/package.use/imagemagick";
- my $portage_cairo = "/etc/portage/package.use/graphviz";
-
- if (qx(grep imagemagick $portage_imagemagick 2>/dev/null) eq "") {
- printf("\tsudo su -c 'echo \"$imagemagick\" > $portage_imagemagick'\n")
- }
- if (qx(grep graphviz $portage_cairo 2>/dev/null) eq "") {
- printf("\tsudo su -c 'echo \"$cairo\" > $portage_cairo'\n");
- }
-
- printf("\tsudo emerge --ask $install\n");
-
-}
-
-sub check_distros()
-{
- # Distro-specific hints
- if ($system_release =~ /Red Hat Enterprise Linux/) {
- give_redhat_hints;
- return;
- }
- if ($system_release =~ /CentOS/) {
- give_redhat_hints;
- return;
- }
- if ($system_release =~ /Scientific Linux/) {
- give_redhat_hints;
- return;
- }
- if ($system_release =~ /Oracle Linux Server/) {
- give_redhat_hints;
- return;
- }
- if ($system_release =~ /Fedora/) {
- give_redhat_hints;
- return;
- }
- if ($system_release =~ /Ubuntu/) {
- give_debian_hints;
- return;
- }
- if ($system_release =~ /Debian/) {
- give_debian_hints;
- return;
- }
- if ($system_release =~ /openSUSE/) {
- give_opensuse_hints;
- return;
- }
- if ($system_release =~ /Mageia/) {
- give_mageia_hints;
- return;
- }
- if ($system_release =~ /OpenMandriva/) {
- give_mageia_hints;
- return;
- }
- if ($system_release =~ /Arch Linux/) {
- give_arch_linux_hints;
- return;
- }
- if ($system_release =~ /Gentoo/) {
- give_gentoo_hints;
- return;
- }
-
- #
- # Fall-back to generic hint code for other distros
- # That's far from ideal, specially for LaTeX dependencies.
- #
- my %map = (
- "sphinx-build" => "sphinx"
- );
- check_missing_tex(2) if ($pdf);
- check_missing(\%map);
- print "I don't know distro $system_release.\n";
- print "So, I can't provide you a hint with the install procedure.\n";
- print "There are likely missing dependencies.\n";
-}
-
-#
-# Common dependencies
+# Copyright (c) 2017-2025 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
#
+# pylint: disable=C0103,C0114,C0115,C0116,C0301,C0302
+# pylint: disable=R0902,R0904,R0911,R0912,R0914,R0915,R1705,R1710,E1121
+
+# Note: this script requires at least Python 3.6 to run.
+# Don't add changes not compatible with it, it is meant to report
+# incompatible python versions.
+
+"""
+Dependency checker for Sphinx documentation Kernel build.
+
+This module provides tools to check for all required dependencies needed to
+build documentation using Sphinx, including system packages, Python modules
+and LaTeX packages for PDF generation.
+
+It detects packages for a subset of Linux distributions used by Kernel
+maintainers, showing hints about missing dependencies.
+
+The main class SphinxDependencyChecker handles the dependency checking logic
+and provides recommendations for installing missing packages. It supports both
+system package installations and Python virtual environments. By default,
+system package install is recommended.
+"""
+
+import argparse
+import os
+import re
+import subprocess
+import sys
+from glob import glob
+
+
+def parse_version(version):
+ """Convert a major.minor.patch version into a tuple"""
+ return tuple(int(x) for x in version.split("."))
+
+
+def ver_str(version):
+ """Returns a version tuple as major.minor.patch"""
+
+ return ".".join([str(x) for x in version])
+
+
+RECOMMENDED_VERSION = parse_version("3.4.3")
+MIN_PYTHON_VERSION = parse_version("3.7")
+
+
+class DepManager:
+ """
+ Manage package dependencies. There are three types of dependencies:
+
+ - System: dependencies required for docs build;
+ - Python: python dependencies for a native distro Sphinx install;
+ - PDF: dependencies needed by PDF builds.
+
+ Each dependency can be mandatory or optional. Not installing an optional
+ dependency won't break the build, but will degrade the quality of the
+ docs output.
+ """
+
+ # Internal types of dependencies. Don't use them outside DepManager class.
+ _SYS_TYPE = 0
+ _PHY_TYPE = 1
+ _PDF_TYPE = 2
+
+ # Dependencies visible outside the class.
+ # Each one is a tuple: (type, is_mandatory flag).
+ #
+ # Currently we're not using all optional dep types. Yet, we'll keep all
+ # possible combinations here. They're not many, and that makes it easier
+ # if they're needed later, and for the name() method below.
+
+ SYSTEM_MANDATORY = (_SYS_TYPE, True)
+ PYTHON_MANDATORY = (_PHY_TYPE, True)
+ PDF_MANDATORY = (_PDF_TYPE, True)
+
+ SYSTEM_OPTIONAL = (_SYS_TYPE, False)
+ PYTHON_OPTIONAL = (_PHY_TYPE, False)
+ PDF_OPTIONAL = (_PDF_TYPE, False)
+
+ def __init__(self, pdf):
+ """
+ Initialize internal vars:
+
+ - missing: missing dependencies dict, mapping a distro-independent
+ name of a missing dependency to its type.
+ - missing_pkg: ancillary dict containing missing dependencies in
+ distro namespace, organized by type.
+ - need: total number of needed dependencies. Never cleaned.
+ - optional: total number of optional dependencies. Never cleaned.
+ - pdf: Is PDF support enabled?
+ """
+ self.missing = {}
+ self.missing_pkg = {}
+ self.need = 0
+ self.optional = 0
+ self.pdf = pdf
+
+ @staticmethod
+ def name(dtype):
+ """
+ Ancillary routine to build the warn/error message prefix used to
+ report missing dependencies.
+ """
+ if dtype[0] == DepManager._SYS_TYPE:
+ msg = "build"
+ elif dtype[0] == DepManager._PHY_TYPE:
+ msg = "Python"
+ else:
+ msg = "PDF"
+
+ if dtype[1]:
+ return f"ERROR: {msg} mandatory deps missing"
+ else:
+ return f"Warning: {msg} optional deps missing"
+
+ @staticmethod
+ def is_optional(dtype):
+ """Ancillary routine to report if a dependency is optional"""
+ return not dtype[1]
+
+ @staticmethod
+ def is_pdf(dtype):
+ """Ancillary routine to report if a dependency is for PDF generation"""
+ if dtype[0] == DepManager._PDF_TYPE:
+ return True
+
+ return False
+
+ def add_package(self, package, dtype):
+ """
+ Add a package to the self.missing dictionary.
+ Doesn't update missing_pkg.
+ """
+ is_optional = DepManager.is_optional(dtype)
+ self.missing[package] = dtype
+ if is_optional:
+ self.optional += 1
+ else:
+ self.need += 1
+
+ def del_package(self, package):
+ """
+ Remove a package from the self.missing dictionary.
+ Doesn't update missing_pkg.
+ """
+ if package in self.missing:
+ del self.missing[package]
+
+ def clear_deps(self):
+ """
+ Clear dependencies without changing needed/optional.
+
+ This is an awkward way to have a separate section recommending
+ a package after the main system dependencies.
+
+ TODO: rework the logic to prevent needing it.
+ """
+
+ self.missing = {}
+ self.missing_pkg = {}
+
+ def check_missing(self, progs):
+ """
+ Update self.missing_pkg, using the progs dict to convert from the
+ distro-agnostic package name to the distro-specific one.
+
+ Returns a string with the packages to be installed, sorted and
+ with any duplicates removed.
+ """
+
+ self.missing_pkg = {}
+
+ for prog, dtype in sorted(self.missing.items()):
+ # At least on some LTS distros like CentOS 7, texlive doesn't
+ # provide all packages we need. When such distros are
+ # detected, we have to disable PDF output.
+ #
+ # So, we need to ignore the packages that distros would
+ # need for LaTeX to work
+ if DepManager.is_pdf(dtype) and not self.pdf:
+ self.optional -= 1
+ continue
+
+ if dtype not in self.missing_pkg:
+ self.missing_pkg[dtype] = []
+
+ self.missing_pkg[dtype].append(progs.get(prog, prog))
+
+ install = []
+ for dtype, pkgs in self.missing_pkg.items():
+ install += pkgs
+
+ return " ".join(sorted(set(install)))
+
+ def warn_install(self):
+ """
+ Emit warnings/errors related to missing packages.
+ """
+
+ output_msg = ""
+
+ for dtype in sorted(self.missing_pkg.keys()):
+ progs = " ".join(sorted(set(self.missing_pkg[dtype])))
+
+ try:
+ name = DepManager.name(dtype)
+ output_msg += f'{name}:\t{progs}\n'
+ except KeyError:
+ raise KeyError(f"ERROR!!!: invalid dtype for {progs}: {dtype}")
+
+ if output_msg:
+ print(f"\n{output_msg}")
+
+class AncillaryMethods:
+ """
+ Ancillary methods that check for missing dependencies of different
+ kinds, like binaries, python modules, rpm deps, etc.
+ """
+
+ @staticmethod
+ def which(prog):
+ """
+ Our own implementation of which(). We could instead use
+ shutil.which(), but this function is simple enough.
+ Probably faster to use this implementation than to import shutil.
+ """
+ for path in os.environ.get("PATH", "").split(":"):
+ full_path = os.path.join(path, prog)
+ if os.access(full_path, os.X_OK):
+ return full_path
+
+ return None
+
+ @staticmethod
+ def get_python_version(cmd):
+ """
+ Get the version from a Python binary. As we need to detect whether newer
+ python binaries are available, we can't rely on sys.version_info here.
+ """
+
+ result = SphinxDependencyChecker.run([cmd, "--version"],
+ capture_output=True, text=True)
+ version = result.stdout.strip()
+
+ match = re.search(r"(\d+\.\d+\.\d+)", version)
+ if match:
+ return parse_version(match.group(1))
+
+ print(f"Can't parse version {version}")
+ return (0, 0, 0)
+
+ @staticmethod
+ def find_python():
+ """
+ Detect whether any python 3.xy version newer than the current
+ one is available.
+
+ Note: this routine is limited to up to 2 minor-version digits for
+ python3. We may need to update it one day, hopefully in a distant future.
+ """
+ patterns = [
+ "python3.[0-9]",
+ "python3.[0-9][0-9]",
+ ]
+
+ # Seek for a python binary newer than MIN_PYTHON_VERSION
+ for path in os.getenv("PATH", "").split(":"):
+ for pattern in patterns:
+ for cmd in glob(os.path.join(path, pattern)):
+ if os.path.isfile(cmd) and os.access(cmd, os.X_OK):
+ version = SphinxDependencyChecker.get_python_version(cmd)
+ if version >= MIN_PYTHON_VERSION:
+ return cmd
+
+ @staticmethod
+ def check_python():
+ """
+ Check if the current python binary satisfies our minimal requirement
+ for Sphinx build. If not, re-run with a newer version if found.
+ """
+ cur_ver = sys.version_info[:3]
+ if cur_ver >= MIN_PYTHON_VERSION:
+ ver = ver_str(cur_ver)
+ print(f"Python version: {ver}")
+
+ # This could be useful for debugging purposes
+ if SphinxDependencyChecker.which("docutils"):
+ result = SphinxDependencyChecker.run(["docutils", "--version"],
+ capture_output=True, text=True)
+ ver = result.stdout.strip()
+ match = re.search(r"(\d+\.\d+\.\d+)", ver)
+ if match:
+ ver = match.group(1)
+
+ print(f"Docutils version: {ver}")
+
+ return
+
+ python_ver = ver_str(cur_ver)
+
+ new_python_cmd = SphinxDependencyChecker.find_python()
+ if not new_python_cmd:
+ print(f"ERROR: Python version {python_ver} is not spported anymore\n")
+ print(" Can't find a new version. This script may fail")
+ return
+
+ # Restart script using the newer version
+ script_path = os.path.abspath(sys.argv[0])
+ args = [new_python_cmd, script_path] + sys.argv[1:]
+
+ print(f"Python {python_ver} not supported. Changing to {new_python_cmd}")
+
+ try:
+ os.execv(new_python_cmd, args)
+ except OSError as e:
+ sys.exit(f"Failed to restart with {new_python_cmd}: {e}")
+
+ @staticmethod
+ def run(*args, **kwargs):
+ """
+ Execute a command, hiding its output by default.
+ Preserve compatibility with older Python versions.
+ """
+
+ capture_output = kwargs.pop('capture_output', False)
+
+ if capture_output:
+ if 'stdout' not in kwargs:
+ kwargs['stdout'] = subprocess.PIPE
+ if 'stderr' not in kwargs:
+ kwargs['stderr'] = subprocess.PIPE
+ else:
+ if 'stdout' not in kwargs:
+ kwargs['stdout'] = subprocess.DEVNULL
+ if 'stderr' not in kwargs:
+ kwargs['stderr'] = subprocess.DEVNULL
+
+ # Don't break with older Python versions
+ if 'text' in kwargs and sys.version_info < (3, 7):
+ kwargs['universal_newlines'] = kwargs.pop('text')
+
+ return subprocess.run(*args, **kwargs)
+
+class MissingCheckers(AncillaryMethods):
+ """
+ Contains some ancillary checkers for different types of binaries and
+ package managers.
+ """
+
+ def __init__(self, args, texlive):
+ """
+ Initialize its internal variables
+ """
+ self.pdf = args.pdf
+ self.virtualenv = args.virtualenv
+ self.version_check = args.version_check
+ self.texlive = texlive
+
+ self.min_version = (0, 0, 0)
+ self.cur_version = (0, 0, 0)
+
+ self.deps = DepManager(self.pdf)
+
+ self.need_symlink = 0
+ self.need_sphinx = 0
+
+ self.verbose_warn_install = 1
+
+ self.virtenv_dir = ""
+ self.install = ""
+ self.python_cmd = ""
+
+ self.virtenv_prefix = ["sphinx_", "Sphinx_" ]
+
+ def check_missing_file(self, files, package, dtype):
+ """
+ Does the file exist? If not, add it to the missing dependencies.
+ """
+ for f in files:
+ if os.path.exists(f):
+ return
+ self.deps.add_package(package, dtype)
+
+ def check_program(self, prog, dtype):
+ """
+ Does the program exist, and is it in the PATH?
+ If not, add it to the missing dependencies.
+ """
+ found = self.which(prog)
+ if found:
+ return found
+
+ self.deps.add_package(prog, dtype)
+
+ return None
+
+ def check_perl_module(self, prog, dtype):
+ """
+ Is the required Perl module available?
+ If not, add it to the missing dependencies.
+
+ Right now, we still need Perl for doc build, as it is required
+ by some tools called at docs or kernel build time, like:
+
+ scripts/documentation-file-ref-check
+
+ Also, checkpatch is written in Perl.
+ """
+
+ # While testing with lxc download template, one of the
+ # distros (Oracle) didn't have perl - nor even an option to install
+ # before installing oraclelinux-release-el9 package.
+ #
+ # Check for it before reporting an error. If perl is not there,
+ # add it as a mandatory package, as some parts of the doc builder
+ # need it.
+ if not self.which("perl"):
+ self.deps.add_package("perl", DepManager.SYSTEM_MANDATORY)
+ self.deps.add_package(prog, dtype)
+ return
+
+ try:
+ self.run(["perl", f"-M{prog}", "-e", "1"], check=True)
+ except subprocess.CalledProcessError:
+ self.deps.add_package(prog, dtype)
+
+ def check_python_module(self, module, is_optional=False):
+ """
+ Does a python module exist outside the venv? If not, add it to the
+ missing dependencies.
+ """
+ if is_optional:
+ dtype = DepManager.PYTHON_OPTIONAL
+ else:
+ dtype = DepManager.PYTHON_MANDATORY
+
+ try:
+ self.run([self.python_cmd, "-c", f"import {module}"], check=True)
+ except subprocess.CalledProcessError:
+ self.deps.add_package(module, dtype)
+
+ def check_rpm_missing(self, pkgs, dtype):
+ """
+ Is the rpm package installed? If not, add it to the missing dependencies.
+ """
+ for prog in pkgs:
+ try:
+ self.run(["rpm", "-q", prog], check=True)
+ except subprocess.CalledProcessError:
+ self.deps.add_package(prog, dtype)
+
+ def check_pacman_missing(self, pkgs, dtype):
+ """
+ Is the pacman package installed? If not, add it to the missing dependencies.
+ """
+ for prog in pkgs:
+ try:
+ self.run(["pacman", "-Q", prog], check=True)
+ except subprocess.CalledProcessError:
+ self.deps.add_package(prog, dtype)
+
+ def check_missing_tex(self, is_optional=False):
+ """
+ Does the LaTeX package exist? If not, add it to the missing dependencies.
+ """
+ if is_optional:
+ dtype = DepManager.PDF_OPTIONAL
+ else:
+ dtype = DepManager.PDF_MANDATORY
+
+ kpsewhich = self.which("kpsewhich")
+ for prog, package in self.texlive.items():
+
+ # If kpsewhich is not there, just add the package to deps
+ if not kpsewhich:
+ self.deps.add_package(package, dtype)
+ continue
+
+ # Check if the package is needed
+ try:
+ result = self.run(
+ [kpsewhich, prog], stdout=subprocess.PIPE, text=True, check=True
+ )
+
+ # Didn't find. Add it
+ if not result.stdout.strip():
+ self.deps.add_package(package, dtype)
+
+ except subprocess.CalledProcessError:
+ # kpsewhich returned an error. Add it, just in case
+ self.deps.add_package(package, dtype)
+
+ def get_sphinx_fname(self):
+ """
+ Gets the binary filename for sphinx-build.
+ """
+ if "SPHINXBUILD" in os.environ:
+ return os.environ["SPHINXBUILD"]
+
+ fname = "sphinx-build"
+ if self.which(fname):
+ return fname
+
+ fname = "sphinx-build-3"
+ if self.which(fname):
+ self.need_symlink = 1
+ return fname
+
+ return ""
+
+ def get_sphinx_version(self, cmd):
+ """
+ Gets sphinx-build version.
+ """
+ try:
+ result = self.run([cmd, "--version"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True, check=True)
+ except (subprocess.CalledProcessError, FileNotFoundError):
+ return None
+
+ for line in result.stdout.split("\n"):
+ match = re.match(r"^sphinx-build\s+([\d\.]+)(?:\+(?:/[\da-f]+)|b\d+)?\s*$", line)
+ if match:
+ return parse_version(match.group(1))
+
+ match = re.match(r"^Sphinx.*\s+([\d\.]+)\s*$", line)
+ if match:
+ return parse_version(match.group(1))
+
+ def check_sphinx(self, conf):
+ """
+ Checks Sphinx minimal requirements
+ """
+ try:
+ with open(conf, "r", encoding="utf-8") as f:
+ for line in f:
+ match = re.match(r"^\s*needs_sphinx\s*=\s*[\'\"]([\d\.]+)[\'\"]", line)
+ if match:
+ self.min_version = parse_version(match.group(1))
+ break
+ except IOError:
+ sys.exit(f"Can't open {conf}")
+
+ if not self.min_version:
+ sys.exit(f"Can't get needs_sphinx version from {conf}")
+
+ self.virtenv_dir = self.virtenv_prefix[0] + "latest"
+
+ sphinx = self.get_sphinx_fname()
+ if not sphinx:
+ self.need_sphinx = 1
+ return
+
+ self.cur_version = self.get_sphinx_version(sphinx)
+ if not self.cur_version:
+ sys.exit(f"{sphinx} didn't return its version")
+
+ if self.cur_version < self.min_version:
+ curver = ver_str(self.cur_version)
+ minver = ver_str(self.min_version)
+
+ print(f"ERROR: Sphinx version is {curver}. It should be >= {minver}")
+ self.need_sphinx = 1
+ return
+
+        # In version-check mode, just assume Sphinx has all mandatory deps
+ if self.version_check and self.cur_version >= RECOMMENDED_VERSION:
+ sys.exit(0)
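+
+    # The assignment this method scans for in Documentation/conf.py has the
+    # form (illustrative value; the real minimum lives in conf.py):
+    #
+    #   needs_sphinx = "3.4.3"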
+
+ def catcheck(self, filename):
+ """
+        Reads a file if it exists, returning its stripped contents as a
+        string. If not found, returns an empty string.
+ """
+ if os.path.exists(filename):
+ with open(filename, "r", encoding="utf-8") as f:
+ return f.read().strip()
+ return ""
+
+ def get_system_release(self):
+ """
+        Determine the system type. There's no single method that works
+        across all distros with a minimal package install, so several
+        methods are used here.
+
+        By default, it uses the lsb_release command. If not available, it
+        falls back to reading the known different places where the distro
+        name is stored.
+
+        Several modern distros now have /etc/os-release, which usually has
+        decent coverage.
+ """
+
+ system_release = ""
+
+ if self.which("lsb_release"):
+ result = self.run(["lsb_release", "-d"], capture_output=True, text=True)
+ system_release = result.stdout.replace("Description:", "").strip()
+
+ release_files = [
+ "/etc/system-release",
+ "/etc/redhat-release",
+ "/etc/lsb-release",
+ "/etc/gentoo-release",
+ ]
+
+ if not system_release:
+ for f in release_files:
+ system_release = self.catcheck(f)
+ if system_release:
+ break
+
+ # This seems more common than LSB these days
+ if not system_release:
+ os_var = {}
+ try:
+ with open("/etc/os-release", "r", encoding="utf-8") as f:
+ for line in f:
+ match = re.match(r"^([\w\d\_]+)=\"?([^\"]*)\"?\n", line)
+ if match:
+ os_var[match.group(1)] = match.group(2)
+
+ system_release = os_var.get("NAME", "")
+ if "VERSION_ID" in os_var:
+ system_release += " " + os_var["VERSION_ID"]
+ elif "VERSION" in os_var:
+ system_release += " " + os_var["VERSION"]
+ except IOError:
+ pass
+
+ if not system_release:
+ system_release = self.catcheck("/etc/issue")
+
+ system_release = system_release.strip()
+
+ return system_release
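+
+    # A minimal sketch of the /etc/os-release fallback above: hypothetical
+    # contents such as
+    #
+    #   NAME="Fedora Linux"
+    #   VERSION_ID=40
+    #
+    # would yield system_release == "Fedora Linux 40".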
+
+class SphinxDependencyChecker(MissingCheckers):
+ """
+ Main class for checking Sphinx documentation build dependencies.
+
+ - Check for missing system packages;
+ - Check for missing Python modules;
+ - Check for missing LaTeX packages needed by PDF generation;
+ - Propose Sphinx install via Python Virtual environment;
+ - Propose Sphinx install via distro-specific package install.
+ """
+ def __init__(self, args):
+ """Initialize checker variables"""
+
+        # List of required texlive packages on Fedora and openSUSE
+ texlive = {
+ "amsfonts.sty": "texlive-amsfonts",
+ "amsmath.sty": "texlive-amsmath",
+ "amssymb.sty": "texlive-amsfonts",
+ "amsthm.sty": "texlive-amscls",
+ "anyfontsize.sty": "texlive-anyfontsize",
+ "atbegshi.sty": "texlive-oberdiek",
+ "bm.sty": "texlive-tools",
+ "capt-of.sty": "texlive-capt-of",
+ "cmap.sty": "texlive-cmap",
+ "ctexhook.sty": "texlive-ctex",
+ "ecrm1000.tfm": "texlive-ec",
+ "eqparbox.sty": "texlive-eqparbox",
+ "eu1enc.def": "texlive-euenc",
+ "fancybox.sty": "texlive-fancybox",
+ "fancyvrb.sty": "texlive-fancyvrb",
+ "float.sty": "texlive-float",
+ "fncychap.sty": "texlive-fncychap",
+ "footnote.sty": "texlive-mdwtools",
+ "framed.sty": "texlive-framed",
+ "luatex85.sty": "texlive-luatex85",
+ "multirow.sty": "texlive-multirow",
+ "needspace.sty": "texlive-needspace",
+ "palatino.sty": "texlive-psnfss",
+ "parskip.sty": "texlive-parskip",
+ "polyglossia.sty": "texlive-polyglossia",
+ "tabulary.sty": "texlive-tabulary",
+ "threeparttable.sty": "texlive-threeparttable",
+ "titlesec.sty": "texlive-titlesec",
+ "ucs.sty": "texlive-ucs",
+ "upquote.sty": "texlive-upquote",
+ "wrapfig.sty": "texlive-wrapfig",
+ }
+
+ super().__init__(args, texlive)
+
+ self.need_pip = False
+ self.rec_sphinx_upgrade = 0
+
+ self.system_release = self.get_system_release()
+ self.activate_cmd = ""
+
+        # Some distros may not ship a Sphinx package compatible with
+        # our minimal requirements
+ self.package_supported = True
+
+ # Recommend a new python version
+ self.recommend_python = None
+
+ # Certain hints are meant to be shown only once
+ self.distro_msg = None
+
+ self.latest_avail_ver = (0, 0, 0)
+ self.venv_ver = (0, 0, 0)
+
+ prefix = os.environ.get("srctree", ".") + "/"
+
+ self.conf = prefix + "Documentation/conf.py"
+ self.requirement_file = prefix + "Documentation/sphinx/requirements.txt"
+
+ def get_install_progs(self, progs, cmd, extra=None):
+ """
+ Check for missing dependencies using the provided program mapping.
+
+        Distro-specific package names are mapped via the progs argument.
+ """
+ install = self.deps.check_missing(progs)
+
+ if self.verbose_warn_install:
+ self.deps.warn_install()
+
+ if not install:
+ return
+
+ if cmd:
+ if self.verbose_warn_install:
+ msg = "You should run:"
+ else:
+ msg = ""
+
+ if extra:
+ msg += "\n\t" + extra.replace("\n", "\n\t")
+
+            return msg + "\n\tsudo " + cmd + " " + install
+
+ return None
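+
+    # Illustration of the hint returned above, assuming verbose_warn_install
+    # is set, cmd == "apt-get install" and check_missing() reported a single
+    # missing "graphviz" package (hypothetical values):
+    #
+    #   "You should run:\n\tsudo apt-get install graphviz"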
+
+ #
+ # Distro-specific hints methods
+ #
+
+ def give_debian_hints(self):
+ """
+ Provide package installation hints for Debian-based distros.
+ """
+ progs = {
+ "Pod::Usage": "perl-modules",
+ "convert": "imagemagick",
+ "dot": "graphviz",
+ "ensurepip": "python3-venv",
+ "python-sphinx": "python3-sphinx",
+ "rsvg-convert": "librsvg2-bin",
+ "virtualenv": "virtualenv",
+ "xelatex": "texlive-xetex",
+ "yaml": "python3-yaml",
+ }
+
+ if self.pdf:
+ pdf_pkgs = {
+ "fonts-dejavu": [
+ "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
+ ],
+ "fonts-noto-cjk": [
+ "/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc",
+ "/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc",
+ "/usr/share/fonts/opentype/noto/NotoSerifCJK-Regular.ttc",
+ ],
+ "tex-gyre": [
+ "/usr/share/texmf/tex/latex/tex-gyre/tgtermes.sty"
+ ],
+ "texlive-fonts-recommended": [
+ "/usr/share/texlive/texmf-dist/fonts/tfm/adobe/zapfding/pzdr.tfm",
+ ],
+ "texlive-lang-chinese": [
+ "/usr/share/texlive/texmf-dist/tex/latex/ctex/ctexhook.sty",
+ ],
+ }
+
+ for package, files in pdf_pkgs.items():
+ self.check_missing_file(files, package, DepManager.PDF_MANDATORY)
+
+ self.check_program("dvipng", DepManager.PDF_MANDATORY)
+
+ if not self.distro_msg:
+ self.distro_msg = \
+ "Note: ImageMagick is broken on some distros, affecting PDF output. For more details:\n" \
+ "\thttps://askubuntu.com/questions/1158894/imagemagick-still-broken-using-with-usr-bin-convert"
+
+ return self.get_install_progs(progs, "apt-get install")
+
+ def give_redhat_hints(self):
+ """
+ Provide package installation hints for RedHat-based distros
+ (Fedora, RHEL and RHEL-based variants).
+ """
+ progs = {
+ "Pod::Usage": "perl-Pod-Usage",
+ "convert": "ImageMagick",
+ "dot": "graphviz",
+ "python-sphinx": "python3-sphinx",
+ "rsvg-convert": "librsvg2-tools",
+ "virtualenv": "python3-virtualenv",
+ "xelatex": "texlive-xetex-bin",
+ "yaml": "python3-pyyaml",
+ }
+
+ fedora_tex_pkgs = [
+ "dejavu-sans-fonts",
+ "dejavu-sans-mono-fonts",
+ "dejavu-serif-fonts",
+ "texlive-collection-fontsrecommended",
+ "texlive-collection-latex",
+ "texlive-xecjk",
+ ]
+
+ fedora = False
+ rel = None
+
+ match = re.search(r"(release|Linux)\s+(\d+)", self.system_release)
+ if match:
+ rel = int(match.group(2))
+
+ if not rel:
+ print("Couldn't identify release number")
+ noto_sans_redhat = None
+ self.pdf = False
+ elif re.search("Fedora", self.system_release):
+            # Fedora 38 and later use this CJK font
+
+ noto_sans_redhat = "google-noto-sans-cjk-fonts"
+ fedora = True
+ else:
+ # Almalinux, CentOS, RHEL, ...
+
+ # at least up to version 9 (and Fedora < 38), that's the CJK font
+ noto_sans_redhat = "google-noto-sans-cjk-ttc-fonts"
+
+ progs["virtualenv"] = "python-virtualenv"
+
+ if not rel or rel < 8:
+ print("ERROR: Distro not supported. Too old?")
+ return
+
+        # RHEL 8 uses Python 3.6, which is not compatible with
+        # the build system anymore. Suggest Python 3.9 instead.
+ if rel == 8:
+ self.check_program("python3.9", DepManager.SYSTEM_MANDATORY)
+ progs["python3.9"] = "python39"
+ progs["yaml"] = "python39-pyyaml"
+
+ self.recommend_python = True
+
+ # There's no python39-sphinx package. Only pip is supported
+ self.package_supported = False
+
+ if not self.distro_msg:
+ self.distro_msg = \
+ "Note: RHEL-based distros typically require extra repositories.\n" \
+ "For most, enabling epel and crb are enough:\n" \
+ "\tsudo dnf install -y epel-release\n" \
+ "\tsudo dnf config-manager --set-enabled crb\n" \
+ "Yet, some may have other required repositories. Those commands could be useful:\n" \
+ "\tsudo dnf repolist all\n" \
+ "\tsudo dnf repoquery --available --info <pkgs>\n" \
+ "\tsudo dnf config-manager --set-enabled '*' # enable all - probably not what you want"
+
+ if self.pdf:
+ pdf_pkgs = [
+ "/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
+ "/usr/share/fonts/google-noto-sans-cjk-fonts/NotoSansCJK-Regular.ttc",
+ ]
+
+ self.check_missing_file(pdf_pkgs, noto_sans_redhat, DepManager.PDF_MANDATORY)
+
+ self.check_rpm_missing(fedora_tex_pkgs, DepManager.PDF_MANDATORY)
+
+            self.check_missing_tex()
+
+ # There's no texlive-ctex on RHEL 8 repositories. This will
+ # likely affect CJK pdf build only.
+ if not fedora and rel == 8:
+ self.deps.del_package("texlive-ctex")
+
+ return self.get_install_progs(progs, "dnf install")
+
+ def give_opensuse_hints(self):
+ """
+ Provide package installation hints for openSUSE-based distros
+ (Leap and Tumbleweed).
+ """
+ progs = {
+ "Pod::Usage": "perl-Pod-Usage",
+ "convert": "ImageMagick",
+ "dot": "graphviz",
+ "python-sphinx": "python3-sphinx",
+ "virtualenv": "python3-virtualenv",
+ "xelatex": "texlive-xetex-bin texlive-dejavu",
+ "yaml": "python3-pyyaml",
+ }
+
+ suse_tex_pkgs = [
+ "texlive-babel-english",
+ "texlive-caption",
+ "texlive-colortbl",
+ "texlive-courier",
+ "texlive-dvips",
+ "texlive-helvetic",
+ "texlive-makeindex",
+ "texlive-metafont",
+ "texlive-metapost",
+ "texlive-palatino",
+ "texlive-preview",
+ "texlive-times",
+ "texlive-zapfchan",
+ "texlive-zapfding",
+ ]
+
+ progs["latexmk"] = "texlive-latexmk-bin"
+
+ match = re.search(r"(Leap)\s+(\d+).(\d)", self.system_release)
+ if match:
+ rel = int(match.group(2))
+
+ # Leap 15.x uses Python 3.6, which is not compatible with
+ # the build system anymore. Suggest Python 3.11
+ if rel == 15:
+ if not self.which(self.python_cmd):
+ self.check_program("python3.11", DepManager.SYSTEM_MANDATORY)
+ progs["python3.11"] = "python311"
+ self.recommend_python = True
+
+ progs.update({
+ "python-sphinx": "python311-Sphinx python311-Sphinx-latex",
+ "virtualenv": "python311-virtualenv",
+ "yaml": "python311-PyYAML",
+ })
+ else:
+            # Tumbleweed defaults to Python 3.13
+
+ progs.update({
+ "python-sphinx": "python313-Sphinx python313-Sphinx-latex",
+ "virtualenv": "python313-virtualenv",
+ "yaml": "python313-PyYAML",
+ })
+
+ # FIXME: add support for installing CJK fonts
+ #
+ # I tried hard, but was unable to find a way to install
+ # "Noto Sans CJK SC" on openSUSE
+
+        if self.pdf:
+            self.check_rpm_missing(suse_tex_pkgs, DepManager.PDF_MANDATORY)
+            self.check_missing_tex()
+
+ return self.get_install_progs(progs, "zypper install --no-recommends")
+
+ def give_mageia_hints(self):
+ """
+ Provide package installation hints for Mageia and OpenMandriva.
+ """
+ progs = {
+ "Pod::Usage": "perl-Pod-Usage",
+ "convert": "ImageMagick",
+ "dot": "graphviz",
+ "python-sphinx": "python3-sphinx",
+ "rsvg-convert": "librsvg2",
+ "virtualenv": "python3-virtualenv",
+ "xelatex": "texlive",
+ "yaml": "python3-yaml",
+ }
+
+ tex_pkgs = [
+ "texlive-fontsextra",
+ "texlive-fonts-asian",
+ "fonts-ttf-dejavu",
+ ]
+
+ if re.search(r"OpenMandriva", self.system_release):
+ packager_cmd = "dnf install"
+ noto_sans = "noto-sans-cjk-fonts"
+ tex_pkgs = [
+ "texlive-collection-basic",
+ "texlive-collection-langcjk",
+ "texlive-collection-fontsextra",
+ "texlive-collection-fontsrecommended"
+ ]
+
+ # Tested on OpenMandriva Lx 4.3
+ progs["convert"] = "imagemagick"
+ progs["yaml"] = "python-pyyaml"
+ progs["python-virtualenv"] = "python-virtualenv"
+ progs["python-sphinx"] = "python-sphinx"
+ progs["xelatex"] = "texlive"
+
+ self.check_program("python-virtualenv", DepManager.PYTHON_MANDATORY)
+
+            # On my tests with the OpenMandriva Lx 4.0 docker image, upgraded
+            # to 4.3, the python-virtualenv package is broken: it is missing
+            # ensurepip. Without it, the alternative would be to run:
+            # python3 -m venv --without-pip ~/sphinx_latest, but pip run
+            # there won't install sphinx into the venv.
+            #
+            # Add a note about that.
+
+ if not self.distro_msg:
+ self.distro_msg = \
+ "Notes:\n"\
+ "1. for venv, ensurepip could be broken, preventing its install method.\n" \
+ "2. at least on OpenMandriva LX 4.3, texlive packages seem broken"
+
+ else:
+ packager_cmd = "urpmi"
+ noto_sans = "google-noto-sans-cjk-ttc-fonts"
+
+ progs["latexmk"] = "texlive-collection-basic"
+
+ if self.pdf:
+ pdf_pkgs = [
+ "/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
+ "/usr/share/fonts/TTF/NotoSans-Regular.ttf",
+ ]
+
+ self.check_missing_file(pdf_pkgs, noto_sans, DepManager.PDF_MANDATORY)
+ self.check_rpm_missing(tex_pkgs, DepManager.PDF_MANDATORY)
+
+ return self.get_install_progs(progs, packager_cmd)
+
+ def give_arch_linux_hints(self):
+ """
+ Provide package installation hints for ArchLinux.
+ """
+ progs = {
+ "convert": "imagemagick",
+ "dot": "graphviz",
+ "latexmk": "texlive-core",
+ "rsvg-convert": "extra/librsvg",
+ "virtualenv": "python-virtualenv",
+ "xelatex": "texlive-xetex",
+ "yaml": "python-yaml",
+ }
+
+ archlinux_tex_pkgs = [
+ "texlive-basic",
+ "texlive-binextra",
+ "texlive-core",
+ "texlive-fontsrecommended",
+ "texlive-langchinese",
+ "texlive-langcjk",
+ "texlive-latexextra",
+ "ttf-dejavu",
+ ]
+
+ if self.pdf:
+ self.check_pacman_missing(archlinux_tex_pkgs,
+ DepManager.PDF_MANDATORY)
+
+ self.check_missing_file(["/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc"],
+ "noto-fonts-cjk",
+ DepManager.PDF_MANDATORY)
+
+ return self.get_install_progs(progs, "pacman -S")
+
+ def give_gentoo_hints(self):
+ """
+ Provide package installation hints for Gentoo.
+ """
+ texlive_deps = [
+ "dev-texlive/texlive-fontsrecommended",
+ "dev-texlive/texlive-latexextra",
+ "dev-texlive/texlive-xetex",
+ "media-fonts/dejavu",
+ ]
+
+ progs = {
+ "convert": "media-gfx/imagemagick",
+ "dot": "media-gfx/graphviz",
+ "rsvg-convert": "gnome-base/librsvg",
+ "virtualenv": "dev-python/virtualenv",
+ "xelatex": " ".join(texlive_deps),
+ "yaml": "dev-python/pyyaml",
+ "python-sphinx": "dev-python/sphinx",
+ }
+
+ if self.pdf:
+ pdf_pkgs = {
+ "media-fonts/dejavu": [
+ "/usr/share/fonts/dejavu/DejaVuSans.ttf",
+ ],
+ "media-fonts/noto-cjk": [
+ "/usr/share/fonts/noto-cjk/NotoSansCJKsc-Regular.otf",
+ "/usr/share/fonts/noto-cjk/NotoSerifCJK-Regular.ttc",
+ ],
+ }
+ for package, files in pdf_pkgs.items():
+ self.check_missing_file(files, package, DepManager.PDF_MANDATORY)
+
+        # Handling dependencies is a nightmare, as Gentoo refuses to emerge
+        # some packages if there's no package.use file describing them.
+        # To make it worse, compilation flags must also be present there
+        # for some packages. If USE is not perfect, error/warning messages
+        # like these are shown:
+ #
+ # !!! The following binary packages have been ignored due to non matching USE:
+ #
+ # =media-gfx/graphviz-12.2.1-r1 X pdf -python_single_target_python3_13 qt6 svg
+ # =media-gfx/graphviz-12.2.1-r1 X pdf python_single_target_python3_12 -python_single_target_python3_13 qt6 svg
+ # =media-gfx/graphviz-12.2.1-r1 X pdf qt6 svg
+ # =media-gfx/graphviz-12.2.1-r1 X pdf -python_single_target_python3_10 qt6 svg
+ # =media-gfx/graphviz-12.2.1-r1 X pdf -python_single_target_python3_10 python_single_target_python3_12 -python_single_target_python3_13 qt6 svg
+ # =media-fonts/noto-cjk-20190416 X
+ # =app-text/texlive-core-2024-r1 X cjk -xetex
+ # =app-text/texlive-core-2024-r1 X -xetex
+ # =app-text/texlive-core-2024-r1 -xetex
+ # =dev-libs/zziplib-0.13.79-r1 sdl
+ #
+ # And will ignore such packages, installing the remaining ones. That
+ # affects mostly the image extension and PDF generation.
+
+ # Package dependencies and the minimal needed args:
+ portages = {
+ "graphviz": "media-gfx/graphviz",
+ "imagemagick": "media-gfx/imagemagick",
+ "media-libs": "media-libs/harfbuzz icu",
+ "media-fonts": "media-fonts/noto-cjk",
+ "texlive": "app-text/texlive-core xetex",
+ "zziblib": "dev-libs/zziplib sdl",
+ }
+
+ extra_cmds = ""
+ if not self.distro_msg:
+ self.distro_msg = "Note: Gentoo requires package.use to be adjusted before emerging packages"
+
+ use_base = "/etc/portage/package.use"
+ files = glob(f"{use_base}/*")
+
+ for fname, portage in portages.items():
+ install = False
+
+ while install is False:
+ if not files:
+                    # No files under package.use. Install all
+ install = True
+ break
+
+ args = portage.split(" ")
+
+ name = args.pop(0)
+
+ cmd = ["grep", "-l", "-E", rf"^{name}\b" ] + files
+ result = self.run(cmd, stdout=subprocess.PIPE, text=True)
+ if result.returncode or not result.stdout.strip():
+ # File containing portage name not found
+ install = True
+ break
+
+ # Ensure that needed USE flags are present
+ if args:
+ match_fname = result.stdout.strip()
+ with open(match_fname, 'r', encoding='utf8',
+ errors='backslashreplace') as fp:
+ for line in fp:
+ for arg in args:
+ if arg.startswith("-"):
+ continue
+
+ if not re.search(rf"\s*{arg}\b", line):
+ # Needed file argument not found
+ install = True
+ break
+
+ # Everything looks ok, don't install
+ break
+
+            # Emit a command to set up the missing USE flags
+ if install:
+ extra_cmds += (f"sudo su -c 'echo \"{portage}\" > {use_base}/{fname}'\n")
+
+ # Now, we can use emerge and let it respect USE
+ return self.get_install_progs(progs,
+ "emerge --ask --changed-use --binpkg-respect-use=y",
+ extra_cmds)
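+
+    # A worked example of the USE check above: the "texlive" entry maps to
+    # "app-text/texlive-core xetex", so a hypothetical file under
+    # /etc/portage/package.use containing the line
+    #
+    #   app-text/texlive-core xetex
+    #
+    # satisfies both the grep for the package name and the per-line USE-flag
+    # search; otherwise a "sudo su -c 'echo ...'" command is emitted for it.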
+
+ def get_install(self):
+ """
+        OS-specific hints logic. Seeks a hinter; if found, uses it to
+        provide package-manager-specific install commands.
+
+        Otherwise, outputs install instructions for the meta-packages.
+
+        Returns a string with the command to be executed to install the
+        needed packages, if the distro was found. Otherwise, returns just a
+        list of packages that require installation.
+ """
+ os_hints = {
+ re.compile("Red Hat Enterprise Linux"): self.give_redhat_hints,
+ re.compile("Fedora"): self.give_redhat_hints,
+ re.compile("AlmaLinux"): self.give_redhat_hints,
+ re.compile("Amazon Linux"): self.give_redhat_hints,
+ re.compile("CentOS"): self.give_redhat_hints,
+ re.compile("openEuler"): self.give_redhat_hints,
+ re.compile("Oracle Linux Server"): self.give_redhat_hints,
+ re.compile("Rocky Linux"): self.give_redhat_hints,
+ re.compile("Springdale Open Enterprise"): self.give_redhat_hints,
+
+ re.compile("Ubuntu"): self.give_debian_hints,
+ re.compile("Debian"): self.give_debian_hints,
+ re.compile("Devuan"): self.give_debian_hints,
+ re.compile("Kali"): self.give_debian_hints,
+ re.compile("Mint"): self.give_debian_hints,
+
+ re.compile("openSUSE"): self.give_opensuse_hints,
+
+ re.compile("Mageia"): self.give_mageia_hints,
+ re.compile("OpenMandriva"): self.give_mageia_hints,
+
+ re.compile("Arch Linux"): self.give_arch_linux_hints,
+ re.compile("Gentoo"): self.give_gentoo_hints,
+ }
+
+ # If the OS is detected, use per-OS hint logic
+ for regex, os_hint in os_hints.items():
+ if regex.search(self.system_release):
+ return os_hint()
+
+ #
+        # Fall back to generic hint code for other distros.
+        # That's far from ideal, especially for LaTeX dependencies.
+ #
+ progs = {"sphinx-build": "sphinx"}
+ if self.pdf:
+ self.check_missing_tex()
+
+ self.distro_msg = \
+ f"I don't know distro {self.system_release}.\n" \
+ "So, I can't provide you a hint with the install procedure.\n" \
+ "There are likely missing dependencies."
+
+ return self.get_install_progs(progs, None)
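+
+    # For instance, a system_release of "Ubuntu 24.04" matches the
+    # re.compile("Ubuntu") entry above and dispatches to give_debian_hints();
+    # an unmatched release string falls through to the generic code instead.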
+
+ #
+ # Common dependencies
+ #
+ def deactivate_help(self):
+ """
+ Print a helper message to disable a virtual environment.
+ """
+
+ print("\n If you want to exit the virtualenv, you can use:")
+ print("\tdeactivate")
+
+ def get_virtenv(self):
+ """
+ Give a hint about how to activate an already-existing virtual
+ environment containing sphinx-build.
+
+        Returns a tuple (activate_cmd_path, sphinx_version) with
+        the newest available virtual env.
+ """
+
+ cwd = os.getcwd()
+
+ activates = []
+
+ # Add all sphinx prefixes with possible version numbers
+ for p in self.virtenv_prefix:
+ activates += glob(f"{cwd}/{p}[0-9]*/bin/activate")
+
+ activates.sort(reverse=True, key=str.lower)
+
+ # Place sphinx_latest first, if it exists
+ for p in self.virtenv_prefix:
+ activates = glob(f"{cwd}/{p}*latest/bin/activate") + activates
+
+ ver = (0, 0, 0)
+ for f in activates:
+ # Discard too old Sphinx virtual environments
+ match = re.search(r"(\d+)\.(\d+)\.(\d+)", f)
+ if match:
+ ver = (int(match.group(1)), int(match.group(2)), int(match.group(3)))
+
+ if ver < self.min_version:
+ continue
+
+ sphinx_cmd = f.replace("activate", "sphinx-build")
+ if not os.path.isfile(sphinx_cmd):
+ continue
+
+ ver = self.get_sphinx_version(sphinx_cmd)
+
+ if not ver:
+ venv_dir = f.replace("/bin/activate", "")
+ print(f"Warning: virtual environment {venv_dir} is not working.\n" \
+ "Python version upgrade? Remove it with:\n\n" \
+                      f"\trm -rf {venv_dir}\n\n")
+ else:
+ if self.need_sphinx and ver >= self.min_version:
+ return (f, ver)
+                elif ver > self.cur_version:
+ return (f, ver)
+
+ return ("", ver)
+
+ def recommend_sphinx_upgrade(self):
+ """
+ Check if Sphinx needs to be upgraded.
+
+        Returns a tuple with the highest available Sphinx version if found.
+ Otherwise, returns None to indicate either that no upgrade is needed
+ or no venv was found.
+ """
+
+ # Avoid running sphinx-builds from venv if cur_version is good
+ if self.cur_version and self.cur_version >= RECOMMENDED_VERSION:
+ self.latest_avail_ver = self.cur_version
+ return None
+
+ # Get the highest version from sphinx_*/bin/sphinx-build and the
+ # corresponding command to activate the venv/virtenv
+ self.activate_cmd, self.venv_ver = self.get_virtenv()
+
+ # Store the highest version from Sphinx existing virtualenvs
+ if self.activate_cmd and self.venv_ver > self.cur_version:
+ self.latest_avail_ver = self.venv_ver
+ else:
+ if self.cur_version:
+ self.latest_avail_ver = self.cur_version
+ else:
+ self.latest_avail_ver = (0, 0, 0)
+
+ # As we don't know package version of Sphinx, and there's no
+ # virtual environments, don't check if upgrades are needed
+ if not self.virtualenv:
+ if not self.latest_avail_ver:
+ return None
+
+ return self.latest_avail_ver
+
+        # Either there is already a virtual env or a new one should be created
+ self.need_pip = True
+
+ if not self.latest_avail_ver:
+ return None
+
+ # Return if the reason is due to an upgrade or not
+ if self.latest_avail_ver != (0, 0, 0):
+ if self.latest_avail_ver < RECOMMENDED_VERSION:
+ self.rec_sphinx_upgrade = 1
+
+ return self.latest_avail_ver
+
+ def recommend_package(self):
+ """
+ Recommend installing Sphinx as a distro-specific package.
+ """
+
+ print("\n2) As a package with:")
+
+ old_need = self.deps.need
+ old_optional = self.deps.optional
+
+ self.pdf = False
+ self.deps.optional = 0
+ old_verbose = self.verbose_warn_install
+ self.verbose_warn_install = 0
+
+ self.deps.clear_deps()
+
+ self.deps.add_package("python-sphinx", DepManager.PYTHON_MANDATORY)
+
+ cmd = self.get_install()
+ if cmd:
+ print(cmd)
+
+ self.deps.need = old_need
+ self.deps.optional = old_optional
+ self.verbose_warn_install = old_verbose
+
+ def recommend_sphinx_version(self, virtualenv_cmd):
+ """
+ Provide recommendations for installing or upgrading Sphinx based
+ on current version.
+
+        The logic here is complex, as it has to deal with different versions:
+
+        - minimal supported version;
+        - minimal PDF version;
+        - recommended version.
+
+        It also needs to work fine with both the distro's packages and
+        venv/virtualenv.
+ """
+
+ if self.recommend_python:
+ cur_ver = sys.version_info[:3]
+ if cur_ver < MIN_PYTHON_VERSION:
+ print(f"\nPython version {cur_ver} is incompatible with doc build.\n" \
+ "Please upgrade it and re-run.\n")
+ return
+
+ # Version is OK. Nothing to do.
+ if self.cur_version != (0, 0, 0) and self.cur_version >= RECOMMENDED_VERSION:
+ return
+
+ if self.latest_avail_ver:
+ latest_avail_ver = ver_str(self.latest_avail_ver)
+
+ if not self.need_sphinx:
+ # sphinx-build is present and its version is >= $min_version
+
+            # Only recommend enabling a newer virtenv version if it makes sense.
+ if self.latest_avail_ver and self.latest_avail_ver > self.cur_version:
+ print(f"\nYou may also use the newer Sphinx version {latest_avail_ver} with:")
+ if f"{self.virtenv_prefix}" in os.getcwd():
+ print("\tdeactivate")
+ print(f"\t. {self.activate_cmd}")
+ self.deactivate_help()
+ return
+
+ if self.latest_avail_ver and self.latest_avail_ver >= RECOMMENDED_VERSION:
+ return
+
+ if not self.virtualenv:
+            # No sphinx either via package or via virtenv. As we can't
+            # compare the versions here, just return, recommending that
+            # the user install it from the distro package.
+ if not self.latest_avail_ver or self.latest_avail_ver == (0, 0, 0):
+ return
+
+            # The user doesn't want a virtenv recommendation, but a newer
+            # version is already installed via virtenv.
+            # So, print commands to enable it.
+ if self.latest_avail_ver > self.cur_version:
+ print(f"\nYou may also use the Sphinx virtualenv version {latest_avail_ver} with:")
+ if f"{self.virtenv_prefix}" in os.getcwd():
+ print("\tdeactivate")
+ print(f"\t. {self.activate_cmd}")
+ self.deactivate_help()
+ return
+ print("\n")
+ else:
+ if self.need_sphinx:
+ self.deps.need += 1
+
+ # Suggest newer versions if current ones are too old
+ if self.latest_avail_ver and self.latest_avail_ver >= self.min_version:
+ if self.latest_avail_ver >= RECOMMENDED_VERSION:
+ print(f"\nNeed to activate Sphinx (version {latest_avail_ver}) on virtualenv with:")
+ print(f"\t. {self.activate_cmd}")
+ self.deactivate_help()
+ return
+
+ # Version is above the minimal required one, but may be
+ # below the recommended one. So, print warnings/notes
+ if self.latest_avail_ver < RECOMMENDED_VERSION:
+ print(f"Warning: It is recommended at least Sphinx version {RECOMMENDED_VERSION}.")
+
+ # At this point, either it needs Sphinx or upgrade is recommended,
+ # both via pip
+
+ if self.rec_sphinx_upgrade:
+ if not self.virtualenv:
+ print("Instead of install/upgrade Python Sphinx pkg, you could use pip/pypi with:\n\n")
+ else:
+ print("To upgrade Sphinx, use:\n\n")
+ else:
+ print("\nSphinx needs to be installed either:\n1) via pip/pypi with:\n")
+
+ if not virtualenv_cmd:
+ print(" Currently not possible.\n")
+ print(" Please upgrade Python to a newer version and run this script again")
+ else:
+ print(f"\t{virtualenv_cmd} {self.virtenv_dir}")
+ print(f"\t. {self.virtenv_dir}/bin/activate")
+ print(f"\tpip install -r {self.requirement_file}")
+ self.deactivate_help()
+
+ if self.package_supported:
+ self.recommend_package()
+
+ print("\n" \
+ " Please note that Sphinx currentlys produce false-positive\n" \
+ " warnings when the same name is used for more than one type (functions,\n" \
+ " structs, enums,...). This is known Sphinx bug. For more details, see:\n" \
+ "\thttps://github.com/sphinx-doc/sphinx/pull/8313")
+
+ def check_needs(self):
+ """
+ Main method that checks needed dependencies and provides
+ recommendations.
+ """
+ self.python_cmd = sys.executable
+
+ # Check if Sphinx is already accessible from current environment
+ self.check_sphinx(self.conf)
+
+ if self.system_release:
+ print(f"Detected OS: {self.system_release}.")
+ else:
+ print("Unknown OS")
+ if self.cur_version != (0, 0, 0):
+ ver = ver_str(self.cur_version)
+ print(f"Sphinx version: {ver}\n")
+
+ # Check the type of virtual env, depending on Python version
+ virtualenv_cmd = None
+
+ if sys.version_info < MIN_PYTHON_VERSION:
+ min_ver = ver_str(MIN_PYTHON_VERSION)
+ print(f"ERROR: at least python {min_ver} is required to build the kernel docs")
+ self.need_sphinx = 1
+
+ self.venv_ver = self.recommend_sphinx_upgrade()
+
+ if self.need_pip:
+ if sys.version_info < MIN_PYTHON_VERSION:
+ self.need_pip = False
+ print("Warning: python version is not supported.")
+ else:
+ virtualenv_cmd = f"{self.python_cmd} -m venv"
+ self.check_python_module("ensurepip")
+
+ # Check for needed programs/tools
+ self.check_perl_module("Pod::Usage", DepManager.SYSTEM_MANDATORY)
+
+ self.check_program("make", DepManager.SYSTEM_MANDATORY)
+ self.check_program("which", DepManager.SYSTEM_MANDATORY)
+
+ self.check_program("dot", DepManager.SYSTEM_OPTIONAL)
+ self.check_program("convert", DepManager.SYSTEM_OPTIONAL)
+
+ self.check_python_module("yaml")
+
+ if self.pdf:
+ self.check_program("xelatex", DepManager.PDF_MANDATORY)
+ self.check_program("rsvg-convert", DepManager.PDF_MANDATORY)
+ self.check_program("latexmk", DepManager.PDF_MANDATORY)
+
+ # Do distro-specific checks and output distro-install commands
+ cmd = self.get_install()
+ if cmd:
+ print(cmd)
+
+        # If the distro requires special instructions, print them here.
+        # Note that get_install() needs to be called first.
+ if self.distro_msg:
+ print("\n" + self.distro_msg)
+
+ if not self.python_cmd:
+            if self.deps.need == 1:
+                sys.exit("Can't build as 1 mandatory dependency is missing")
+            elif self.deps.need:
+                sys.exit(f"Can't build as {self.deps.need} mandatory dependencies are missing")
+
+ # Check if sphinx-build is called sphinx-build-3
+ if self.need_symlink:
+ sphinx_path = self.which("sphinx-build-3")
+ if sphinx_path:
+ print(f"\tsudo ln -sf {sphinx_path} /usr/bin/sphinx-build\n")
+
+ self.recommend_sphinx_version(virtualenv_cmd)
+ print("")
+
+ if not self.deps.optional:
+ print("All optional dependencies are met.")
+
+ if self.deps.need == 1:
+ sys.exit("Can't build as 1 mandatory dependency is missing")
+ elif self.deps.need:
+ sys.exit(f"Can't build as {self.deps.need} mandatory dependencies are missing")
+
+ print("Needed package dependencies are met.")
+
+DESCRIPTION = """
+Process some flags related to Sphinx installation and documentation build.
+"""
+
+
+def main():
+ """Main function"""
+ parser = argparse.ArgumentParser(description=DESCRIPTION)
+
+ parser.add_argument(
+ "--no-virtualenv",
+ action="store_false",
+ dest="virtualenv",
+ help="Recommend installing Sphinx instead of using a virtualenv",
+ )
+
+ parser.add_argument(
+ "--no-pdf",
+ action="store_false",
+ dest="pdf",
+ help="Don't check for dependencies required to build PDF docs",
+ )
+
+ parser.add_argument(
+ "--version-check",
+ action="store_true",
+ dest="version_check",
+ help="If version is compatible, don't check for missing dependencies",
+ )
-sub deactivate_help()
-{
- printf "\n If you want to exit the virtualenv, you can use:\n";
- printf "\tdeactivate\n";
-}
-
-sub get_virtenv()
-{
- my $ver;
- my $min_activate = "$ENV{'PWD'}/${virtenv_prefix}${min_version}/bin/activate";
- my @activates = glob "$ENV{'PWD'}/${virtenv_prefix}*/bin/activate";
-
- @activates = sort {$b cmp $a} @activates;
-
- foreach my $f (@activates) {
- next if ($f lt $min_activate);
-
- my $sphinx_cmd = $f;
- $sphinx_cmd =~ s/activate/sphinx-build/;
- next if (! -f $sphinx_cmd);
-
- my $ver = get_sphinx_version($sphinx_cmd);
-
- if (!$ver) {
- $f =~ s#/bin/activate##;
- print("Warning: virtual environment $f is not working.\nPython version upgrade? Remove it with:\n\n\trm -rf $f\n\n");
- }
-
- if ($need_sphinx && ($ver ge $min_version)) {
- return ($f, $ver);
- } elsif ($ver gt $cur_version) {
- return ($f, $ver);
- }
- }
- return ("", "");
-}
-
-sub recommend_sphinx_upgrade()
-{
- my $venv_ver;
-
- # Avoid running sphinx-builds from venv if $cur_version is good
- if ($cur_version && ($cur_version ge $rec_version)) {
- $latest_avail_ver = $cur_version;
- return;
- }
-
- # Get the highest version from sphinx_*/bin/sphinx-build and the
- # corresponding command to activate the venv/virtenv
- ($activate_cmd, $venv_ver) = get_virtenv();
-
- # Store the highest version from Sphinx existing virtualenvs
- if (($activate_cmd ne "") && ($venv_ver gt $cur_version)) {
- $latest_avail_ver = $venv_ver;
- } else {
- $latest_avail_ver = $cur_version if ($cur_version);
- }
-
- # As we don't know package version of Sphinx, and there's no
- # virtual environments, don't check if upgrades are needed
- if (!$virtualenv) {
- return if (!$latest_avail_ver);
- }
-
- # Either there are already a virtual env or a new one should be created
- $need_pip = 1;
-
- return if (!$latest_avail_ver);
-
- # Return if the reason is due to an upgrade or not
- if ($latest_avail_ver lt $rec_version) {
- $rec_sphinx_upgrade = 1;
- }
-
- return $latest_avail_ver;
-}
-
-#
-# The logic here is complex, as it have to deal with different versions:
-# - minimal supported version;
-# - minimal PDF version;
-# - recommended version.
-# It also needs to work fine with both distro's package and venv/virtualenv
-sub recommend_sphinx_version($)
-{
- my $virtualenv_cmd = shift;
-
- # Version is OK. Nothing to do.
- if ($cur_version && ($cur_version ge $rec_version)) {
- return;
- };
-
- if (!$need_sphinx) {
- # sphinx-build is present and its version is >= $min_version
-
- #only recommend enabling a newer virtenv version if makes sense.
- if ($latest_avail_ver gt $cur_version) {
- printf "\nYou may also use the newer Sphinx version $latest_avail_ver with:\n";
- printf "\tdeactivate\n" if ($ENV{'PWD'} =~ /${virtenv_prefix}/);
- printf "\t. $activate_cmd\n";
- deactivate_help();
-
- return;
- }
- return if ($latest_avail_ver ge $rec_version);
- }
-
- if (!$virtualenv) {
- # No sphinx either via package or via virtenv. As we can't
- # Compare the versions here, just return, recommending the
- # user to install it from the package distro.
- return if (!$latest_avail_ver);
-
- # User doesn't want a virtenv recommendation, but he already
- # installed one via virtenv with a newer version.
- # So, print commands to enable it
- if ($latest_avail_ver gt $cur_version) {
- printf "\nYou may also use the Sphinx virtualenv version $latest_avail_ver with:\n";
- printf "\tdeactivate\n" if ($ENV{'PWD'} =~ /${virtenv_prefix}/);
- printf "\t. $activate_cmd\n";
- deactivate_help();
-
- return;
- }
- print "\n";
- } else {
- $need++ if ($need_sphinx);
- }
-
- # Suggest newer versions if current ones are too old
- if ($latest_avail_ver && $latest_avail_ver ge $min_version) {
- # If there's a good enough version, ask the user to enable it
- if ($latest_avail_ver ge $rec_version) {
- printf "\nNeed to activate Sphinx (version $latest_avail_ver) on virtualenv with:\n";
- printf "\t. $activate_cmd\n";
- deactivate_help();
-
- return;
- }
-
- # Version is above the minimal required one, but may be
- # below the recommended one. So, print warnings/notes
-
- if ($latest_avail_ver lt $rec_version) {
- print "Warning: It is recommended at least Sphinx version $rec_version.\n";
- }
- }
-
- # At this point, either it needs Sphinx or upgrade is recommended,
- # both via pip
-
- if ($rec_sphinx_upgrade) {
- if (!$virtualenv) {
- print "Instead of install/upgrade Python Sphinx pkg, you could use pip/pypi with:\n\n";
- } else {
- print "To upgrade Sphinx, use:\n\n";
- }
- } else {
- print "\nSphinx needs to be installed either:\n1) via pip/pypi with:\n\n";
- }
-
- $python_cmd = find_python_no_venv();
-
- printf "\t$virtualenv_cmd $virtenv_dir\n";
-
- printf "\t. $virtenv_dir/bin/activate\n";
- printf "\tpip install -r $requirement_file\n";
- deactivate_help();
-
- printf "\n2) As a package with:\n";
-
- my $old_need = $need;
- my $old_optional = $optional;
- %missing = ();
- $pdf = 0;
- $optional = 0;
- $install = "";
- $verbose_warn_install = 0;
-
- add_package("python-sphinx", 0);
-
- check_distros();
-
- $need = $old_need;
- $optional = $old_optional;
-
- printf "\n Please note that Sphinx >= 3.0 will currently produce false-positive\n";
- printf " warning when the same name is used for more than one type (functions,\n";
- printf " structs, enums,...). This is known Sphinx bug. For more details, see:\n";
- printf "\thttps://github.com/sphinx-doc/sphinx/pull/8313\n";
-}
-
-sub check_needs()
-{
- # Check if Sphinx is already accessible from current environment
- check_sphinx();
-
- if ($system_release) {
- print "Detected OS: $system_release.\n";
- } else {
- print "Unknown OS\n";
- }
- printf "Sphinx version: %s\n\n", $cur_version if ($cur_version);
-
- # Check python command line, trying first python3
- $python_cmd = findprog("python3");
- $python_cmd = check_program("python", 0) if (!$python_cmd);
-
- # Check the type of virtual env, depending on Python version
- if ($python_cmd) {
- if ($virtualenv) {
- my $tmp = qx($python_cmd --version 2>&1);
- if ($tmp =~ m/(\d+\.)(\d+\.)/) {
- if ($1 < 3) {
- # Fail if it finds python2 (or worse)
- die "Python 3 is required to build the kernel docs\n";
- }
- if ($1 == 3 && $2 < 3) {
- # Need Python 3.3 or upper for venv
- $need_virtualenv = 1;
- }
- } else {
- die "Warning: couldn't identify $python_cmd version!";
- }
- } else {
- add_package("python-sphinx", 0);
- }
- }
-
- my $venv_ver = recommend_sphinx_upgrade();
-
- my $virtualenv_cmd;
-
- if ($need_pip) {
- # Set virtualenv command line, if python < 3.3
- if ($need_virtualenv) {
- $virtualenv_cmd = findprog("virtualenv-3");
- $virtualenv_cmd = findprog("virtualenv-3.5") if (!$virtualenv_cmd);
- if (!$virtualenv_cmd) {
- check_program("virtualenv", 0);
- $virtualenv_cmd = "virtualenv";
- }
- } else {
- $virtualenv_cmd = "$python_cmd -m venv";
- check_python_module("ensurepip", 0);
- }
- }
-
- # Check for needed programs/tools
- check_perl_module("Pod::Usage", 0);
- check_python_module("yaml", 0);
- check_program("make", 0);
- check_program("gcc", 0);
- check_program("dot", 1);
- check_program("convert", 1);
-
- # Extra PDF files - should use 2 for is_optional
- check_program("xelatex", 2) if ($pdf);
- check_program("rsvg-convert", 2) if ($pdf);
- check_program("latexmk", 2) if ($pdf);
-
- # Do distro-specific checks and output distro-install commands
- check_distros();
-
- if (!$python_cmd) {
- if ($need == 1) {
- die "Can't build as $need mandatory dependency is missing";
- } elsif ($need) {
- die "Can't build as $need mandatory dependencies are missing";
- }
- }
-
- # Check if sphinx-build is called sphinx-build-3
- if ($need_symlink) {
- printf "\tsudo ln -sf %s /usr/bin/sphinx-build\n\n",
- which("sphinx-build-3");
- }
-
- recommend_sphinx_version($virtualenv_cmd);
- printf "\n";
-
- print "All optional dependencies are met.\n" if (!$optional);
-
- if ($need == 1) {
- die "Can't build as $need mandatory dependency is missing";
- } elsif ($need) {
- die "Can't build as $need mandatory dependencies are missing";
- }
-
- print "Needed package dependencies are met.\n";
-}
-
-#
-# Main
-#
-
-while (@ARGV) {
- my $arg = shift(@ARGV);
-
- if ($arg eq "--no-virtualenv") {
- $virtualenv = 0;
- } elsif ($arg eq "--no-pdf"){
- $pdf = 0;
- } elsif ($arg eq "--version-check"){
- $version_check = 1;
- } else {
- print "Usage:\n\t$0 <--no-virtualenv> <--no-pdf> <--version-check>\n\n";
- print "Where:\n";
- print "\t--no-virtualenv\t- Recommend installing Sphinx instead of using a virtualenv\n";
- print "\t--version-check\t- if version is compatible, don't check for missing dependencies\n";
- print "\t--no-pdf\t- don't check for dependencies required to build PDF docs\n\n";
- exit -1;
- }
-}
-
-#
-# Determine the system type. There's no standard unique way that would
-# work with all distros with a minimal package install. So, several
-# methods are used here.
-#
-# By default, it will use lsb_release function. If not available, it will
-# fail back to reading the known different places where the distro name
-# is stored
-#
+ args = parser.parse_args()
+
+ checker = SphinxDependencyChecker(args)
-$system_release = qx(lsb_release -d) if which("lsb_release");
-$system_release =~ s/Description:\s*// if ($system_release);
-$system_release = catcheck("/etc/system-release") if !$system_release;
-$system_release = catcheck("/etc/redhat-release") if !$system_release;
-$system_release = catcheck("/etc/lsb-release") if !$system_release;
-$system_release = catcheck("/etc/gentoo-release") if !$system_release;
-
-# This seems more common than LSB these days
-if (!$system_release) {
- my %os_var;
- if (open IN, "cat /etc/os-release|") {
- while (<IN>) {
- if (m/^([\w\d\_]+)=\"?([^\"]*)\"?\n/) {
- $os_var{$1}=$2;
- }
- }
- $system_release = $os_var{"NAME"};
- if (defined($os_var{"VERSION_ID"})) {
- $system_release .= " " . $os_var{"VERSION_ID"} if (defined($os_var{"VERSION_ID"}));
- } else {
- $system_release .= " " . $os_var{"VERSION"};
- }
- }
-}
-$system_release = catcheck("/etc/issue") if !$system_release;
-$system_release =~ s/\s+$//;
-
-check_needs;
+ checker.check_python()
+ checker.check_needs()
+
+# Call main() if not used as a module
+if __name__ == "__main__":
+ main()