Diffstat (limited to 'tools')
-rwxr-xr-x tools/docs/check-variable-fonts.py | 37
-rwxr-xr-x tools/docs/checktransupdate.py | 307
-rwxr-xr-x tools/docs/documentation-file-ref-check | 245
-rwxr-xr-x tools/docs/features-refresh.sh | 98
-rwxr-xr-x tools/docs/find-unused-docs.sh | 62
-rwxr-xr-x tools/docs/get_abi.py | 214
-rwxr-xr-x tools/docs/get_feat.py | 225
-rwxr-xr-x tools/docs/list-arch.sh | 11
-rwxr-xr-x tools/docs/parse-headers.py | 14
-rwxr-xr-x tools/docs/sphinx-build-wrapper | 864
-rwxr-xr-x tools/docs/sphinx-pre-install | 1543
-rwxr-xr-x tools/docs/test_doc_build.py | 513
-rw-r--r-- tools/include/nolibc/Makefile | 22
-rw-r--r-- tools/include/nolibc/arch-arm.h | 2
-rw-r--r-- tools/include/nolibc/arch-arm64.h | 2
-rw-r--r-- tools/include/nolibc/arch-loongarch.h | 2
-rw-r--r-- tools/include/nolibc/arch-m68k.h | 2
-rw-r--r-- tools/include/nolibc/arch-mips.h | 2
-rw-r--r-- tools/include/nolibc/arch-powerpc.h | 2
-rw-r--r-- tools/include/nolibc/arch-riscv.h | 2
-rw-r--r-- tools/include/nolibc/arch-s390.h | 2
-rw-r--r-- tools/include/nolibc/arch-sh.h | 2
-rw-r--r-- tools/include/nolibc/arch-sparc.h | 2
-rw-r--r-- tools/include/nolibc/arch-x86.h | 10
-rw-r--r-- tools/include/nolibc/arch.h | 9
-rw-r--r-- tools/include/nolibc/compiler.h | 4
-rw-r--r-- tools/include/nolibc/crt.h | 3
-rw-r--r-- tools/include/nolibc/dirent.h | 6
-rw-r--r-- tools/include/nolibc/getopt.h | 2
-rw-r--r-- tools/include/nolibc/inttypes.h | 3
-rw-r--r-- tools/include/nolibc/nolibc.h | 2
-rw-r--r-- tools/include/nolibc/stackprotector.h | 2
-rw-r--r-- tools/include/nolibc/std.h | 4
-rw-r--r-- tools/include/nolibc/stdio.h | 10
-rw-r--r-- tools/include/nolibc/stdlib.h | 2
-rw-r--r-- tools/include/nolibc/string.h | 15
-rw-r--r-- tools/include/nolibc/sys.h | 74
-rw-r--r-- tools/include/nolibc/sys/auxv.h | 3
-rw-r--r-- tools/include/nolibc/sys/mman.h | 5
-rw-r--r-- tools/include/nolibc/sys/reboot.h | 2
-rw-r--r-- tools/include/nolibc/sys/select.h | 103
-rw-r--r-- tools/include/nolibc/sys/uio.h | 49
-rw-r--r-- tools/include/nolibc/sys/wait.h | 18
-rw-r--r-- tools/include/nolibc/time.h | 16
-rw-r--r-- tools/include/nolibc/types.h | 47
-rw-r--r-- tools/include/nolibc/unistd.h | 6
-rw-r--r-- tools/lib/python/__init__.py (renamed from tools/docs/lib/__init__.py) | 0
-rw-r--r-- tools/lib/python/abi/__init__.py | 0
-rw-r--r-- tools/lib/python/abi/abi_parser.py | 628
-rw-r--r-- tools/lib/python/abi/abi_regex.py | 234
-rw-r--r-- tools/lib/python/abi/helpers.py | 38
-rw-r--r-- tools/lib/python/abi/system_symbols.py | 378
-rwxr-xr-x tools/lib/python/feat/parse_features.py | 494
-rwxr-xr-x tools/lib/python/jobserver.py | 149
-rw-r--r-- tools/lib/python/kdoc/__init__.py | 0
-rw-r--r-- tools/lib/python/kdoc/enrich_formatter.py (renamed from tools/docs/lib/enrich_formatter.py) | 0
-rw-r--r-- tools/lib/python/kdoc/kdoc_files.py | 294
-rw-r--r-- tools/lib/python/kdoc/kdoc_item.py | 43
-rw-r--r-- tools/lib/python/kdoc/kdoc_output.py | 824
-rw-r--r-- tools/lib/python/kdoc/kdoc_parser.py | 1670
-rw-r--r-- tools/lib/python/kdoc/kdoc_re.py | 270
-rwxr-xr-x tools/lib/python/kdoc/latex_fonts.py | 167
-rwxr-xr-x tools/lib/python/kdoc/parse_data_structs.py (renamed from tools/docs/lib/parse_data_structs.py) | 230
-rw-r--r-- tools/lib/python/kdoc/python_version.py | 178
-rw-r--r-- tools/sched_ext/Makefile | 4
-rw-r--r-- tools/sched_ext/include/scx/common.bpf.h | 15
-rw-r--r-- tools/sched_ext/include/scx/compat.bpf.h | 314
-rw-r--r-- tools/sched_ext/include/scx/compat.h | 14
-rw-r--r-- tools/sched_ext/scx_cpu0.bpf.c | 88
-rw-r--r-- tools/sched_ext/scx_cpu0.c | 106
-rw-r--r-- tools/sched_ext/scx_flatcg.bpf.c | 10
-rw-r--r-- tools/sched_ext/scx_qmap.bpf.c | 52
-rw-r--r-- tools/testing/selftests/cgroup/test_core.c | 7
-rw-r--r-- tools/testing/selftests/cgroup/test_cpu.c | 7
-rw-r--r-- tools/testing/selftests/cgroup/test_cpuset.c | 7
-rw-r--r-- tools/testing/selftests/cgroup/test_freezer.c | 7
-rw-r--r-- tools/testing/selftests/cgroup/test_kill.c | 7
-rw-r--r-- tools/testing/selftests/cgroup/test_kmem.c | 7
-rw-r--r-- tools/testing/selftests/cgroup/test_memcontrol.c | 7
-rw-r--r-- tools/testing/selftests/cgroup/test_zswap.c | 7
-rw-r--r-- tools/testing/selftests/dma/dma_map_benchmark.c | 2
-rw-r--r-- tools/testing/selftests/ftrace/test.d/00basic/trace_marker_raw.tc | 107
-rw-r--r-- tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc | 18
-rw-r--r-- tools/testing/selftests/ftrace/test.d/dynevent/enable_disable_tprobe.tc | 40
-rw-r--r-- tools/testing/selftests/kselftest/runner.sh | 14
-rw-r--r-- tools/testing/selftests/livepatch/functions.sh | 6
-rw-r--r-- tools/testing/selftests/nolibc/Makefile.nolibc | 1
-rw-r--r-- tools/testing/selftests/nolibc/nolibc-test.c | 13
-rwxr-xr-x tools/testing/selftests/nolibc/run-tests.sh | 2
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/kvm-again.sh | 56
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/kvm-series.sh | 116
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/kvm.sh | 2
-rw-r--r-- tools/testing/selftests/rcutorture/configs/rcu/TREE04 | 1
-rwxr-xr-x tools/testing/selftests/run_kselftest.sh | 14
-rw-r--r-- tools/testing/selftests/sched_ext/Makefile | 1
-rw-r--r-- tools/testing/selftests/sched_ext/peek_dsq.bpf.c | 251
-rw-r--r-- tools/testing/selftests/sched_ext/peek_dsq.c | 224
97 files changed, 11234 insertions, 460 deletions
diff --git a/tools/docs/check-variable-fonts.py b/tools/docs/check-variable-fonts.py
new file mode 100755
index 000000000000..958d5a745724
--- /dev/null
+++ b/tools/docs/check-variable-fonts.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) Akira Yokosawa, 2024
+#
+# Ported to Python by (c) Mauro Carvalho Chehab, 2025
+#
+# pylint: disable=C0103
+
+"""
+Detect problematic Noto CJK variable fonts.
+
+For more details, see .../tools/lib/python/kdoc/latex_fonts.py.
+"""
+
+import argparse
+import sys
+import os.path
+
+src_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, os.path.join(src_dir, '../lib/python'))
+
+from kdoc.latex_fonts import LatexFontChecker
+
+checker = LatexFontChecker()
+
+parser = argparse.ArgumentParser(description=checker.description(),
+                                 formatter_class=argparse.RawTextHelpFormatter)
+parser.add_argument("--deny-vf",
+ help="XDG_CONFIG_HOME dir containing fontconfig/fonts.conf file")
+
+args = parser.parse_args()
+
+msg = LatexFontChecker(args.deny_vf).check()
+if msg:
+    print(msg)
+    sys.exit(1)
diff --git a/tools/docs/checktransupdate.py b/tools/docs/checktransupdate.py
new file mode 100755
index 000000000000..e894652369a5
--- /dev/null
+++ b/tools/docs/checktransupdate.py
@@ -0,0 +1,307 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+This script helps track the translation status of the documentation
+in different locales, e.g., zh_CN. More specifically, it uses `git log`
+to find the latest English commit seen from the translation commit
+(ordered by author date) and the latest English commit from HEAD. If
+they differ, it reports the file and the commits that need to be updated.
+
+The usage is as follows:
+- tools/docs/checktransupdate.py -l zh_CN
+This will print all the files that need to be updated or translated in the zh_CN locale.
+- tools/docs/checktransupdate.py Documentation/translations/zh_CN/dev-tools/testing-overview.rst
+This will only print the status of the specified file.
+
+The output is something like:
+Documentation/dev-tools/kfence.rst
+No translation in the locale of zh_CN
+
+Documentation/translations/zh_CN/dev-tools/testing-overview.rst
+commit 42fb9cfd5b18 ("Documentation: dev-tools: Add link to RV docs")
+1 commit(s) need resolving in total
+"""
+
+import os
+import re
+import time
+import logging
+from argparse import ArgumentParser, ArgumentTypeError, BooleanOptionalAction
+from datetime import datetime
+
+
+def get_origin_path(file_path):
+ """Get the origin path from the translation path"""
+ paths = file_path.split("/")
+ tidx = paths.index("translations")
+ opaths = paths[:tidx]
+ opaths += paths[tidx + 2 :]
+ return "/".join(opaths)
+
+
+def get_latest_commit_from(file_path, commit):
+ """Get the latest commit from the specified commit for the specified file"""
+ command = f"git log --pretty=format:%H%n%aD%n%cD%n%n%B {commit} -1 -- {file_path}"
+ logging.debug(command)
+ pipe = os.popen(command)
+ result = pipe.read()
+ result = result.split("\n")
+ if len(result) <= 1:
+ return None
+
+ logging.debug("Result: %s", result[0])
+
+ return {
+ "hash": result[0],
+ "author_date": datetime.strptime(result[1], "%a, %d %b %Y %H:%M:%S %z"),
+ "commit_date": datetime.strptime(result[2], "%a, %d %b %Y %H:%M:%S %z"),
+ "message": result[4:],
+ }
+
+
+def get_origin_from_trans(origin_path, t_from_head):
+ """Get the latest origin commit from the translation commit"""
+ o_from_t = get_latest_commit_from(origin_path, t_from_head["hash"])
+ while o_from_t is not None and o_from_t["author_date"] > t_from_head["author_date"]:
+ o_from_t = get_latest_commit_from(origin_path, o_from_t["hash"] + "^")
+ if o_from_t is not None:
+ logging.debug("tracked origin commit id: %s", o_from_t["hash"])
+ return o_from_t
+
+
+def get_origin_from_trans_smartly(origin_path, t_from_head):
+ """Get the latest origin commit from the formatted translation commit:
+ (1) update to commit HASH (TITLE)
+ (2) Update the translation through commit HASH (TITLE)
+ """
+    # capture group for a 12-character abbreviated commit hash
+ HASH = r'([0-9a-f]{12})'
+ # pattern 1: contains "update to commit HASH"
+ pat_update_to = re.compile(rf'update to commit {HASH}')
+ # pattern 2: contains "Update the translation through commit HASH"
+ pat_update_translation = re.compile(rf'Update the translation through commit {HASH}')
+
+ origin_commit_hash = None
+ for line in t_from_head["message"]:
+ # check if the line matches the first pattern
+ match = pat_update_to.search(line)
+ if match:
+ origin_commit_hash = match.group(1)
+ break
+ # check if the line matches the second pattern
+ match = pat_update_translation.search(line)
+ if match:
+ origin_commit_hash = match.group(1)
+ break
+ if origin_commit_hash is None:
+ return None
+ o_from_t = get_latest_commit_from(origin_path, origin_commit_hash)
+ if o_from_t is not None:
+ logging.debug("tracked origin commit id: %s", o_from_t["hash"])
+ return o_from_t
+
+
+def get_commits_count_between(opath, commit1, commit2):
+ """Get the commits count between two commits for the specified file"""
+ command = f"git log --pretty=format:%H {commit1}...{commit2} -- {opath}"
+ logging.debug(command)
+ pipe = os.popen(command)
+ result = pipe.read().split("\n")
+ # filter out empty lines
+ result = list(filter(lambda x: x != "", result))
+ return result
+
+
+def pretty_output(commit):
+ """Pretty print the commit message"""
+ command = f"git log --pretty='format:%h (\"%s\")' -1 {commit}"
+ logging.debug(command)
+ pipe = os.popen(command)
+ return pipe.read()
+
+
+def valid_commit(commit):
+ """Check if the commit is valid or not"""
+ msg = pretty_output(commit)
+ return "Merge tag" not in msg
+
+def check_per_file(file_path):
+ """Check the translation status for the specified file"""
+ opath = get_origin_path(file_path)
+
+ if not os.path.isfile(opath):
+ logging.error("Cannot find the origin path for {file_path}")
+ return
+
+ o_from_head = get_latest_commit_from(opath, "HEAD")
+ t_from_head = get_latest_commit_from(file_path, "HEAD")
+
+ if o_from_head is None or t_from_head is None:
+ logging.error("Cannot find the latest commit for %s", file_path)
+ return
+
+ o_from_t = get_origin_from_trans_smartly(opath, t_from_head)
+    # Note: o_from_t from get_*_smartly() is always more accurate than the
+    # one from get_*()
+ if o_from_t is None:
+ o_from_t = get_origin_from_trans(opath, t_from_head)
+
+ if o_from_t is None:
+ logging.error("Error: Cannot find the latest origin commit for %s", file_path)
+ return
+
+ if o_from_head["hash"] == o_from_t["hash"]:
+ logging.debug("No update needed for %s", file_path)
+ else:
+ logging.info(file_path)
+ commits = get_commits_count_between(
+ opath, o_from_t["hash"], o_from_head["hash"]
+ )
+ count = 0
+ for commit in commits:
+ if valid_commit(commit):
+ logging.info("commit %s", pretty_output(commit))
+ count += 1
+ logging.info("%d commits needs resolving in total\n", count)
+
+
+def valid_locales(locale):
+ """Check if the locale is valid or not"""
+ script_path = os.path.dirname(os.path.abspath(__file__))
+ linux_path = os.path.join(script_path, "../..")
+ if not os.path.isdir(f"{linux_path}/Documentation/translations/{locale}"):
+        raise ArgumentTypeError(f"Invalid locale: {locale}")
+ return locale
+
+
+def list_files_with_excluding_folders(folder, exclude_folders, include_suffix):
+ """List all files with the specified suffix in the folder and its subfolders"""
+ files = []
+ stack = [folder]
+
+ while stack:
+ pwd = stack.pop()
+ # filter out the exclude folders
+ if os.path.basename(pwd) in exclude_folders:
+ continue
+ # list all files and folders
+ for item in os.listdir(pwd):
+ ab_item = os.path.join(pwd, item)
+ if os.path.isdir(ab_item):
+ stack.append(ab_item)
+ else:
+ if ab_item.endswith(include_suffix):
+ files.append(ab_item)
+
+ return files
+
+
+class DmesgFormatter(logging.Formatter):
+ """Custom dmesg logging formatter"""
+ def format(self, record):
+ timestamp = time.time()
+ formatted_time = f"[{timestamp:>10.6f}]"
+ log_message = f"{formatted_time} {record.getMessage()}"
+ return log_message
+
+
+def config_logging(log_level, log_file="checktransupdate.log"):
+ """configure logging based on the log level"""
+ # set up the root logger
+ logger = logging.getLogger()
+ logger.setLevel(log_level)
+
+ # Create console handler
+ console_handler = logging.StreamHandler()
+ console_handler.setLevel(log_level)
+
+ # Create file handler
+ file_handler = logging.FileHandler(log_file)
+ file_handler.setLevel(log_level)
+
+ # Create formatter and add it to the handlers
+ formatter = DmesgFormatter()
+ console_handler.setFormatter(formatter)
+ file_handler.setFormatter(formatter)
+
+ # Add the handler to the logger
+ logger.addHandler(console_handler)
+ logger.addHandler(file_handler)
+
+
+def main():
+ """Main function of the script"""
+ script_path = os.path.dirname(os.path.abspath(__file__))
+ linux_path = os.path.join(script_path, "../..")
+
+ parser = ArgumentParser(description="Check the translation update")
+ parser.add_argument(
+ "-l",
+ "--locale",
+ default="zh_CN",
+ type=valid_locales,
+ help="Locale to check when files are not specified",
+ )
+
+ parser.add_argument(
+ "--print-missing-translations",
+ action=BooleanOptionalAction,
+ default=True,
+ help="Print files that do not have translations",
+ )
+
+ parser.add_argument(
+ '--log',
+ default='INFO',
+ choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
+ help='Set the logging level')
+
+ parser.add_argument(
+ '--logfile',
+ default='checktransupdate.log',
+ help='Set the logging file (default: checktransupdate.log)')
+
+ parser.add_argument(
+ "files", nargs="*", help="Files to check, if not specified, check all files"
+ )
+ args = parser.parse_args()
+
+ # Configure logging based on the --log argument
+ log_level = getattr(logging, args.log.upper(), logging.INFO)
+ config_logging(log_level)
+
+ # Get files related to linux path
+ files = args.files
+ if len(files) == 0:
+        official_files = list_files_with_excluding_folders(
+ os.path.join(linux_path, "Documentation"), ["translations", "output"], "rst"
+ )
+
+        for file in official_files:
+ # split the path into parts
+ path_parts = file.split(os.sep)
+ # find the index of the "Documentation" directory
+ kindex = path_parts.index("Documentation")
+ # insert the translations and locale after the Documentation directory
+ new_path_parts = path_parts[:kindex + 1] + ["translations", args.locale] \
+ + path_parts[kindex + 1 :]
+ # join the path parts back together
+ new_file = os.sep.join(new_path_parts)
+ if os.path.isfile(new_file):
+ files.append(new_file)
+ else:
+ if args.print_missing_translations:
+ logging.info(os.path.relpath(os.path.abspath(file), linux_path))
+ logging.info("No translation in the locale of %s\n", args.locale)
+
+ files = list(map(lambda x: os.path.relpath(os.path.abspath(x), linux_path), files))
+
+ # cd to linux root directory
+ os.chdir(linux_path)
+
+ for file in files:
+ check_per_file(file)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/docs/documentation-file-ref-check b/tools/docs/documentation-file-ref-check
new file mode 100755
index 000000000000..0cad42f6943b
--- /dev/null
+++ b/tools/docs/documentation-file-ref-check
@@ -0,0 +1,245 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+#
+# Treewide grep for references to files under Documentation, and report
+# non-existing files on stderr.
+
+use warnings;
+use strict;
+use Getopt::Long qw(:config no_auto_abbrev);
+
+# NOTE: only add things here when the file was gone, but the text wants
+# to mention a past documentation file, for example, to give credits for
+# the original work.
+my %false_positives = (
+ "Documentation/scsi/scsi_mid_low_api.rst" => "Documentation/Configure.help",
+ "drivers/vhost/vhost.c" => "Documentation/virtual/lguest/lguest.c",
+);
+
+my $scriptname = $0;
+$scriptname =~ s,tools/docs/([^/]+/),$1,;
+
+# Parse arguments
+my $help = 0;
+my $fix = 0;
+my $warn = 0;
+
+if (! -e ".git") {
+ printf "Warning: can't check if file exists, as this is not a git tree\n";
+ exit 0;
+}
+
+GetOptions(
+ 'fix' => \$fix,
+ 'warn' => \$warn,
+ 'h|help|usage' => \$help,
+);
+
+if ($help != 0) {
+ print "$scriptname [--help] [--fix]\n";
+ exit -1;
+}
+
+# Step 1: find broken references
+print "Finding broken references. This may take a while... " if ($fix);
+
+my %broken_ref;
+
+my $doc_fix = 0;
+
+open IN, "git grep ':doc:\`' Documentation/|"
+ or die "Failed to run git grep";
+while (<IN>) {
+ next if (!m,^([^:]+):.*\:doc\:\`([^\`]+)\`,);
+ next if (m,sphinx/,);
+
+ my $file = $1;
+ my $d = $1;
+ my $doc_ref = $2;
+
+ my $f = $doc_ref;
+
+ $d =~ s,(.*/).*,$1,;
+ $f =~ s,.*\<([^\>]+)\>,$1,;
+
+ if ($f =~ m,^/,) {
+ $f = "$f.rst";
+ $f =~ s,^/,Documentation/,;
+ } else {
+ $f = "$d$f.rst";
+ }
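+
+	# For instance, :doc:`/process/howto` resolves to
+	# Documentation/process/howto.rst, while a reference without a
+	# leading "/" is resolved relative to the referring file's directory.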
+
+ next if (grep -e, glob("$f"));
+
+ if ($fix && !$doc_fix) {
+ print STDERR "\nWARNING: Currently, can't fix broken :doc:`` fields\n";
+ }
+ $doc_fix++;
+
+ print STDERR "$file: :doc:`$doc_ref`\n";
+}
+close IN;
+
+open IN, "git grep 'Documentation/'|"
+ or die "Failed to run git grep";
+while (<IN>) {
+ next if (!m/^([^:]+):(.*)/);
+
+ my $f = $1;
+ my $ln = $2;
+
+ # On linux-next, discard the Next/ directory
+ next if ($f =~ m,^Next/,);
+
+ # Makefiles and scripts contain nasty expressions to parse docs
+ next if ($f =~ m/Makefile/ || $f =~ m/\.(sh|py|pl|~|rej|org|orig)$/);
+
+ # It doesn't make sense to parse hidden files
+ next if ($f =~ m#/\.#);
+
+ # Skip this script
+ next if ($f eq $scriptname);
+
+ # Ignore the dir where documentation will be built
+ next if ($ln =~ m,\b(\S*)Documentation/output,);
+
+ if ($ln =~ m,\b(\S*)(Documentation/[A-Za-z0-9\_\.\,\~/\*\[\]\?+-]*)(.*),) {
+ my $prefix = $1;
+ my $ref = $2;
+ my $base = $2;
+ my $extra = $3;
+
+ # some file references are like:
+ # /usr/src/linux/Documentation/DMA-{API,mapping}.txt
+ # For now, ignore them
+ next if ($extra =~ m/^{/);
+
+ # Remove footnotes at the end like:
+ # Documentation/devicetree/dt-object-internal.txt[1]
+ $ref =~ s/(txt|rst)\[\d+]$/$1/;
+
+ # Remove ending ']' without any '['
+ $ref =~ s/\].*// if (!($ref =~ m/\[/));
+
+		# Remove punctuation marks at the end
+ $ref =~ s/[\,\.]+$//;
+
+ my $fulref = "$prefix$ref";
+
+ $fulref =~ s/^(\<file|ref)://;
+ $fulref =~ s/^[\'\`]+//;
+ $fulref =~ s,^\$\(.*\)/,,;
+ $base =~ s,.*/,,;
+
+ # Remove URL false-positives
+ next if ($fulref =~ m/^http/);
+
+ # Remove sched-pelt false-positive
+ next if ($fulref =~ m,^Documentation/scheduler/sched-pelt$,);
+
+ # Discard some build examples from Documentation/target/tcm_mod_builder.rst
+ next if ($fulref =~ m,mnt/sdb/lio-core-2.6.git/Documentation/target,);
+
+ # Check if exists, evaluating wildcards
+ next if (grep -e, glob("$ref $fulref"));
+
+ # Accept relative Documentation patches for tools/
+ if ($f =~ m/tools/) {
+ my $path = $f;
+ $path =~ s,(.*)/.*,$1,;
+ $path =~ s,testing/selftests/bpf,bpf/bpftool,;
+ next if (grep -e, glob("$path/$ref $path/../$ref $path/$fulref"));
+ }
+
+ # Discard known false-positives
+ if (defined($false_positives{$f})) {
+ next if ($false_positives{$f} eq $fulref);
+ }
+
+ if ($fix) {
+ if (!($ref =~ m/(scripts|Kconfig|Kbuild)/)) {
+ $broken_ref{$ref}++;
+ }
+ } elsif ($warn) {
+ print STDERR "Warning: $f references a file that doesn't exist: $fulref\n";
+ } else {
+ print STDERR "$f: $fulref\n";
+ }
+ }
+}
+close IN;
+
+exit 0 if (!$fix);
+
+# Step 2: Seek for file name alternatives
+print "Auto-fixing broken references. Please double-check the results\n";
+
+foreach my $ref (keys %broken_ref) {
+	my $new = $ref;
+
+ my $basedir = ".";
+ # On translations, only seek inside the translations directory
+ $basedir = $1 if ($ref =~ m,(Documentation/translations/[^/]+),);
+
+ # get just the basename
+ $new =~ s,.*/,,;
+
+ my $f="";
+
+ # usual reason for breakage: DT file moved around
+ if ($ref =~ /devicetree/) {
+ # usual reason for breakage: DT file renamed to .yaml
+ if (!$f) {
+ my $new_ref = $ref;
+ $new_ref =~ s/\.txt$/.yaml/;
+ $f=$new_ref if (-f $new_ref);
+ }
+
+ if (!$f) {
+ my $search = $new;
+ $search =~ s,^.*/,,;
+ $f = qx(find Documentation/devicetree/ -iname "*$search*") if ($search);
+ if (!$f) {
+ # Manufacturer name may have changed
+ $search =~ s/^.*,//;
+ $f = qx(find Documentation/devicetree/ -iname "*$search*") if ($search);
+ }
+ }
+ }
+
+ # usual reason for breakage: file renamed to .rst
+ if (!$f) {
+ $new =~ s/\.txt$/.rst/;
+ $f=qx(find $basedir -iname $new) if ($new);
+ }
+
+ # usual reason for breakage: use dash or underline
+ if (!$f) {
+ $new =~ s/[-_]/[-_]/g;
+ $f=qx(find $basedir -iname $new) if ($new);
+ }
+
+ # Wild guess: seek for the same name on another place
+ if (!$f) {
+ $f = qx(find $basedir -iname $new) if ($new);
+ }
+
+ my @find = split /\s+/, $f;
+
+ if (!$f) {
+ print STDERR "ERROR: Didn't find a replacement for $ref\n";
+ } elsif (scalar(@find) > 1) {
+ print STDERR "WARNING: Won't auto-replace, as found multiple files close to $ref:\n";
+ foreach my $j (@find) {
+ $j =~ s,^./,,;
+ print STDERR " $j\n";
+ }
+ } else {
+ $f = $find[0];
+ $f =~ s,^./,,;
+		print "INFO: Replacing $ref with $f\n";
+ foreach my $j (qx(git grep -l $ref)) {
+ qx(sed "s\@$ref\@$f\@g" -i $j);
+ }
+ }
+}
diff --git a/tools/docs/features-refresh.sh b/tools/docs/features-refresh.sh
new file mode 100755
index 000000000000..c2288124e94a
--- /dev/null
+++ b/tools/docs/features-refresh.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Small script that refreshes the kernel feature support status in place.
+#
+
+for F_FILE in Documentation/features/*/*/arch-support.txt; do
+ F=$(grep "^# Kconfig:" "$F_FILE" | cut -c26-)
+
+ #
+ # Each feature F is identified by a pair (O, K), where 'O' can
+ # be either the empty string (for 'nop') or "not" (the logical
+ # negation operator '!'); other operators are not supported.
+ #
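+	# For example (HAVE_FOO is a hypothetical Kconfig symbol):
+	#   "# Kconfig: HAVE_FOO"  yields (O, K) = ("",    "HAVE_FOO")
+	#   "# Kconfig: !HAVE_FOO" yields (O, K) = ("not", "HAVE_FOO")
+	#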
+ O=""
+ K=$F
+ if [[ "$F" == !* ]]; then
+ O="not"
+ K=$(echo $F | sed -e 's/^!//g')
+ fi
+
+ #
+ # F := (O, K) is 'valid' iff there is a Kconfig file (for some
+ # arch) which contains K.
+ #
+ # Notice that this definition entails an 'asymmetry' between
+ # the case 'O = ""' and the case 'O = "not"'. E.g., F may be
+ # _invalid_ if:
+ #
+ # [case 'O = ""']
+ # 1) no arch provides support for F,
+ # 2) K does not exist (e.g., it was renamed/mis-typed);
+ #
+ # [case 'O = "not"']
+ # 3) all archs provide support for F,
+ # 4) as in (2).
+ #
+ # The rationale for adopting this definition (and, thus, for
+ # keeping the asymmetry) is:
+ #
+ # We want to be able to 'detect' (2) (or (4)).
+ #
+ # (1) and (3) may further warn the developers about the fact
+ # that K can be removed.
+ #
+ F_VALID="false"
+ for ARCH_DIR in arch/*/; do
+ K_FILES=$(find $ARCH_DIR -name "Kconfig*")
+ K_GREP=$(grep "$K" $K_FILES)
+ if [ ! -z "$K_GREP" ]; then
+ F_VALID="true"
+ break
+ fi
+ done
+ if [ "$F_VALID" = "false" ]; then
+ printf "WARNING: '%s' is not a valid Kconfig\n" "$F"
+ fi
+
+ T_FILE="$F_FILE.tmp"
+ grep "^#" $F_FILE > $T_FILE
+ echo " -----------------------" >> $T_FILE
+ echo " | arch |status|" >> $T_FILE
+ echo " -----------------------" >> $T_FILE
+ for ARCH_DIR in arch/*/; do
+ ARCH=$(echo $ARCH_DIR | sed -e 's/^arch//g' | sed -e 's/\///g')
+ K_FILES=$(find $ARCH_DIR -name "Kconfig*")
+ K_GREP=$(grep "$K" $K_FILES)
+ #
+ # Arch support status values for (O, K) are updated according
+ # to the following rules.
+ #
+ # - ("", K) is 'supported by a given arch', if there is a
+ # Kconfig file for that arch which contains K;
+ #
+ # - ("not", K) is 'supported by a given arch', if there is
+ # no Kconfig file for that arch which contains K;
+ #
+ # - otherwise: preserve the previous status value (if any),
+ # default to 'not yet supported'.
+ #
+	# Notice that, according to these rules, invalid features may be
+ # updated/modified.
+ #
+ if [ "$O" = "" ] && [ ! -z "$K_GREP" ]; then
+ printf " |%12s: | ok |\n" "$ARCH" >> $T_FILE
+ elif [ "$O" = "not" ] && [ -z "$K_GREP" ]; then
+ printf " |%12s: | ok |\n" "$ARCH" >> $T_FILE
+ else
+ S=$(grep -v "^#" "$F_FILE" | grep " $ARCH:")
+ if [ ! -z "$S" ]; then
+ echo "$S" >> $T_FILE
+ else
+ printf " |%12s: | TODO |\n" "$ARCH" \
+ >> $T_FILE
+ fi
+ fi
+ done
+ echo " -----------------------" >> $T_FILE
+ mv $T_FILE $F_FILE
+done
diff --git a/tools/docs/find-unused-docs.sh b/tools/docs/find-unused-docs.sh
new file mode 100755
index 000000000000..05552dbda5bc
--- /dev/null
+++ b/tools/docs/find-unused-docs.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# (c) 2017, Jonathan Corbet <corbet@lwn.net>
+# sayli karnik <karniksayli1995@gmail.com>
+#
+# This script detects files with kernel-doc comments for exported functions
+# that are not included in documentation.
+#
+# usage: Run 'tools/docs/find-unused-docs.sh directory' from top level of kernel
+# tree.
+#
+# example: $ tools/docs/find-unused-docs.sh drivers/scsi
+#
+# Licensed under the terms of the GNU GPL License
+
+if ! [ -d "Documentation" ]; then
+ echo "Run from top level of kernel tree"
+ exit 1
+fi
+
+if [ "$#" -ne 1 ]; then
+ echo "Usage: tools/docs/find-unused-docs.sh directory"
+ exit 1
+fi
+
+if ! [ -d "$1" ]; then
+ echo "Directory $1 doesn't exist"
+ exit 1
+fi
+
+cd "$( dirname "${BASH_SOURCE[0]}" )"
+cd ../..
+
+cd Documentation/
+
+echo "The following files contain kerneldoc comments for exported functions \
+that are not used in the formatted documentation"
+
+# FILES INCLUDED
+
+files_included=($(grep -rHR ".. kernel-doc" --include \*.rst | cut -d " " -f 3))
+
+declare -A FILES_INCLUDED
+
+for each in "${files_included[@]}"; do
+ FILES_INCLUDED[$each]="$each"
+ done
+
+cd ..
+
+# FILES NOT INCLUDED
+
+for file in `find $1 -name '*.c'`; do
+
+ if [[ ${FILES_INCLUDED[$file]+_} ]]; then
+ continue;
+ fi
+ str=$(PYTHONDONTWRITEBYTECODE=1 scripts/kernel-doc -export "$file" 2>/dev/null)
+ if [[ -n "$str" ]]; then
+ echo "$file"
+ fi
+ done
+
diff --git a/tools/docs/get_abi.py b/tools/docs/get_abi.py
new file mode 100755
index 000000000000..2f0b99401f26
--- /dev/null
+++ b/tools/docs/get_abi.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python3
+# pylint: disable=R0903
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Parse ABI documentation and produce results from it.
+"""
+
+import argparse
+import logging
+import os
+import sys
+
+# Import Python modules
+
+LIB_DIR = "../lib/python"
+SRC_DIR = os.path.dirname(os.path.realpath(__file__))
+
+sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR))
+
+from abi.abi_parser import AbiParser # pylint: disable=C0413
+from abi.abi_regex import AbiRegex # pylint: disable=C0413
+from abi.helpers import ABI_DIR, DEBUG_HELP # pylint: disable=C0413
+from abi.system_symbols import SystemSymbols # pylint: disable=C0413
+
+# Command line classes
+
+
+REST_DESC = """
+Produce output in ReST format.
+
+The output is split into two sections:
+
+- Symbols: show all parsed symbols in alphabetical order;
+- Files: cross-reference the content of each file with the symbols in it.
+"""
+
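+#
+# Example usage (a sketch; assumes ABI files under Documentation/ABI):
+#
+#   ./tools/docs/get_abi.py rest --enable-lineno > abi.rst
+#
+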
+class AbiRest:
+ """Initialize an argparse subparser for rest output"""
+
+ def __init__(self, subparsers):
+ """Initialize argparse subparsers"""
+
+ parser = subparsers.add_parser("rest",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description=REST_DESC)
+
+ parser.add_argument("--enable-lineno", action="store_true",
+ help="enable lineno")
+ parser.add_argument("--raw", action="store_true",
+ help="output text as contained in the ABI files. "
+                                 "If not used, output will contain dynamically"
+                                 " generated cross-references when possible.")
+ parser.add_argument("--no-file", action="store_true",
+                            help="Don't output the files section")
+ parser.add_argument("--show-hints", help="Show-hints")
+
+ parser.set_defaults(func=self.run)
+
+ def run(self, args):
+ """Run subparser"""
+
+ parser = AbiParser(args.dir, debug=args.debug)
+ parser.parse_abi()
+ parser.check_issues()
+
+ for t in parser.doc(args.raw, not args.no_file):
+ if args.enable_lineno:
+ print (f".. LINENO {t[1]}#{t[2]}\n\n")
+
+ print(t[0])
+
+class AbiValidate:
+ """Initialize an argparse subparser for ABI validation"""
+
+ def __init__(self, subparsers):
+ """Initialize argparse subparsers"""
+
+ parser = subparsers.add_parser("validate",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+                                       description="Validate the ABI documentation")
+
+ parser.set_defaults(func=self.run)
+
+ def run(self, args):
+ """Run subparser"""
+
+ parser = AbiParser(args.dir, debug=args.debug)
+ parser.parse_abi()
+ parser.check_issues()
+
+
+class AbiSearch:
+ """Initialize an argparse subparser for ABI search"""
+
+ def __init__(self, subparsers):
+ """Initialize argparse subparsers"""
+
+ parser = subparsers.add_parser("search",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ description="Search ABI using a regular expression")
+
+ parser.add_argument("expression",
+ help="Case-insensitive search pattern for the ABI symbol")
+
+ parser.set_defaults(func=self.run)
+
+ def run(self, args):
+ """Run subparser"""
+
+ parser = AbiParser(args.dir, debug=args.debug)
+ parser.parse_abi()
+ parser.search_symbols(args.expression)
+
+UNDEFINED_DESC = """
+Check undefined ABIs on the local machine.
+
+Read sysfs devnodes and check if the devnodes there are defined inside
+the ABI documentation.
+
+The search logic tries to minimize the number of regular expressions
+searched per symbol.
+
+By default, it runs on a single CPU, as Python support for CPU threads
+is still experimental, and multi-process runs in Python are very slow.
+
+In experimental tests, if the number of ABI symbols to search per devnode
+stays within a limit of ~150 regular expressions, using a single CPU
+is a lot faster than using multiple processes. However, if the number of
+regular expressions to check is on the order of ~30000, using multiple
+CPUs speeds up the check.
+"""
+
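+#
+# Example usage (a sketch; assumes a readable /sys on this machine):
+#
+#   ./tools/docs/get_abi.py undefined -j 8 --show-hints
+#
+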
+class AbiUndefined:
+ """
+ Initialize an argparse subparser for logic to check undefined ABI at
+ the current machine's sysfs
+ """
+
+ def __init__(self, subparsers):
+ """Initialize argparse subparsers"""
+
+ parser = subparsers.add_parser("undefined",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description=UNDEFINED_DESC)
+
+ parser.add_argument("-S", "--sysfs-dir", default="/sys",
+ help="directory where sysfs is mounted")
+ parser.add_argument("-s", "--search-string",
+ help="search string regular expression to limit symbol search")
+ parser.add_argument("-H", "--show-hints", action="store_true",
+ help="Hints about definitions for missing ABI symbols.")
+ parser.add_argument("-j", "--jobs", "--max-workers", type=int, default=1,
+ help="If bigger than one, enables multiprocessing.")
+ parser.add_argument("-c", "--max-chunk-size", type=int, default=50,
+                            help="Maximum chunk size")
+ parser.add_argument("-f", "--found", action="store_true",
+ help="Also show found items. "
+ "Helpful to debug the parser."),
+ parser.add_argument("-d", "--dry-run", action="store_true",
+ help="Don't actually search for undefined. "
+ "Helpful to debug the parser."),
+
+ parser.set_defaults(func=self.run)
+
+ def run(self, args):
+ """Run subparser"""
+
+ abi = AbiRegex(args.dir, debug=args.debug,
+ search_string=args.search_string)
+
+ abi_symbols = SystemSymbols(abi=abi, hints=args.show_hints,
+ sysfs=args.sysfs_dir)
+
+ abi_symbols.check_undefined_symbols(dry_run=args.dry_run,
+ found=args.found,
+ max_workers=args.jobs,
+ chunk_size=args.max_chunk_size)
+
+
+def main():
+ """Main program"""
+
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+
+ parser.add_argument("-d", "--debug", type=int, default=0, help="debug level")
+ parser.add_argument("-D", "--dir", default=ABI_DIR, help=DEBUG_HELP)
+
+ subparsers = parser.add_subparsers()
+
+ AbiRest(subparsers)
+ AbiValidate(subparsers)
+ AbiSearch(subparsers)
+ AbiUndefined(subparsers)
+
+ args = parser.parse_args()
+
+ if args.debug:
+ level = logging.DEBUG
+ else:
+ level = logging.INFO
+
+ logging.basicConfig(level=level, format="[%(levelname)s] %(message)s")
+
+ if "func" in args:
+ args.func(args)
+ else:
+ sys.exit(f"Please specify a valid command for {sys.argv[0]}")
+
+
+# Call main method
+if __name__ == "__main__":
+ main()
diff --git a/tools/docs/get_feat.py b/tools/docs/get_feat.py
new file mode 100755
index 000000000000..2b5155a1f134
--- /dev/null
+++ b/tools/docs/get_feat.py
@@ -0,0 +1,225 @@
+#!/usr/bin/env python3
+# pylint: disable=R0902,R0911,R0912,R0914,R0915
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+
+"""
+Parse the Linux Feature files and produce a ReST book.
+"""
+
+import argparse
+import os
+import subprocess
+import sys
+
+from pprint import pprint
+
+LIB_DIR = "../../tools/lib/python"
+SRC_DIR = os.path.dirname(os.path.realpath(__file__))
+
+sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR))
+
+from feat.parse_features import ParseFeature # pylint: disable=C0413
+
+SRCTREE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../..")
+DEFAULT_DIR = "Documentation/features"
+
+
+class GetFeature:
+ """Helper class to parse feature parsing parameters"""
+
+ @staticmethod
+ def get_current_arch():
+ """Detects the current architecture"""
+
+ proc = subprocess.run(["uname", "-m"], check=True,
+ capture_output=True, text=True)
+
+ arch = proc.stdout.strip()
+ if arch in ["x86_64", "i386"]:
+ arch = "x86"
+ elif arch == "s390x":
+ arch = "s390"
+
+ return arch
+
+ def run_parser(self, args):
+ """Execute the feature parser"""
+
+ feat = ParseFeature(args.directory, args.debug, args.enable_fname)
+ data = feat.parse()
+
+ if args.debug > 2:
+ pprint(data)
+
+ return feat
+
+ def run_rest(self, args):
+ """
+ Generate tables in ReST format. Three types of tables are
+ supported, depending on the calling arguments:
+
+        - neither feature nor arch is passed: generates a full matrix;
+        - arch provided: generates a table of supported features for the
+          given architecture, optionally filtered by feature;
+        - only feature provided: generates a table with feature details,
+          showing on which architectures it is implemented.
+ """
+
+ feat = self.run_parser(args)
+
+ if args.arch:
+ rst = feat.output_arch_table(args.arch, args.feat)
+ elif args.feat:
+ rst = feat.output_feature(args.feat)
+ else:
+ rst = feat.output_matrix()
+
+ print(rst)
+
+ def run_current(self, args):
+ """
+        Instead of using a --arch parameter, get features for the current
+ architecture.
+ """
+
+ args.arch = self.get_current_arch()
+
+ self.run_rest(args)
+
+ def run_list(self, args):
+ """
+ Generate a list of features for a given architecture, in a format
+ parseable by other scripts. The output format is not ReST.
+ """
+
+ if not args.arch:
+ args.arch = self.get_current_arch()
+
+ feat = self.run_parser(args)
+ msg = feat.list_arch_features(args.arch, args.feat)
+
+ print(msg)
+
+ def parse_arch(self, parser):
+ """Add a --arch parsing argument"""
+
+ parser.add_argument("--arch",
+                            help="Output features for a specific"
+ " architecture, optionally filtering for a "
+ "single specific feature.")
+
+ def parse_feat(self, parser):
+ """Add a --feat parsing argument"""
+
+ parser.add_argument("--feat", "--feature",
+ help="Output features for a single specific "
+ "feature.")
+
+
+ def current_args(self, subparsers):
+ """Implementscurrent argparse subparser"""
+
+ parser = subparsers.add_parser("current",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Output table in ReST "
+ "compatible ASCII format "
+ "with features for this "
+ "machine's architecture")
+
+ self.parse_feat(parser)
+ parser.set_defaults(func=self.run_current)
+
+ def rest_args(self, subparsers):
+ """Implement rest argparse subparser"""
+
+ parser = subparsers.add_parser("rest",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Output table(s) in ReST "
+ "compatible ASCII format "
+ "with features in ReST "
+ "markup language. The "
+ "output is affected by "
+ "--arch or --feat/--feature"
+ " flags.")
+
+ self.parse_arch(parser)
+ self.parse_feat(parser)
+ parser.set_defaults(func=self.run_rest)
+
+ def list_args(self, subparsers):
+ """Implement list argparse subparser"""
+
+ parser = subparsers.add_parser("list",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="List features for this "
+ "machine's architecture, "
+ "using an easier to parse "
+ "format. The output is "
+ "affected by --arch flag.")
+
+ self.parse_arch(parser)
+ self.parse_feat(parser)
+ parser.set_defaults(func=self.run_list)
+
+ def validate_args(self, subparsers):
+ """Implement validate argparse subparser"""
+
+ parser = subparsers.add_parser("validate",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Validate the contents of "
+ "the files under "
+ f"{DEFAULT_DIR}.")
+
+ parser.set_defaults(func=self.run_parser)
+
+ def parser(self):
+ """
+        Create an argparse parser with common options and several subparsers
+ """
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+
+ parser.add_argument("-d", "--debug", action="count", default=0,
+ help="Put the script in verbose mode, useful for "
+ "debugging. Can be called multiple times, to "
+ "increase verbosity.")
+
+ parser.add_argument("--directory", "--dir", default=DEFAULT_DIR,
+ help="Changes the location of the Feature files. "
+ f"By default, it uses the {DEFAULT_DIR} "
+ "directory.")
+
+ parser.add_argument("--enable-fname", action="store_true",
+ help="Prints the file name of the feature files. "
+ "This can be used in order to track "
+ "dependencies during documentation build.")
+
+ subparsers = parser.add_subparsers()
+
+ self.current_args(subparsers)
+ self.rest_args(subparsers)
+ self.list_args(subparsers)
+ self.validate_args(subparsers)
+
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ """Main program"""
+
+ feat = GetFeature()
+
+ args = feat.parser()
+
+ if "func" in args:
+ args.func(args)
+ else:
+ sys.exit(f"Please specify a valid command for {sys.argv[0]}")
+
+
+# Call main method
+if __name__ == "__main__":
+ main()
diff --git a/tools/docs/list-arch.sh b/tools/docs/list-arch.sh
new file mode 100755
index 000000000000..96fe83b7058b
--- /dev/null
+++ b/tools/docs/list-arch.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Small script that visualizes the kernel feature support status
+# of an architecture.
+#
+# (If no arguments are given then it will print the host architecture's status.)
+#
+
+ARCH=${1:-$(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/' | sed 's/s390x/s390/')}
+
+$(dirname $0)/get_feat.py list --arch $ARCH
diff --git a/tools/docs/parse-headers.py b/tools/docs/parse-headers.py
index bfa4e46a53e3..436acea4c6ca 100755
--- a/tools/docs/parse-headers.py
+++ b/tools/docs/parse-headers.py
@@ -24,10 +24,13 @@ The optional ``FILE_RULES`` contains a set of rules like:
replace define V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ :c:type:`v4l2_event_motion_det`
"""
-import argparse
+import argparse
+import sys
+import os.path
-from lib.parse_data_structs import ParseDataStructs
-from lib.enrich_formatter import EnrichFormatter
+src_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, os.path.join(src_dir, '../lib/python'))
+from kdoc.parse_data_structs import ParseDataStructs
+from kdoc.enrich_formatter import EnrichFormatter
def main():
"""Main function"""
@@ -47,10 +50,7 @@ def main():
args = parser.parse_args()
parser = ParseDataStructs(debug=args.debug)
- parser.parse_file(args.file_in)
-
- if args.file_rules:
- parser.process_exceptions(args.file_rules)
+ parser.parse_file(args.file_in, args.file_rules)
parser.debug_print()
parser.write_output(args.file_in, args.file_out, args.toc)
diff --git a/tools/docs/sphinx-build-wrapper b/tools/docs/sphinx-build-wrapper
new file mode 100755
index 000000000000..7a5fcef25429
--- /dev/null
+++ b/tools/docs/sphinx-build-wrapper
@@ -0,0 +1,864 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2025 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+#
+# pylint: disable=R0902, R0912, R0913, R0914, R0915, R0917, C0103
+#
+# Converted from docs Makefile and parallel-wrapper.sh, both under
+# GPLv2, copyrighted since 2008 by the following authors:
+#
+# Akira Yokosawa <akiyks@gmail.com>
+# Arnd Bergmann <arnd@arndb.de>
+# Breno Leitao <leitao@debian.org>
+# Carlos Bilbao <carlos.bilbao@amd.com>
+# Dave Young <dyoung@redhat.com>
+# Donald Hunter <donald.hunter@gmail.com>
+# Geert Uytterhoeven <geert+renesas@glider.be>
+# Jani Nikula <jani.nikula@intel.com>
+# Jan Stancek <jstancek@redhat.com>
+# Jonathan Corbet <corbet@lwn.net>
+# Joshua Clayton <stillcompiling@gmail.com>
+# Kees Cook <keescook@chromium.org>
+# Linus Torvalds <torvalds@linux-foundation.org>
+# Magnus Damm <damm+renesas@opensource.se>
+# Masahiro Yamada <masahiroy@kernel.org>
+# Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+# Maxim Cournoyer <maxim.cournoyer@gmail.com>
+# Peter Foley <pefoley2@pefoley.com>
+# Randy Dunlap <rdunlap@infradead.org>
+# Rob Herring <robh@kernel.org>
+# Shuah Khan <shuahkh@osg.samsung.com>
+# Thorsten Blum <thorsten.blum@toblux.com>
+# Tomas Winkler <tomas.winkler@intel.com>
+
+
+"""
+Sphinx build wrapper that handles Kernel-specific business rules:
+
+- it gets the Kernel build environment vars;
+- it determines the best level of parallelism;
+- it handles SPHINXDIRS.
+
+This tool ensures that MIN_PYTHON_VERSION is satisfied. If the current
+version is below that, it searches for a newer Python interpreter and,
+if one is found, re-runs itself using the newer version.
+"""
+
+import argparse
+import locale
+import os
+import re
+import shlex
+import shutil
+import subprocess
+import sys
+
+from concurrent import futures
+from glob import glob
+
+
+LIB_DIR = "../lib/python"
+SRC_DIR = os.path.dirname(os.path.realpath(__file__))
+
+sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR))
+
+from kdoc.python_version import PythonVersion
+from kdoc.latex_fonts import LatexFontChecker
+from jobserver import JobserverExec # pylint: disable=C0413,C0411,E0401
+
+#
+# Some constants
+#
+VENV_DEFAULT = "sphinx_latest"
+MIN_PYTHON_VERSION = PythonVersion("3.7").version
+PAPER = ["", "a4", "letter"]
+
+TARGETS = {
+ "cleandocs": { "builder": "clean" },
+ "linkcheckdocs": { "builder": "linkcheck" },
+ "htmldocs": { "builder": "html" },
+ "epubdocs": { "builder": "epub", "out_dir": "epub" },
+ "texinfodocs": { "builder": "texinfo", "out_dir": "texinfo" },
+ "infodocs": { "builder": "texinfo", "out_dir": "texinfo" },
+ "mandocs": { "builder": "man", "out_dir": "man" },
+ "latexdocs": { "builder": "latex", "out_dir": "latex" },
+ "pdfdocs": { "builder": "latex", "out_dir": "latex" },
+ "xmldocs": { "builder": "xml", "out_dir": "xml" },
+}
+
+
+#
+# SphinxBuilder class
+#
+
+class SphinxBuilder:
+ """
+ Handles a sphinx-build target, adding needed arguments to build
+ with the Kernel.
+ """
+
+ def get_path(self, path, use_cwd=False, abs_path=False):
+ """
+        Ancillary routine to handle paths the right way, as a shell does.
+
+        It first expands "~" and "~user". Then, if the path is not absolute,
+        it is joined with self.srctree. Finally, if requested, it is
+        converted to an absolute path.
+ """
+
+ path = os.path.expanduser(path)
+ if not path.startswith("/"):
+ if use_cwd:
+ base = os.getcwd()
+ else:
+ base = self.srctree
+
+ path = os.path.join(base, path)
+
+ if abs_path:
+ return os.path.abspath(path)
+
+ return path
+
+ def check_rust(self):
+ """
+ Checks if Rust is enabled
+ """
+ self.rustdoc = False
+
+ config = os.path.join(self.srctree, ".config")
+
+ if not os.path.isfile(config):
+ return
+
+ re_rust = re.compile(r"CONFIG_RUST=(m|y)")
+
+ try:
+ with open(config, "r", encoding="utf-8") as fp:
+ for line in fp:
+ if re_rust.match(line):
+ self.rustdoc = True
+ return
+
+ except OSError as e:
+ print(f"Failed to open {config}", file=sys.stderr)
+
+ def get_sphinx_extra_opts(self, n_jobs):
+ """
+ Get the number of jobs to be used for docs build passed via command
+ line and desired sphinx verbosity.
+
+ The number of jobs can be on different places:
+
+ 1) It can be passed via "-j" argument;
+ 2) The SPHINXOPTS="-j8" env var may have "-j";
+ 3) if called via GNU make, -j specifies the desired number of jobs.
+ with GNU makefile, this number is available via POSIX jobserver;
+ 4) if none of the above is available, it should default to "-jauto",
+ and let sphinx decide the best value.
+ """
+
+ #
+ # SPHINXOPTS env var, if used, contains extra arguments to be used
+ # by sphinx-build time. Among them, it may contain sphinx verbosity
+ # and desired number of parallel jobs.
+ #
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-j', '--jobs', type=int)
+ parser.add_argument('-q', '--quiet', action='store_true')
+
+ #
+ # Other sphinx-build arguments go as-is, so place them
+ # at self.sphinxopts, using shell parser
+ #
+ sphinxopts = shlex.split(os.environ.get("SPHINXOPTS", ""))
+
+ #
+ # Build a list of sphinx args, honoring verbosity here if specified
+ #
+
+ verbose = self.verbose
+ sphinx_args, self.sphinxopts = parser.parse_known_args(sphinxopts)
+ if sphinx_args.quiet is True:
+ verbose = False
+
+ #
+ # If the user explicitly sets "-j" at command line, use it.
+ # Otherwise, pick it from SPHINXOPTS args
+ #
+ if n_jobs:
+ self.n_jobs = n_jobs
+ elif sphinx_args.jobs:
+ self.n_jobs = sphinx_args.jobs
+ else:
+ self.n_jobs = None
+
+ if not verbose:
+ self.sphinxopts += ["-q"]
+
+ def __init__(self, builddir, venv=None, verbose=False, n_jobs=None,
+ interactive=None):
+ """Initialize internal variables"""
+ self.venv = venv
+ self.verbose = None
+
+ #
+ # Normal variables passed from Kernel's makefile
+ #
+ self.kernelversion = os.environ.get("KERNELVERSION", "unknown")
+ self.kernelrelease = os.environ.get("KERNELRELEASE", "unknown")
+ self.pdflatex = os.environ.get("PDFLATEX", "xelatex")
+
+ #
+ # Kernel main Makefile defines a PYTHON3 variable whose default is
+ # "python3". When set to a different value, it allows running a
+        # different version than the default official python3 package.
+ # Several distros package python3xx-sphinx packages with newer
+ # versions of Python and sphinx-build.
+ #
+ # Honor such variable different than default
+ #
+ self.python = os.environ.get("PYTHON3")
+ if self.python == "python3":
+ self.python = None
+
+ if not interactive:
+ self.latexopts = os.environ.get("LATEXOPTS", "-interaction=batchmode -no-shell-escape")
+ else:
+ self.latexopts = os.environ.get("LATEXOPTS", "")
+
+ if not verbose:
+ verbose = bool(os.environ.get("KBUILD_VERBOSE", "") != "")
+
+ if verbose is not None:
+ self.verbose = verbose
+
+ #
+ # Source tree directory. This needs to be at os.environ, as
+ # Sphinx extensions use it
+ #
+ self.srctree = os.environ.get("srctree")
+ if not self.srctree:
+ self.srctree = "."
+ os.environ["srctree"] = self.srctree
+
+ #
+ # Now that we can expand srctree, get other directories as well
+ #
+ self.sphinxbuild = os.environ.get("SPHINXBUILD", "sphinx-build")
+ self.kerneldoc = self.get_path(os.environ.get("KERNELDOC",
+ "scripts/kernel-doc.py"))
+ self.builddir = self.get_path(builddir, use_cwd=True, abs_path=True)
+
+ #
+ # Get directory locations for LaTeX build toolchain
+ #
+ self.pdflatex_cmd = shutil.which(self.pdflatex)
+ self.latexmk_cmd = shutil.which("latexmk")
+
+ self.env = os.environ.copy()
+
+ self.get_sphinx_extra_opts(n_jobs)
+
+ self.check_rust()
+
+ #
+ # If venv command line argument is specified, run Sphinx from venv
+ #
+ if venv:
+ bin_dir = os.path.join(venv, "bin")
+ if not os.path.isfile(os.path.join(bin_dir, "activate")):
+ sys.exit(f"Venv {venv} not found.")
+
+ # "activate" virtual env
+ self.env["PATH"] = bin_dir + ":" + self.env["PATH"]
+ self.env["VIRTUAL_ENV"] = venv
+ if "PYTHONHOME" in self.env:
+ del self.env["PYTHONHOME"]
+ print(f"Setting venv to {venv}")
+
+ def run_sphinx(self, sphinx_build, build_args, *args, **pwargs):
+ """
+ Executes sphinx-build using current python3 command.
+
+        When called via GNU make, the POSIX jobserver is used to tell how
+        many jobs are still available in the job pool. Claim all remaining
+        jobs, as we don't want sphinx-build to run in parallel with other
+        jobs.
+
+ Despite that, the user may actually force a different value than
+ the number of available jobs via command line.
+
+ The "with" logic here is used to ensure that the claimed jobs will
+        be freed once the subprocess finishes.
+ """
+
+ with JobserverExec() as jobserver:
+ if jobserver.claim:
+ #
+ # when GNU make is used, claim available jobs from jobserver
+ #
+ n_jobs = str(jobserver.claim)
+ else:
+ #
+ # Otherwise, let sphinx decide by default
+ #
+ n_jobs = "auto"
+
+ #
+ # If explicitly requested via command line, override default
+ #
+ if self.n_jobs:
+ n_jobs = str(self.n_jobs)
+
+ #
+ # We can't simply call python3 sphinx-build, as OpenSUSE
+ # Tumbleweed uses an ELF binary file (/usr/bin/alts) to switch
+ # between different versions of sphinx-build. So, only call it
+ # prepending "python3.xx" when PYTHON3 variable is not default.
+ #
+ if self.python:
+ cmd = [self.python]
+ else:
+ cmd = []
+
+ cmd += [sphinx_build]
+ cmd += [f"-j{n_jobs}"]
+ cmd += build_args
+ cmd += self.sphinxopts
+
+ if self.verbose:
+ print(" ".join(cmd))
+
+ return subprocess.call(cmd, *args, **pwargs)
+
+ def handle_html(self, css, output_dir):
+ """
+ Extra steps for HTML and epub output.
+
+ For such targets, we need to ensure that CSS will be properly
+ copied to the output _static directory
+ """
+
+ if css:
+ css = os.path.expanduser(css)
+ if not css.startswith("/"):
+ css = os.path.join(self.srctree, css)
+
+ static_dir = os.path.join(output_dir, "_static")
+ os.makedirs(static_dir, exist_ok=True)
+
+ try:
+ shutil.copy2(css, static_dir)
+ except (OSError, IOError) as e:
+ print(f"Warning: Failed to copy CSS: {e}", file=sys.stderr)
+
+ if self.rustdoc:
+ print("Building rust docs")
+ if "MAKE" in self.env:
+ cmd = [self.env["MAKE"]]
+ else:
+ cmd = ["make", "LLVM=1"]
+
+ cmd += [ "rustdoc"]
+ if self.verbose:
+ print(" ".join(cmd))
+
+ try:
+ subprocess.run(cmd, check=True)
+ except subprocess.CalledProcessError as e:
+ print(f"Ignored errors when building rustdoc: {e}. Is RUST enabled?",
+ file=sys.stderr)
+
+ def build_pdf_file(self, latex_cmd, from_dir, path):
+ """Builds a single pdf file using latex_cmd"""
+ try:
+ subprocess.run(latex_cmd + [path],
+ cwd=from_dir, check=True, env=self.env)
+
+ return True
+ except subprocess.CalledProcessError:
+ return False
+
+ def pdf_parallel_build(self, tex_suffix, latex_cmd, tex_files, n_jobs):
+ """Build PDF files in parallel if possible"""
+ builds = {}
+ build_failed = False
+ max_len = 0
+ has_tex = False
+
+ #
+ # LaTeX PDF error code is almost useless for us:
+ # any warning makes it non-zero. For kernel doc builds it always return
+ # non-zero even when build succeeds. So, let's do the best next thing:
+ # Ignore build errors. At the end, check if all PDF files were built,
+ # printing a summary with the built ones and returning 0 if all of
+ # them were actually built.
+ #
+ with futures.ThreadPoolExecutor(max_workers=n_jobs) as executor:
+ jobs = {}
+
+ for from_dir, pdf_dir, entry in tex_files:
+ name = entry.name
+
+ if not name.endswith(tex_suffix):
+ continue
+
+ name = name[:-len(tex_suffix)]
+ has_tex = True
+
+ future = executor.submit(self.build_pdf_file, latex_cmd,
+ from_dir, entry.path)
+ jobs[future] = (from_dir, pdf_dir, name)
+
+ for future in futures.as_completed(jobs):
+ from_dir, pdf_dir, name = jobs[future]
+
+ pdf_name = name + ".pdf"
+ pdf_from = os.path.join(from_dir, pdf_name)
+ pdf_to = os.path.join(pdf_dir, pdf_name)
+ out_name = os.path.relpath(pdf_to, self.builddir)
+ max_len = max(max_len, len(out_name))
+
+ try:
+ success = future.result()
+
+ if success and os.path.exists(pdf_from):
+ os.rename(pdf_from, pdf_to)
+
+ #
+ # if verbose, get the name of built PDF file
+ #
+ if self.verbose:
+ builds[out_name] = "SUCCESS"
+ else:
+ builds[out_name] = "FAILED"
+ build_failed = True
+ except futures.Error as e:
+ builds[out_name] = f"FAILED ({repr(e)})"
+ build_failed = True
+
+ #
+ # Handle case where no .tex files were found
+ #
+ if not has_tex:
+ out_name = "LaTeX files"
+ max_len = max(max_len, len(out_name))
+ builds[out_name] = "FAILED: no .tex files were generated"
+ build_failed = True
+
+ return builds, build_failed, max_len
+
+ def handle_pdf(self, output_dirs, deny_vf):
+ """
+ Extra steps for PDF output.
+
+ As PDF is handled via a LaTeX output, after building the .tex file,
+ a new build is needed to create the PDF output from the latex
+ directory.
+ """
+ builds = {}
+ max_len = 0
+ tex_suffix = ".tex"
+ tex_files = []
+
+ #
+ # Since early 2024, Fedora and openSUSE tumbleweed have started
+ # deploying variable-font format of "Noto CJK", causing LaTeX
+ # to break with CJK. Work around it, by denying the variable font
+ # usage during xelatex build by passing the location of a config
+ # file with a deny list.
+ #
+        # See tools/lib/python/kdoc/latex_fonts.py for more details.
+ #
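+        # A minimal sketch of such a deny list, stored as
+        # <deny_vf>/fontconfig/fonts.conf (the glob path is an example):
+        #
+        #   <fontconfig>
+        #     <selectfont><rejectfont>
+        #       <glob>/usr/share/fonts/google-noto-*-cjk-vf-fonts/*</glob>
+        #     </rejectfont></selectfont>
+        #   </fontconfig>
+        #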
+ if deny_vf:
+ deny_vf = os.path.expanduser(deny_vf)
+ if os.path.isdir(deny_vf):
+ self.env["XDG_CONFIG_HOME"] = deny_vf
+
+ for from_dir in output_dirs:
+ pdf_dir = os.path.join(from_dir, "../pdf")
+ os.makedirs(pdf_dir, exist_ok=True)
+
+ if self.latexmk_cmd:
+ latex_cmd = [self.latexmk_cmd, f"-{self.pdflatex}"]
+ else:
+ latex_cmd = [self.pdflatex]
+
+ latex_cmd.extend(shlex.split(self.latexopts))
+
+ # Get a list of tex files to process
+ with os.scandir(from_dir) as it:
+ for entry in it:
+ if entry.name.endswith(tex_suffix):
+ tex_files.append((from_dir, pdf_dir, entry))
+
+ #
+ # When using make, this won't be used, as the number of jobs comes
+ # from POSIX jobserver. So, this covers the case where build comes
+ # from command line. On such case, serialize by default, except if
+ # the user explicitly sets the number of jobs.
+ #
+ n_jobs = 1
+
+ # n_jobs is either an integer or "auto". Only use it if it is a number
+ if self.n_jobs:
+ try:
+ n_jobs = int(self.n_jobs)
+ except ValueError:
+ pass
+
+ #
+ # When using make, jobserver.claim is the number of jobs that were
+ # used with "-j" and that aren't used by other make targets
+ #
+ with JobserverExec() as jobserver:
+ n_jobs = 1
+
+ #
+ # Handle the case when a parameter is passed via command line,
+ # using it as default, if jobserver doesn't claim anything
+ #
+ if self.n_jobs:
+ try:
+ n_jobs = int(self.n_jobs)
+ except ValueError:
+ pass
+
+ if jobserver.claim:
+ n_jobs = jobserver.claim
+
+ builds, build_failed, max_len = self.pdf_parallel_build(tex_suffix,
+ latex_cmd,
+ tex_files,
+ n_jobs)
+
+ #
+ # In verbose mode, print a summary with the build results per file.
+ # Otherwise, print a single line with all failures, if any.
+ # On both cases, return code 1 indicates build failures,
+ #
+ if self.verbose:
+ msg = "Summary"
+ msg += "\n" + "=" * len(msg)
+ print()
+ print(msg)
+
+            for out_name, status in builds.items():
+                print(f"{out_name:<{max_len}}: {status}")
+
+ print()
+ if build_failed:
+ msg = LatexFontChecker().check()
+ if msg:
+ print(msg)
+
+ sys.exit("Error: not all PDF files were created.")
+
+ elif build_failed:
+ n_failures = len(builds)
+ failures = ", ".join(builds.keys())
+
+ msg = LatexFontChecker().check()
+ if msg:
+ print(msg)
+
+ sys.exit(f"Error: Can't build {n_failures} PDF file(s): {failures}")
+
+ def handle_info(self, output_dirs):
+ """
+ Extra steps for Info output.
+
+ For texinfo generation, an additional make is needed from the
+ texinfo directory.
+ """
+
+ for output_dir in output_dirs:
+ try:
+ subprocess.run(["make", "info"], cwd=output_dir, check=True)
+ except subprocess.CalledProcessError as e:
+ sys.exit(f"Error generating info docs: {e}")
+
+ def handle_man(self, kerneldoc, docs_dir, src_dir, output_dir):
+ """
+ Create man pages from kernel-doc output
+ """
+
+ re_kernel_doc = re.compile(r"^\.\.\s+kernel-doc::\s*(\S+)")
+ re_man = re.compile(r'^\.TH "[^"]*" (\d+) "([^"]*)"')
+
+ if docs_dir == src_dir:
+ #
+ # Pick the entire set of kernel-doc markups from the entire tree
+ #
+ kdoc_files = set([self.srctree])
+ else:
+ kdoc_files = set()
+
+ for fname in glob(os.path.join(src_dir, "**"), recursive=True):
+ if os.path.isfile(fname) and fname.endswith(".rst"):
+ with open(fname, "r", encoding="utf-8") as in_fp:
+ data = in_fp.read()
+
+ for line in data.split("\n"):
+ match = re_kernel_doc.match(line)
+ if match:
+ if os.path.isfile(match.group(1)):
+ kdoc_files.add(match.group(1))
+
+ if not kdoc_files:
+ sys.exit(f"Directory {src_dir} doesn't contain kernel-doc tags")
+
+ cmd = [ kerneldoc, "-m" ] + sorted(kdoc_files)
+ try:
+ if self.verbose:
+ print(" ".join(cmd))
+
+ result = subprocess.run(cmd, stdout=subprocess.PIPE, text=True)
+
+ if result.returncode:
+ print(f"Warning: kernel-doc returned {result.returncode} warnings")
+
+ except (OSError, ValueError, subprocess.SubprocessError) as e:
+ sys.exit(f"Failed to create man pages for {src_dir}: {repr(e)}")
+
+ fp = None
+ try:
+ for line in result.stdout.split("\n"):
+ match = re_man.match(line)
+ if not match:
+ if fp:
+ fp.write(line + '\n')
+ continue
+
+ if fp:
+ fp.close()
+
+ fname = f"{output_dir}/{match.group(2)}.{match.group(1)}"
+
+ if self.verbose:
+ print(f"Creating {fname}")
+ fp = open(fname, "w", encoding="utf-8")
+ fp.write(line + '\n')
+ finally:
+ if fp:
+ fp.close()
+
+ def cleandocs(self, builder): # pylint: disable=W0613
+ """Remove documentation output directory"""
+ shutil.rmtree(self.builddir, ignore_errors=True)
+
+ def build(self, target, sphinxdirs=None,
+ theme=None, css=None, paper=None, deny_vf=None,
+ skip_sphinx=False):
+ """
+ Build documentation using Sphinx. This is the core function of this
+ module. It prepares all arguments required by sphinx-build.
+ """
+
+ builder = TARGETS[target]["builder"]
+ out_dir = TARGETS[target].get("out_dir", "")
+
+ #
+ # Cleandocs doesn't require sphinx-build
+ #
+ if target == "cleandocs":
+ self.cleandocs(builder)
+ return
+
+ if theme:
+ os.environ["DOCS_THEME"] = theme
+
+ #
+ # Other targets require sphinx-build, so check if it exists
+ #
+ if not skip_sphinx:
+ sphinxbuild = shutil.which(self.sphinxbuild, path=self.env["PATH"])
+ if not sphinxbuild and target != "mandocs":
+ sys.exit(f"Error: {self.sphinxbuild} not found in PATH.\n")
+
+ if target == "pdfdocs":
+ if not self.pdflatex_cmd and not self.latexmk_cmd:
+ sys.exit("Error: pdflatex or latexmk required for PDF generation")
+
+ docs_dir = os.path.abspath(os.path.join(self.srctree, "Documentation"))
+
+ #
+ # Fill in base arguments for Sphinx build
+ #
+ kerneldoc = self.kerneldoc
+ if kerneldoc.startswith(self.srctree):
+ kerneldoc = os.path.relpath(kerneldoc, self.srctree)
+
+ args = [ "-b", builder, "-c", docs_dir ]
+
+ if builder == "latex":
+ if not paper:
+ paper = PAPER[1]
+
+ args.extend(["-D", f"latex_elements.papersize={paper}paper"])
+
+ if self.rustdoc:
+ args.extend(["-t", "rustdoc"])
+
+ if not sphinxdirs:
+ sphinxdirs = os.environ.get("SPHINXDIRS", ".")
+
+ #
+ # The sphinx-build tool has a bug: internally, it tries to set
+ # locale with locale.setlocale(locale.LC_ALL, ''). This causes a
+ # crash if language is not set. Detect and fix it.
+ #
+ try:
+ locale.setlocale(locale.LC_ALL, '')
+ except locale.Error:
+ self.env["LC_ALL"] = "C"
+
+ #
+ # sphinxdirs can be a list or a whitespace-separated string
+ #
+ sphinxdirs_list = []
+ for sphinxdir in sphinxdirs:
+ if isinstance(sphinxdir, list):
+ sphinxdirs_list += sphinxdir
+ else:
+ sphinxdirs_list += sphinxdir.split()
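+
+ #
+ # Example: --sphinxdirs "driver-api doc-guide" and
+ # --sphinxdirs driver-api doc-guide both end up here as
+ # ["driver-api", "doc-guide"].
+ #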
+
+ #
+ # Step 1: Build each directory separately.
+ #
+ # This is not the best way of handling it, as cross-references between
+ # them will be broken, but this is what we've been doing since
+ # the beginning.
+ #
+ output_dirs = []
+ for sphinxdir in sphinxdirs_list:
+ src_dir = os.path.join(docs_dir, sphinxdir)
+ doctree_dir = os.path.join(self.builddir, ".doctrees")
+ output_dir = os.path.join(self.builddir, sphinxdir, out_dir)
+
+ #
+ # Make directory names canonical
+ #
+ src_dir = os.path.normpath(src_dir)
+ doctree_dir = os.path.normpath(doctree_dir)
+ output_dir = os.path.normpath(output_dir)
+
+ os.makedirs(doctree_dir, exist_ok=True)
+ os.makedirs(output_dir, exist_ok=True)
+
+ output_dirs.append(output_dir)
+
+ build_args = args + [
+ "-d", doctree_dir,
+ "-D", f"kerneldoc_bin={kerneldoc}",
+ "-D", f"version={self.kernelversion}",
+ "-D", f"release={self.kernelrelease}",
+ "-D", f"kerneldoc_srctree={self.srctree}",
+ src_dir,
+ output_dir,
+ ]
+
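+ #
+ # Illustrative expansion (example values) for "driver-api" with the
+ # html builder:
+ #
+ #     sphinx-build -b html -c Documentation -d <builddir>/.doctrees
+ #         -D kerneldoc_bin=<kerneldoc> ... Documentation/driver-api
+ #         <builddir>/driver-api/<out_dir>
+ #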
+ if target == "mandocs":
+ self.handle_man(kerneldoc, docs_dir, src_dir, output_dir)
+ elif not skip_sphinx:
+ try:
+ result = self.run_sphinx(sphinxbuild, build_args,
+ env=self.env)
+
+ if result:
+ sys.exit(f"Build failed: return code: {result}")
+
+ except (OSError, ValueError, subprocess.SubprocessError) as e:
+ sys.exit(f"Build failed: {repr(e)}")
+
+ #
+ # Ensure that each html/epub output will have needed static files
+ #
+ if target in ["htmldocs", "epubdocs"]:
+ self.handle_html(css, output_dir)
+
+ #
+ # Step 2: Some targets (PDF and info) require an extra step once
+ # sphinx-build finishes
+ #
+ if target == "pdfdocs":
+ self.handle_pdf(output_dirs, deny_vf)
+ elif target == "infodocs":
+ self.handle_info(output_dirs)
+
+def jobs_type(value):
+ """
+ Handle valid values for -j. Accepts Sphinx "-jauto", plus a number
+ equal to or greater than one.
+ """
+ if value is None:
+ return None
+
+ if value.lower() == 'auto':
+ return value.lower()
+
+ try:
+ if int(value) >= 1:
+ return value
+
+ raise argparse.ArgumentTypeError(f"Minimum jobs is 1, got {value}")
+ except ValueError:
+ raise argparse.ArgumentTypeError(f"Must be 'auto' or positive integer, got {value}") # pylint: disable=W0707
+
+def main():
+ """
+ Main function. The only mandatory argument is the target. The other
+ arguments fall back to defaults or to os.environ values when not
+ specified.
+ """
+ parser = argparse.ArgumentParser(description="Kernel documentation builder")
+
+ parser.add_argument("target", choices=list(TARGETS.keys()),
+ help="Documentation target to build")
+ parser.add_argument("--sphinxdirs", nargs="+",
+ help="Specific directories to build")
+ parser.add_argument("--builddir", default="output",
+ help="Sphinx configuration file")
+
+ parser.add_argument("--theme", help="Sphinx theme to use")
+
+ parser.add_argument("--css", help="Custom CSS file for HTML/EPUB")
+
+ parser.add_argument("--paper", choices=PAPER, default=PAPER[0],
+ help="Paper size for LaTeX/PDF output")
+
+ parser.add_argument('--deny-vf',
+ help="Configuration to deny variable fonts on pdf builds")
+
+ parser.add_argument("-v", "--verbose", action='store_true',
+ help="place build in verbose mode")
+
+ parser.add_argument('-j', '--jobs', type=jobs_type,
+ help="Sets number of jobs to use with sphinx-build")
+
+ parser.add_argument('-i', '--interactive', action='store_true',
+ help="Change latex default to run in interactive mode")
+
+ parser.add_argument('-s', '--skip-sphinx-build', action='store_true',
+ help="Skip sphinx-build step")
+
+ parser.add_argument("-V", "--venv", nargs='?', const=f'{VENV_DEFAULT}',
+ default=None,
+ help=f'If used, run Sphinx from a venv dir (default dir: {VENV_DEFAULT})')
+
+ args = parser.parse_args()
+
+ PythonVersion.check_python(MIN_PYTHON_VERSION, show_alternatives=True,
+ bail_out=True)
+
+ builder = SphinxBuilder(builddir=args.builddir, venv=args.venv,
+ verbose=args.verbose, n_jobs=args.jobs,
+ interactive=args.interactive)
+
+ builder.build(args.target, sphinxdirs=args.sphinxdirs,
+ theme=args.theme, css=args.css, paper=args.paper,
+ deny_vf=args.deny_vf,
+ skip_sphinx=args.skip_sphinx_build)
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/docs/sphinx-pre-install b/tools/docs/sphinx-pre-install
new file mode 100755
index 000000000000..965c9b093a41
--- /dev/null
+++ b/tools/docs/sphinx-pre-install
@@ -0,0 +1,1543 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (c) 2017-2025 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+#
+# pylint: disable=C0103,C0114,C0115,C0116,C0301,C0302
+# pylint: disable=R0902,R0904,R0911,R0912,R0914,R0915,R1705,R1710,E1121
+
+# Note: this script requires at least Python 3.6 to run.
+# Don't add changes that are incompatible with it: this script is meant
+# to report incompatible python versions.
+
+"""
+Dependency checker for Sphinx documentation Kernel build.
+
+This module provides tools to check for all required dependencies needed to
+build documentation using Sphinx, including system packages, Python modules
+and LaTeX packages for PDF generation.
+
+It detects packages for a subset of the Linux distributions used by
+Kernel maintainers, showing hints about missing dependencies.
+
+The main class SphinxDependencyChecker handles the dependency checking logic
+and provides recommendations for installing missing packages. It supports both
+system package installations and Python virtual environments. By default,
+system package install is recommended.
+"""
+
+import argparse
+import locale
+import os
+import re
+import subprocess
+import sys
+from glob import glob
+import os.path
+
+src_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, os.path.join(src_dir, '../lib/python'))
+from kdoc.python_version import PythonVersion
+
+RECOMMENDED_VERSION = PythonVersion("3.4.3").version
+MIN_PYTHON_VERSION = PythonVersion("3.7").version
+
+
+class DepManager:
+ """
+ Manage package dependencies. There are three types of dependencies:
+
+ - System: dependencies required for docs build;
+ - Python: python dependencies for a native distro Sphinx install;
+ - PDF: dependencies needed by PDF builds.
+
+ Each dependency can be mandatory or optional. Not installing an optional
+ dependency won't break the build, but will cause degradation at the
+ docs output.
+ """
+
+ # Internal types of dependencies. Don't use them outside DepManager class.
+ _SYS_TYPE = 0
+ _PHY_TYPE = 1
+ _PDF_TYPE = 2
+
+ # Dependencies visible outside the class.
+ # The keys are tuple with: (type, is_mandatory flag).
+ #
+ # Currently we're not using all optional dep types. Yet, we'll keep all
+ # possible combinations here: they're not many, and that makes life
+ # easier if they're needed later, and for the name() method below.
+
+ SYSTEM_MANDATORY = (_SYS_TYPE, True)
+ PYTHON_MANDATORY = (_PHY_TYPE, True)
+ PDF_MANDATORY = (_PDF_TYPE, True)
+
+ SYSTEM_OPTIONAL = (_SYS_TYPE, False)
+ PYTHON_OPTIONAL = (_PHY_TYPE, False)
+ PDF_OPTIONAL = (_PDF_TYPE, False)
+
+ def __init__(self, pdf):
+ """
+ Initialize internal vars:
+
+ - missing: missing dependencies list, containing a distro-independent
+ name for a missing dependency and its type.
+ - missing_pkg: ancillary dict containing missing dependencies in
+ distro namespace, organized by type.
+ - need: total number of needed dependencies. Never cleaned.
+ - optional: total number of optional dependencies. Never cleaned.
+ - pdf: Is PDF support enabled?
+ """
+ self.missing = {}
+ self.missing_pkg = {}
+ self.need = 0
+ self.optional = 0
+ self.pdf = pdf
+
+ @staticmethod
+ def name(dtype):
+ """
+ Ancillary routine to output a warn/error message reporting
+ missing dependencies.
+ """
+ if dtype[0] == DepManager._SYS_TYPE:
+ msg = "build"
+ elif dtype[0] == DepManager._PHY_TYPE:
+ msg = "Python"
+ else:
+ msg = "PDF"
+
+ if dtype[1]:
+ return f"ERROR: {msg} mandatory deps missing"
+ else:
+ return f"Warning: {msg} optional deps missing"
+
+ @staticmethod
+ def is_optional(dtype):
+ """Ancillary routine to report if a dependency is optional"""
+ return not dtype[1]
+
+ @staticmethod
+ def is_pdf(dtype):
+ """Ancillary routine to report if a dependency is for PDF generation"""
+ if dtype[0] == DepManager._PDF_TYPE:
+ return True
+
+ return False
+
+ def add_package(self, package, dtype):
+ """
+ Add a package to the self.missing dictionary.
+ Doesn't update missing_pkg.
+ """
+ is_optional = DepManager.is_optional(dtype)
+ self.missing[package] = dtype
+ if is_optional:
+ self.optional += 1
+ else:
+ self.need += 1
+
+ def del_package(self, package):
+ """
+ Remove a package from the self.missing dictionary.
+ Doesn't update missing_pkg.
+ """
+ if package in self.missing:
+ del self.missing[package]
+
+ def clear_deps(self):
+ """
+ Clear dependencies without changing needed/optional.
+
+ This is an awkward way to have a separate section recommending
+ a package after the main system dependencies.
+
+ TODO: rework the logic to prevent needing it.
+ """
+
+ self.missing = {}
+ self.missing_pkg = {}
+
+ def check_missing(self, progs):
+ """
+ Update self.missing_pkg, using progs dict to convert from the
+ agnostic package name to distro-specific one.
+
+ Returns a string with the packages to be installed, sorted and
+ with any duplicates removed.
+ """
+
+ self.missing_pkg = {}
+
+ for prog, dtype in sorted(self.missing.items()):
+ # At least on some LTS distros like CentOS 7, texlive doesn't
+ # provide all packages we need. When such distros are
+ # detected, we have to disable PDF output.
+ #
+ # So, we need to ignore the packages that distros would
+ # need for LaTeX to work
+ if DepManager.is_pdf(dtype) and not self.pdf:
+ self.optional -= 1
+ continue
+
+ if dtype not in self.missing_pkg:
+ self.missing_pkg[dtype] = []
+
+ self.missing_pkg[dtype].append(progs.get(prog, prog))
+
+ install = []
+ for dtype, pkgs in self.missing_pkg.items():
+ install += pkgs
+
+ return " ".join(sorted(set(install)))
+
+ def warn_install(self):
+ """
+ Emit warnings/errors related to missing packages.
+ """
+
+ output_msg = ""
+
+ for dtype in sorted(self.missing_pkg.keys()):
+ progs = " ".join(sorted(set(self.missing_pkg[dtype])))
+
+ try:
+ name = DepManager.name(dtype)
+ output_msg += f'{name}:\t{progs}\n'
+ except KeyError:
+ raise KeyError(f"ERROR!!!: invalid dtype for {progs}: {dtype}")
+
+ if output_msg:
+ print(f"\n{output_msg}")
+
+class AncillaryMethods:
+ """
+ Ancillary methods that check for missing dependencies of different
+ types: binaries, Python modules, RPM packages, etc.
+ """
+
+ @staticmethod
+ def which(prog):
+ """
+ Our own implementation of which(). We could use shutil.which()
+ instead, but this function is simple enough and avoids importing
+ shutil just for it.
+ """
+ for path in os.environ.get("PATH", "").split(":"):
+ full_path = os.path.join(path, prog)
+ if os.access(full_path, os.X_OK):
+ return full_path
+
+ return None
+
+ @staticmethod
+ def run(*args, **kwargs):
+ """
+ Execute a command, hiding its output by default.
+ Preserve compatibility with older Python versions.
+ """
+
+ capture_output = kwargs.pop('capture_output', False)
+
+ if capture_output:
+ if 'stdout' not in kwargs:
+ kwargs['stdout'] = subprocess.PIPE
+ if 'stderr' not in kwargs:
+ kwargs['stderr'] = subprocess.PIPE
+ else:
+ if 'stdout' not in kwargs:
+ kwargs['stdout'] = subprocess.DEVNULL
+ if 'stderr' not in kwargs:
+ kwargs['stderr'] = subprocess.DEVNULL
+
+ # Don't break with older Python versions
+ if 'text' in kwargs and sys.version_info < (3, 7):
+ kwargs['universal_newlines'] = kwargs.pop('text')
+
+ return subprocess.run(*args, **kwargs)
+
+class MissingCheckers(AncillaryMethods):
+ """
+ Contains some ancillary checkers for different types of binaries and
+ package managers.
+ """
+
+ def __init__(self, args, texlive):
+ """
+ Initialize its internal variables
+ """
+ self.pdf = args.pdf
+ self.virtualenv = args.virtualenv
+ self.version_check = args.version_check
+ self.texlive = texlive
+
+ self.min_version = (0, 0, 0)
+ self.cur_version = (0, 0, 0)
+
+ self.deps = DepManager(self.pdf)
+
+ self.need_symlink = 0
+ self.need_sphinx = 0
+
+ self.verbose_warn_install = 1
+
+ self.virtenv_dir = ""
+ self.install = ""
+ self.python_cmd = ""
+
+ self.virtenv_prefix = ["sphinx_", "Sphinx_" ]
+
+ def check_missing_file(self, files, package, dtype):
+ """
+ Does any of the files exist? If none does, add the package to the
+ missing dependencies.
+ """
+ for f in files:
+ if os.path.exists(f):
+ return
+ self.deps.add_package(package, dtype)
+
+ def check_program(self, prog, dtype):
+ """
+ Does the program exists and it is at the PATH?
+ If not, add it to missing dependencies.
+ """
+ found = self.which(prog)
+ if found:
+ return found
+
+ self.deps.add_package(prog, dtype)
+
+ return None
+
+ def check_perl_module(self, prog, dtype):
+ """
+ Is the required Perl module available? If not, add it to the
+ missing dependencies.
+
+ Right now, we still need Perl for the doc build, as it is required
+ by some tools called at docs or kernel build time, like:
+
+ tools/docs/documentation-file-ref-check
+
+ Also, checkpatch is written in Perl.
+ """
+
+ # While testing with lxc download template, one of the
+ # distros (Oracle) didn't have perl - nor even an option to install
+ # before installing oraclelinux-release-el9 package.
+ #
+ # Check for it before reporting an error. If perl is not there,
+ # add it as a mandatory package, as some parts of the doc builder
+ # need it.
+ if not self.which("perl"):
+ self.deps.add_package("perl", DepManager.SYSTEM_MANDATORY)
+ self.deps.add_package(prog, dtype)
+ return
+
+ try:
+ self.run(["perl", f"-M{prog}", "-e", "1"], check=True)
+ except subprocess.CalledProcessError:
+ self.deps.add_package(prog, dtype)
+
+ def check_python_module(self, module, is_optional=False):
+ """
+ Does a Python module exist outside a venv? If not, add it to the
+ missing dependencies.
+ """
+ if is_optional:
+ dtype = DepManager.PYTHON_OPTIONAL
+ else:
+ dtype = DepManager.PYTHON_MANDATORY
+
+ try:
+ self.run([self.python_cmd, "-c", f"import {module}"], check=True)
+ except subprocess.CalledProcessError:
+ self.deps.add_package(module, dtype)
+
+ def check_rpm_missing(self, pkgs, dtype):
+ """
+ Does an RPM package exist? If not, add it to the missing dependencies.
+ """
+ for prog in pkgs:
+ try:
+ self.run(["rpm", "-q", prog], check=True)
+ except subprocess.CalledProcessError:
+ self.deps.add_package(prog, dtype)
+
+ def check_pacman_missing(self, pkgs, dtype):
+ """
+ Does a pacman package exist? If not, add it to the missing dependencies.
+ """
+ for prog in pkgs:
+ try:
+ self.run(["pacman", "-Q", prog], check=True)
+ except subprocess.CalledProcessError:
+ self.deps.add_package(prog, dtype)
+
+ def check_missing_tex(self, is_optional=False):
+ """
+ Does a LaTeX package exist? If not, add it to the missing dependencies.
+ """
+ if is_optional:
+ dtype = DepManager.PDF_OPTIONAL
+ else:
+ dtype = DepManager.PDF_MANDATORY
+
+ kpsewhich = self.which("kpsewhich")
+ for prog, package in self.texlive.items():
+
+ # If kpsewhich is not there, just add it to deps
+ if not kpsewhich:
+ self.deps.add_package(package, dtype)
+ continue
+
+ # Check if the package is needed
+ try:
+ result = self.run(
+ [kpsewhich, prog], stdout=subprocess.PIPE, text=True, check=True
+ )
+
+ # Didn't find. Add it
+ if not result.stdout.strip():
+ self.deps.add_package(package, dtype)
+
+ except subprocess.CalledProcessError:
+ # kpsewhich returned an error. Add it, just in case
+ self.deps.add_package(package, dtype)
+
+ def get_sphinx_fname(self):
+ """
+ Gets the binary filename for sphinx-build.
+ """
+ if "SPHINXBUILD" in os.environ:
+ return os.environ["SPHINXBUILD"]
+
+ fname = "sphinx-build"
+ if self.which(fname):
+ return fname
+
+ fname = "sphinx-build-3"
+ if self.which(fname):
+ self.need_symlink = 1
+ return fname
+
+ return ""
+
+ def get_sphinx_version(self, cmd):
+ """
+ Gets sphinx-build version.
+ """
+ env = os.environ.copy()
+
+ # The sphinx-build tool has a bug: internally, it tries to set
+ # locale with locale.setlocale(locale.LC_ALL, ''). This causes a
+ # crash if language is not set. Detect and fix it.
+ try:
+ locale.setlocale(locale.LC_ALL, '')
+ except Exception:
+ env["LC_ALL"] = "C"
+ env["LANG"] = "C"
+
+ try:
+ result = self.run([cmd, "--version"], env=env,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True, check=True)
+ except (subprocess.CalledProcessError, FileNotFoundError):
+ return None
+
+ for line in result.stdout.split("\n"):
+ match = re.match(r"^sphinx-build\s+([\d\.]+)(?:\+(?:/[\da-f]+)|b\d+)?\s*$", line)
+ if match:
+ return PythonVersion.parse_version(match.group(1))
+
+ match = re.match(r"^Sphinx.*\s+([\d\.]+)\s*$", line)
+ if match:
+ return PythonVersion.parse_version(match.group(1))
+
+ def check_sphinx(self, conf):
+ """
+ Checks Sphinx minimal requirements
+ """
+ try:
+ with open(conf, "r", encoding="utf-8") as f:
+ for line in f:
+ match = re.match(r"^\s*needs_sphinx\s*=\s*[\'\"]([\d\.]+)[\'\"]", line)
+ if match:
+ self.min_version = PythonVersion.parse_version(match.group(1))
+ break
+ except IOError:
+ sys.exit(f"Can't open {conf}")
+
+ if self.min_version == (0, 0, 0):
+ sys.exit(f"Can't get needs_sphinx version from {conf}")
+
+ self.virtenv_dir = self.virtenv_prefix[0] + "latest"
+
+ sphinx = self.get_sphinx_fname()
+ if not sphinx:
+ self.need_sphinx = 1
+ return
+
+ self.cur_version = self.get_sphinx_version(sphinx)
+ if not self.cur_version:
+ sys.exit(f"{sphinx} didn't return its version")
+
+ if self.cur_version < self.min_version:
+ curver = PythonVersion.ver_str(self.cur_version)
+ minver = PythonVersion.ver_str(self.min_version)
+
+ print(f"ERROR: Sphinx version is {curver}. It should be >= {minver}")
+ self.need_sphinx = 1
+ return
+
+ # In version-check mode, just assume Sphinx has all mandatory deps
+ if self.version_check and self.cur_version >= RECOMMENDED_VERSION:
+ sys.exit(0)
+
+ def catcheck(self, filename):
+ """
+ Reads a file if it exists, returning its contents as a string.
+ If not found, returns an empty string.
+ """
+ if os.path.exists(filename):
+ with open(filename, "r", encoding="utf-8") as f:
+ return f.read().strip()
+ return ""
+
+ def get_system_release(self):
+ """
+ Determine the system type. There's no single way that works for
+ all distros with a minimal package install, so several methods
+ are used here.
+
+ By default, it uses the lsb_release command. If not available, it
+ falls back to reading the known places where the distro name is
+ stored.
+
+ Several modern distros now have /etc/os-release, which usually
+ has decent coverage.
+ """
+
+ system_release = ""
+
+ if self.which("lsb_release"):
+ result = self.run(["lsb_release", "-d"], capture_output=True, text=True)
+ system_release = result.stdout.replace("Description:", "").strip()
+
+ release_files = [
+ "/etc/system-release",
+ "/etc/redhat-release",
+ "/etc/lsb-release",
+ "/etc/gentoo-release",
+ ]
+
+ if not system_release:
+ for f in release_files:
+ system_release = self.catcheck(f)
+ if system_release:
+ break
+
+ # This seems more common than LSB these days
+ if not system_release:
+ os_var = {}
+ try:
+ with open("/etc/os-release", "r", encoding="utf-8") as f:
+ for line in f:
+ match = re.match(r"^([\w\d\_]+)=\"?([^\"]*)\"?\n", line)
+ if match:
+ os_var[match.group(1)] = match.group(2)
+
+ system_release = os_var.get("NAME", "")
+ if "VERSION_ID" in os_var:
+ system_release += " " + os_var["VERSION_ID"]
+ elif "VERSION" in os_var:
+ system_release += " " + os_var["VERSION"]
+ except IOError:
+ pass
+
+ if not system_release:
+ system_release = self.catcheck("/etc/issue")
+
+ system_release = system_release.strip()
+
+ return system_release
+
+class SphinxDependencyChecker(MissingCheckers):
+ """
+ Main class for checking Sphinx documentation build dependencies.
+
+ - Check for missing system packages;
+ - Check for missing Python modules;
+ - Check for missing LaTeX packages needed by PDF generation;
+ - Propose Sphinx install via Python Virtual environment;
+ - Propose Sphinx install via distro-specific package install.
+ """
+ def __init__(self, args):
+ """Initialize checker variables"""
+
+ # List of required texlive packages on Fedora and OpenSuse
+ texlive = {
+ "amsfonts.sty": "texlive-amsfonts",
+ "amsmath.sty": "texlive-amsmath",
+ "amssymb.sty": "texlive-amsfonts",
+ "amsthm.sty": "texlive-amscls",
+ "anyfontsize.sty": "texlive-anyfontsize",
+ "atbegshi.sty": "texlive-oberdiek",
+ "bm.sty": "texlive-tools",
+ "capt-of.sty": "texlive-capt-of",
+ "cmap.sty": "texlive-cmap",
+ "ctexhook.sty": "texlive-ctex",
+ "ecrm1000.tfm": "texlive-ec",
+ "eqparbox.sty": "texlive-eqparbox",
+ "eu1enc.def": "texlive-euenc",
+ "fancybox.sty": "texlive-fancybox",
+ "fancyvrb.sty": "texlive-fancyvrb",
+ "float.sty": "texlive-float",
+ "fncychap.sty": "texlive-fncychap",
+ "footnote.sty": "texlive-mdwtools",
+ "framed.sty": "texlive-framed",
+ "luatex85.sty": "texlive-luatex85",
+ "multirow.sty": "texlive-multirow",
+ "needspace.sty": "texlive-needspace",
+ "palatino.sty": "texlive-psnfss",
+ "parskip.sty": "texlive-parskip",
+ "polyglossia.sty": "texlive-polyglossia",
+ "tabulary.sty": "texlive-tabulary",
+ "threeparttable.sty": "texlive-threeparttable",
+ "titlesec.sty": "texlive-titlesec",
+ "ucs.sty": "texlive-ucs",
+ "upquote.sty": "texlive-upquote",
+ "wrapfig.sty": "texlive-wrapfig",
+ }
+
+ super().__init__(args, texlive)
+
+ self.need_pip = False
+ self.rec_sphinx_upgrade = 0
+
+ self.system_release = self.get_system_release()
+ self.activate_cmd = ""
+
+ # Some distros may not have a Sphinx shipped package compatible with
+ # our minimal requirements
+ self.package_supported = True
+
+ # Recommend a new python version
+ self.recommend_python = None
+
+ # Certain hints are meant to be shown only once
+ self.distro_msg = None
+
+ self.latest_avail_ver = (0, 0, 0)
+ self.venv_ver = (0, 0, 0)
+
+ prefix = os.environ.get("srctree", ".") + "/"
+
+ self.conf = prefix + "Documentation/conf.py"
+ self.requirement_file = prefix + "Documentation/sphinx/requirements.txt"
+
+ def get_install_progs(self, progs, cmd, extra=None):
+ """
+ Check for missing dependencies using the provided program mapping.
+
+ The actual distro-specific programs are mapped via progs argument.
+ """
+ install = self.deps.check_missing(progs)
+
+ if self.verbose_warn_install:
+ self.deps.warn_install()
+
+ if not install:
+ return
+
+ if cmd:
+ if self.verbose_warn_install:
+ msg = "You should run:"
+ else:
+ msg = ""
+
+ if extra:
+ msg += "\n\t" + extra.replace("\n", "\n\t")
+
+ return(msg + "\n\tsudo " + cmd + " " + install)
+
+ return None
+
+ #
+ # Distro-specific hints methods
+ #
+
+ def give_debian_hints(self):
+ """
+ Provide package installation hints for Debian-based distros.
+ """
+ progs = {
+ "Pod::Usage": "perl-modules",
+ "convert": "imagemagick",
+ "dot": "graphviz",
+ "ensurepip": "python3-venv",
+ "python-sphinx": "python3-sphinx",
+ "rsvg-convert": "librsvg2-bin",
+ "virtualenv": "virtualenv",
+ "xelatex": "texlive-xetex",
+ "yaml": "python3-yaml",
+ }
+
+ if self.pdf:
+ pdf_pkgs = {
+ "fonts-dejavu": [
+ "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
+ ],
+ "fonts-noto-cjk": [
+ "/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc",
+ "/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc",
+ "/usr/share/fonts/opentype/noto/NotoSerifCJK-Regular.ttc",
+ ],
+ "tex-gyre": [
+ "/usr/share/texmf/tex/latex/tex-gyre/tgtermes.sty"
+ ],
+ "texlive-fonts-recommended": [
+ "/usr/share/texlive/texmf-dist/fonts/tfm/adobe/zapfding/pzdr.tfm",
+ ],
+ "texlive-lang-chinese": [
+ "/usr/share/texlive/texmf-dist/tex/latex/ctex/ctexhook.sty",
+ ],
+ }
+
+ for package, files in pdf_pkgs.items():
+ self.check_missing_file(files, package, DepManager.PDF_MANDATORY)
+
+ self.check_program("dvipng", DepManager.PDF_MANDATORY)
+
+ if not self.distro_msg:
+ self.distro_msg = \
+ "Note: ImageMagick is broken on some distros, affecting PDF output. For more details:\n" \
+ "\thttps://askubuntu.com/questions/1158894/imagemagick-still-broken-using-with-usr-bin-convert"
+
+ return self.get_install_progs(progs, "apt-get install")
+
+ def give_redhat_hints(self):
+ """
+ Provide package installation hints for RedHat-based distros
+ (Fedora, RHEL and RHEL-based variants).
+ """
+ progs = {
+ "Pod::Usage": "perl-Pod-Usage",
+ "convert": "ImageMagick",
+ "dot": "graphviz",
+ "python-sphinx": "python3-sphinx",
+ "rsvg-convert": "librsvg2-tools",
+ "virtualenv": "python3-virtualenv",
+ "xelatex": "texlive-xetex-bin",
+ "yaml": "python3-pyyaml",
+ }
+
+ fedora_tex_pkgs = [
+ "dejavu-sans-fonts",
+ "dejavu-sans-mono-fonts",
+ "dejavu-serif-fonts",
+ "texlive-collection-fontsrecommended",
+ "texlive-collection-latex",
+ "texlive-xecjk",
+ ]
+
+ fedora = False
+ rel = None
+
+ match = re.search(r"(release|Linux)\s+(\d+)", self.system_release)
+ if match:
+ rel = int(match.group(2))
+
+ if not rel:
+ print("Couldn't identify release number")
+ noto_sans_redhat = None
+ self.pdf = False
+ elif re.search("Fedora", self.system_release):
+ # Fedora 38 and upper use this CJK font
+
+ noto_sans_redhat = "google-noto-sans-cjk-fonts"
+ fedora = True
+ else:
+ # Almalinux, CentOS, RHEL, ...
+
+ # at least up to version 9 (and Fedora < 38), that's the CJK font
+ noto_sans_redhat = "google-noto-sans-cjk-ttc-fonts"
+
+ progs["virtualenv"] = "python-virtualenv"
+
+ if not rel or rel < 8:
+ print("ERROR: Distro not supported. Too old?")
+ return
+
+ # RHEL 8 uses Python 3.6, which is not compatible with
+ # the build system anymore. Suggest Python 3.9
+ if rel == 8:
+ self.check_program("python3.9", DepManager.SYSTEM_MANDATORY)
+ progs["python3.9"] = "python39"
+ progs["yaml"] = "python39-pyyaml"
+
+ self.recommend_python = True
+
+ # There's no python39-sphinx package. Only pip is supported
+ self.package_supported = False
+
+ if not self.distro_msg:
+ self.distro_msg = \
+ "Note: RHEL-based distros typically require extra repositories.\n" \
+ "For most, enabling epel and crb are enough:\n" \
+ "\tsudo dnf install -y epel-release\n" \
+ "\tsudo dnf config-manager --set-enabled crb\n" \
+ "Yet, some may have other required repositories. Those commands could be useful:\n" \
+ "\tsudo dnf repolist all\n" \
+ "\tsudo dnf repoquery --available --info <pkgs>\n" \
+ "\tsudo dnf config-manager --set-enabled '*' # enable all - probably not what you want"
+
+ if self.pdf:
+ pdf_pkgs = [
+ "/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
+ "/usr/share/fonts/google-noto-sans-cjk-fonts/NotoSansCJK-Regular.ttc",
+ ]
+
+ self.check_missing_file(pdf_pkgs, noto_sans_redhat, DepManager.PDF_MANDATORY)
+
+ self.check_rpm_missing(fedora_tex_pkgs, DepManager.PDF_MANDATORY)
+
+ self.check_missing_tex()
+
+ # There's no texlive-ctex on RHEL 8 repositories. This will
+ # likely affect CJK pdf build only.
+ if not fedora and rel == 8:
+ self.deps.del_package("texlive-ctex")
+
+ return self.get_install_progs(progs, "dnf install")
+
+ def give_opensuse_hints(self):
+ """
+ Provide package installation hints for openSUSE-based distros
+ (Leap and Tumbleweed).
+ """
+ progs = {
+ "Pod::Usage": "perl-Pod-Usage",
+ "convert": "ImageMagick",
+ "dot": "graphviz",
+ "python-sphinx": "python3-sphinx",
+ "virtualenv": "python3-virtualenv",
+ "xelatex": "texlive-xetex-bin texlive-dejavu",
+ "yaml": "python3-pyyaml",
+ }
+
+ suse_tex_pkgs = [
+ "texlive-babel-english",
+ "texlive-caption",
+ "texlive-colortbl",
+ "texlive-courier",
+ "texlive-dvips",
+ "texlive-helvetic",
+ "texlive-makeindex",
+ "texlive-metafont",
+ "texlive-metapost",
+ "texlive-palatino",
+ "texlive-preview",
+ "texlive-times",
+ "texlive-zapfchan",
+ "texlive-zapfding",
+ ]
+
+ progs["latexmk"] = "texlive-latexmk-bin"
+
+ match = re.search(r"(Leap)\s+(\d+).(\d)", self.system_release)
+ if match:
+ rel = int(match.group(2))
+
+ # Leap 15.x uses Python 3.6, which is not compatible with
+ # the build system anymore. Suggest Python 3.11
+ if rel == 15:
+ if not self.which(self.python_cmd):
+ self.check_program("python3.11", DepManager.SYSTEM_MANDATORY)
+ progs["python3.11"] = "python311"
+ self.recommend_python = True
+
+ progs.update({
+ "python-sphinx": "python311-Sphinx python311-Sphinx-latex",
+ "virtualenv": "python311-virtualenv",
+ "yaml": "python311-PyYAML",
+ })
+ else:
+ # Tumbleweed defaults to Python 3.13
+
+ progs.update({
+ "python-sphinx": "python313-Sphinx python313-Sphinx-latex",
+ "virtualenv": "python313-virtualenv",
+ "yaml": "python313-PyYAML",
+ })
+
+ # FIXME: add support for installing CJK fonts
+ #
+ # I tried hard, but was unable to find a way to install
+ # "Noto Sans CJK SC" on openSUSE
+
+ if self.pdf:
+ self.check_rpm_missing(suse_tex_pkgs, DepManager.PDF_MANDATORY)
+ self.check_missing_tex()
+
+ return self.get_install_progs(progs, "zypper install --no-recommends")
+
+ def give_mageia_hints(self):
+ """
+ Provide package installation hints for Mageia and OpenMandriva.
+ """
+ progs = {
+ "Pod::Usage": "perl-Pod-Usage",
+ "convert": "ImageMagick",
+ "dot": "graphviz",
+ "python-sphinx": "python3-sphinx",
+ "rsvg-convert": "librsvg2",
+ "virtualenv": "python3-virtualenv",
+ "xelatex": "texlive",
+ "yaml": "python3-yaml",
+ }
+
+ tex_pkgs = [
+ "texlive-fontsextra",
+ "texlive-fonts-asian",
+ "fonts-ttf-dejavu",
+ ]
+
+ if re.search(r"OpenMandriva", self.system_release):
+ packager_cmd = "dnf install"
+ noto_sans = "noto-sans-cjk-fonts"
+ tex_pkgs = [
+ "texlive-collection-basic",
+ "texlive-collection-langcjk",
+ "texlive-collection-fontsextra",
+ "texlive-collection-fontsrecommended"
+ ]
+
+ # Tested on OpenMandriva Lx 4.3
+ progs["convert"] = "imagemagick"
+ progs["yaml"] = "python-pyyaml"
+ progs["python-virtualenv"] = "python-virtualenv"
+ progs["python-sphinx"] = "python-sphinx"
+ progs["xelatex"] = "texlive"
+
+ self.check_program("python-virtualenv", DepManager.PYTHON_MANDATORY)
+
+ # On my tests with openMandriva LX 4.0 docker image, upgraded
+ # to 4.3, python-virtualenv package is broken: it is missing
+ # ensurepip. Without it, the alternative would be to run:
+ # python3 -m venv --without-pip ~/sphinx_latest, but running
+ # pip there won't install sphinx into the venv.
+ #
+ # Add a note about that.
+
+ if not self.distro_msg:
+ self.distro_msg = \
+ "Notes:\n"\
+ "1. for venv, ensurepip could be broken, preventing its install method.\n" \
+ "2. at least on OpenMandriva LX 4.3, texlive packages seem broken"
+
+ else:
+ packager_cmd = "urpmi"
+ noto_sans = "google-noto-sans-cjk-ttc-fonts"
+
+ progs["latexmk"] = "texlive-collection-basic"
+
+ if self.pdf:
+ pdf_pkgs = [
+ "/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
+ "/usr/share/fonts/TTF/NotoSans-Regular.ttf",
+ ]
+
+ self.check_missing_file(pdf_pkgs, noto_sans, DepManager.PDF_MANDATORY)
+ self.check_rpm_missing(tex_pkgs, DepManager.PDF_MANDATORY)
+
+ return self.get_install_progs(progs, packager_cmd)
+
+ def give_arch_linux_hints(self):
+ """
+ Provide package installation hints for ArchLinux.
+ """
+ progs = {
+ "convert": "imagemagick",
+ "dot": "graphviz",
+ "latexmk": "texlive-core",
+ "rsvg-convert": "extra/librsvg",
+ "virtualenv": "python-virtualenv",
+ "xelatex": "texlive-xetex",
+ "yaml": "python-yaml",
+ }
+
+ archlinux_tex_pkgs = [
+ "texlive-basic",
+ "texlive-binextra",
+ "texlive-core",
+ "texlive-fontsrecommended",
+ "texlive-langchinese",
+ "texlive-langcjk",
+ "texlive-latexextra",
+ "ttf-dejavu",
+ ]
+
+ if self.pdf:
+ self.check_pacman_missing(archlinux_tex_pkgs,
+ DepManager.PDF_MANDATORY)
+
+ self.check_missing_file(["/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc"],
+ "noto-fonts-cjk",
+ DepManager.PDF_MANDATORY)
+
+
+ return self.get_install_progs(progs, "pacman -S")
+
+ def give_gentoo_hints(self):
+ """
+ Provide package installation hints for Gentoo.
+ """
+ texlive_deps = [
+ "dev-texlive/texlive-fontsrecommended",
+ "dev-texlive/texlive-latexextra",
+ "dev-texlive/texlive-xetex",
+ "media-fonts/dejavu",
+ ]
+
+ progs = {
+ "convert": "media-gfx/imagemagick",
+ "dot": "media-gfx/graphviz",
+ "rsvg-convert": "gnome-base/librsvg",
+ "virtualenv": "dev-python/virtualenv",
+ "xelatex": " ".join(texlive_deps),
+ "yaml": "dev-python/pyyaml",
+ "python-sphinx": "dev-python/sphinx",
+ }
+
+ if self.pdf:
+ pdf_pkgs = {
+ "media-fonts/dejavu": [
+ "/usr/share/fonts/dejavu/DejaVuSans.ttf",
+ ],
+ "media-fonts/noto-cjk": [
+ "/usr/share/fonts/noto-cjk/NotoSansCJKsc-Regular.otf",
+ "/usr/share/fonts/noto-cjk/NotoSerifCJK-Regular.ttc",
+ ],
+ }
+ for package, files in pdf_pkgs.items():
+ self.check_missing_file(files, package, DepManager.PDF_MANDATORY)
+
+ # Handling dependencies is a nightmare, as Gentoo refuses to emerge
+ # some packages if there's no package.use file describing them.
+ # To make it worse, compilation flags shall also be present there
+ # for some packages. If USE is not perfect, error/warning messages
+ # like those are shown:
+ #
+ # !!! The following binary packages have been ignored due to non matching USE:
+ #
+ # =media-gfx/graphviz-12.2.1-r1 X pdf -python_single_target_python3_13 qt6 svg
+ # =media-gfx/graphviz-12.2.1-r1 X pdf python_single_target_python3_12 -python_single_target_python3_13 qt6 svg
+ # =media-gfx/graphviz-12.2.1-r1 X pdf qt6 svg
+ # =media-gfx/graphviz-12.2.1-r1 X pdf -python_single_target_python3_10 qt6 svg
+ # =media-gfx/graphviz-12.2.1-r1 X pdf -python_single_target_python3_10 python_single_target_python3_12 -python_single_target_python3_13 qt6 svg
+ # =media-fonts/noto-cjk-20190416 X
+ # =app-text/texlive-core-2024-r1 X cjk -xetex
+ # =app-text/texlive-core-2024-r1 X -xetex
+ # =app-text/texlive-core-2024-r1 -xetex
+ # =dev-libs/zziplib-0.13.79-r1 sdl
+ #
+ # And will ignore such packages, installing the remaining ones. That
+ # affects mostly the image extension and PDF generation.
+
+ # Package dependencies and the minimal needed args:
+ portages = {
+ "graphviz": "media-gfx/graphviz",
+ "imagemagick": "media-gfx/imagemagick",
+ "media-libs": "media-libs/harfbuzz icu",
+ "media-fonts": "media-fonts/noto-cjk",
+ "texlive": "app-text/texlive-core xetex",
+ "zziblib": "dev-libs/zziplib sdl",
+ }
+
+ extra_cmds = ""
+ if not self.distro_msg:
+ self.distro_msg = "Note: Gentoo requires package.use to be adjusted before emerging packages"
+
+ use_base = "/etc/portage/package.use"
+ files = glob(f"{use_base}/*")
+
+ for fname, portage in portages.items():
+ install = False
+
+ while install is False:
+ if not files:
+ # No files under package.use. Install all
+ install = True
+ break
+
+ args = portage.split(" ")
+
+ name = args.pop(0)
+
+ cmd = ["grep", "-l", "-E", rf"^{name}\b" ] + files
+ result = self.run(cmd, stdout=subprocess.PIPE, text=True)
+ if result.returncode or not result.stdout.strip():
+ # File containing portage name not found
+ install = True
+ break
+
+ # Ensure that needed USE flags are present
+ if args:
+ match_fname = result.stdout.strip()
+ with open(match_fname, 'r', encoding='utf8',
+ errors='backslashreplace') as fp:
+ for line in fp:
+ for arg in args:
+ if arg.startswith("-"):
+ continue
+
+ if not re.search(rf"\s*{arg}\b", line):
+ # Needed file argument not found
+ install = True
+ break
+
+ # Everything looks ok, don't install
+ break
+
+ # Emit a command to set up the missing USE flags
+ if install:
+ extra_cmds += (f"sudo su -c 'echo \"{portage}\" > {use_base}/{fname}'\n")
+
+ # Now, we can use emerge and let it respect USE
+ return self.get_install_progs(progs,
+ "emerge --ask --changed-use --binpkg-respect-use=y",
+ extra_cmds)
+
+ def get_install(self):
+ """
+ OS-specific hints logic. Seeks for a hinter. If found, use it to
+ provide package-manager specific install commands.
+
+ Otherwise, outputs install instructions for the meta-packages.
+
+ Returns a string with the command to be executed to install the
+ needed packages, if the distro was found. Otherwise, returns just
+ a list of packages that require installation.
+ """
+ os_hints = {
+ re.compile("Red Hat Enterprise Linux"): self.give_redhat_hints,
+ re.compile("Fedora"): self.give_redhat_hints,
+ re.compile("AlmaLinux"): self.give_redhat_hints,
+ re.compile("Amazon Linux"): self.give_redhat_hints,
+ re.compile("CentOS"): self.give_redhat_hints,
+ re.compile("openEuler"): self.give_redhat_hints,
+ re.compile("Oracle Linux Server"): self.give_redhat_hints,
+ re.compile("Rocky Linux"): self.give_redhat_hints,
+ re.compile("Springdale Open Enterprise"): self.give_redhat_hints,
+
+ re.compile("Ubuntu"): self.give_debian_hints,
+ re.compile("Debian"): self.give_debian_hints,
+ re.compile("Devuan"): self.give_debian_hints,
+ re.compile("Kali"): self.give_debian_hints,
+ re.compile("Mint"): self.give_debian_hints,
+
+ re.compile("openSUSE"): self.give_opensuse_hints,
+
+ re.compile("Mageia"): self.give_mageia_hints,
+ re.compile("OpenMandriva"): self.give_mageia_hints,
+
+ re.compile("Arch Linux"): self.give_arch_linux_hints,
+ re.compile("Gentoo"): self.give_gentoo_hints,
+ }
+
+ # If the OS is detected, use per-OS hint logic
+ for regex, os_hint in os_hints.items():
+ if regex.search(self.system_release):
+ return os_hint()
+
+ #
+ # Fall-back to generic hint code for other distros
+ # That's far from ideal, especially for LaTeX dependencies.
+ #
+ progs = {"sphinx-build": "sphinx"}
+ if self.pdf:
+ self.check_missing_tex()
+
+ self.distro_msg = \
+ f"I don't know distro {self.system_release}.\n" \
+ "So, I can't provide you a hint with the install procedure.\n" \
+ "There are likely missing dependencies."
+
+ return self.get_install_progs(progs, None)
+
+ #
+ # Common dependencies
+ #
+ def deactivate_help(self):
+ """
+ Print a helper message to disable a virtual environment.
+ """
+
+ print("\n If you want to exit the virtualenv, you can use:")
+ print("\tdeactivate")
+
+ def get_virtenv(self):
+ """
+ Give a hint about how to activate an already-existing virtual
+ environment containing sphinx-build.
+
+ Returns a tuple (activate_cmd_path, sphinx_version) for the
+ newest available virtual env.
+ """
+
+ cwd = os.getcwd()
+
+ activates = []
+
+ # Add all sphinx prefixes with possible version numbers
+ for p in self.virtenv_prefix:
+ activates += glob(f"{cwd}/{p}[0-9]*/bin/activate")
+
+ activates.sort(reverse=True, key=str.lower)
+
+ # Place sphinx_latest first, if it exists
+ for p in self.virtenv_prefix:
+ activates = glob(f"{cwd}/{p}*latest/bin/activate") + activates
+
+ ver = (0, 0, 0)
+ for f in activates:
+ # Discard Sphinx virtual environments that are too old
+ match = re.search(r"(\d+)\.(\d+)\.(\d+)", f)
+ if match:
+ ver = (int(match.group(1)), int(match.group(2)), int(match.group(3)))
+
+ if ver < self.min_version:
+ continue
+
+ sphinx_cmd = f.replace("activate", "sphinx-build")
+ if not os.path.isfile(sphinx_cmd):
+ continue
+
+ ver = self.get_sphinx_version(sphinx_cmd)
+
+ if not ver:
+ venv_dir = f.replace("/bin/activate", "")
+ print(f"Warning: virtual environment {venv_dir} is not working.\n" \
+ "Python version upgrade? Remove it with:\n\n" \
+ "\trm -rf {venv_dir}\n\n")
+ else:
+ if self.need_sphinx and ver >= self.min_version:
+ return (f, ver)
+ elif ver > self.cur_version:
+ return (f, ver)
+
+ return ("", ver)
+
+ def recommend_sphinx_upgrade(self):
+ """
+ Check if Sphinx needs to be upgraded.
+
+ Returns a tuple with the highest available Sphinx version if found.
+ Otherwise, returns None to indicate either that no upgrade is needed
+ or no venv was found.
+ """
+
+ # Avoid running sphinx-builds from venv if cur_version is good
+ if self.cur_version and self.cur_version >= RECOMMENDED_VERSION:
+ self.latest_avail_ver = self.cur_version
+ return None
+
+ # Get the highest version from sphinx_*/bin/sphinx-build and the
+ # corresponding command to activate the venv/virtenv
+ self.activate_cmd, self.venv_ver = self.get_virtenv()
+
+ # Store the highest version from Sphinx existing virtualenvs
+ if self.activate_cmd and self.venv_ver > self.cur_version:
+ self.latest_avail_ver = self.venv_ver
+ else:
+ if self.cur_version:
+ self.latest_avail_ver = self.cur_version
+ else:
+ self.latest_avail_ver = (0, 0, 0)
+
+ # As we don't know package version of Sphinx, and there's no
+ # virtual environments, don't check if upgrades are needed
+ if not self.virtualenv:
+ if not self.latest_avail_ver:
+ return None
+
+ return self.latest_avail_ver
+
+ # Either there is already a virtual env, or a new one should be created
+ self.need_pip = True
+
+ if not self.latest_avail_ver:
+ return None
+
+ # Return if the reason is due to an upgrade or not
+ if self.latest_avail_ver != (0, 0, 0):
+ if self.latest_avail_ver < RECOMMENDED_VERSION:
+ self.rec_sphinx_upgrade = 1
+
+ return self.latest_avail_ver
+
+ def recommend_package(self):
+ """
+ Recommend installing Sphinx as a distro-specific package.
+ """
+
+ print("\n2) As a package with:")
+
+ old_need = self.deps.need
+ old_optional = self.deps.optional
+
+ self.pdf = False
+ self.deps.optional = 0
+ old_verbose = self.verbose_warn_install
+ self.verbose_warn_install = 0
+
+ self.deps.clear_deps()
+
+ self.deps.add_package("python-sphinx", DepManager.PYTHON_MANDATORY)
+
+ cmd = self.get_install()
+ if cmd:
+ print(cmd)
+
+ self.deps.need = old_need
+ self.deps.optional = old_optional
+ self.verbose_warn_install = old_verbose
+
+ def recommend_sphinx_version(self, virtualenv_cmd):
+ """
+ Provide recommendations for installing or upgrading Sphinx based
+ on current version.
+
+ The logic here is complex, as it has to deal with different versions:
+
+ - minimal supported version;
+ - minimal PDF version;
+ - recommended version.
+
+ It also needs to work fine with both distro's package and
+ venv/virtualenv
+ """
+
+ if self.recommend_python:
+ cur_ver = sys.version_info[:3]
+ if cur_ver < MIN_PYTHON_VERSION:
+ print(f"\nPython version {cur_ver} is incompatible with doc build.\n" \
+ "Please upgrade it and re-run.\n")
+ return
+
+ # Version is OK. Nothing to do.
+ if self.cur_version != (0, 0, 0) and self.cur_version >= RECOMMENDED_VERSION:
+ return
+
+ if self.latest_avail_ver:
+ latest_avail_ver = PythonVersion.ver_str(self.latest_avail_ver)
+
+ if not self.need_sphinx:
+ # sphinx-build is present and its version is >= $min_version
+
+ # only recommend enabling a newer virtenv version if it makes sense.
+ if self.latest_avail_ver and self.latest_avail_ver > self.cur_version:
+ print(f"\nYou may also use the newer Sphinx version {latest_avail_ver} with:")
+ if f"{self.virtenv_prefix}" in os.getcwd():
+ print("\tdeactivate")
+ print(f"\t. {self.activate_cmd}")
+ self.deactivate_help()
+ return
+
+ if self.latest_avail_ver and self.latest_avail_ver >= RECOMMENDED_VERSION:
+ return
+
+ if not self.virtualenv:
+ # No sphinx either via package or via virtenv. As we can't
+ # compare the versions here, just return, recommending the
+ # user to install it from the distro package.
+ if not self.latest_avail_ver or self.latest_avail_ver == (0, 0, 0):
+ return
+
+ # The user doesn't want a virtenv recommendation, but a newer
+ # version is already installed via virtenv.
+ # So, print commands to enable it
+ if self.latest_avail_ver > self.cur_version:
+ print(f"\nYou may also use the Sphinx virtualenv version {latest_avail_ver} with:")
+ if f"{self.virtenv_prefix}" in os.getcwd():
+ print("\tdeactivate")
+ print(f"\t. {self.activate_cmd}")
+ self.deactivate_help()
+ return
+ print("\n")
+ else:
+ if self.need_sphinx:
+ self.deps.need += 1
+
+ # Suggest newer versions if current ones are too old
+ if self.latest_avail_ver and self.latest_avail_ver >= self.min_version:
+ if self.latest_avail_ver >= RECOMMENDED_VERSION:
+ print(f"\nNeed to activate Sphinx (version {latest_avail_ver}) on virtualenv with:")
+ print(f"\t. {self.activate_cmd}")
+ self.deactivate_help()
+ return
+
+ # Version is above the minimal required one, but may be
+ # below the recommended one. So, print warnings/notes
+ if self.latest_avail_ver < RECOMMENDED_VERSION:
+ print(f"Warning: It is recommended at least Sphinx version {RECOMMENDED_VERSION}.")
+
+ # At this point, either it needs Sphinx or upgrade is recommended,
+ # both via pip
+
+ if self.rec_sphinx_upgrade:
+ if not self.virtualenv:
+ print("Instead of install/upgrade Python Sphinx pkg, you could use pip/pypi with:\n\n")
+ else:
+ print("To upgrade Sphinx, use:\n\n")
+ else:
+ print("\nSphinx needs to be installed either:\n1) via pip/pypi with:\n")
+
+ if not virtualenv_cmd:
+ print(" Currently not possible.\n")
+ print(" Please upgrade Python to a newer version and run this script again")
+ else:
+ print(f"\t{virtualenv_cmd} {self.virtenv_dir}")
+ print(f"\t. {self.virtenv_dir}/bin/activate")
+ print(f"\tpip install -r {self.requirement_file}")
+ self.deactivate_help()
+
+ if self.package_supported:
+ self.recommend_package()
+
+ print("\n" \
+ " Please note that Sphinx currentlys produce false-positive\n" \
+ " warnings when the same name is used for more than one type (functions,\n" \
+ " structs, enums,...). This is known Sphinx bug. For more details, see:\n" \
+ "\thttps://github.com/sphinx-doc/sphinx/pull/8313")
+
+ def check_needs(self):
+ """
+ Main method that checks needed dependencies and provides
+ recommendations.
+ """
+ self.python_cmd = sys.executable
+
+ # Check if Sphinx is already accessible from current environment
+ self.check_sphinx(self.conf)
+
+ if self.system_release:
+ print(f"Detected OS: {self.system_release}.")
+ else:
+ print("Unknown OS")
+ if self.cur_version != (0, 0, 0):
+ ver = PythonVersion.ver_str(self.cur_version)
+ print(f"Sphinx version: {ver}\n")
+
+ # Check the type of virtual env, depending on Python version
+ virtualenv_cmd = None
+
+ if sys.version_info < MIN_PYTHON_VERSION:
+ min_ver = PythonVersion.ver_str(MIN_PYTHON_VERSION)
+ print(f"ERROR: at least python {min_ver} is required to build the kernel docs")
+ self.need_sphinx = 1
+
+ self.venv_ver = self.recommend_sphinx_upgrade()
+
+ if self.need_pip:
+ if sys.version_info < MIN_PYTHON_VERSION:
+ self.need_pip = False
+ print("Warning: python version is not supported.")
+ else:
+ virtualenv_cmd = f"{self.python_cmd} -m venv"
+ self.check_python_module("ensurepip")
+
+ # Check for needed programs/tools
+ self.check_perl_module("Pod::Usage", DepManager.SYSTEM_MANDATORY)
+
+ self.check_program("make", DepManager.SYSTEM_MANDATORY)
+ self.check_program("which", DepManager.SYSTEM_MANDATORY)
+
+ self.check_program("dot", DepManager.SYSTEM_OPTIONAL)
+ self.check_program("convert", DepManager.SYSTEM_OPTIONAL)
+
+ self.check_python_module("yaml")
+
+ if self.pdf:
+ self.check_program("xelatex", DepManager.PDF_MANDATORY)
+ self.check_program("rsvg-convert", DepManager.PDF_MANDATORY)
+ self.check_program("latexmk", DepManager.PDF_MANDATORY)
+
+ # Do distro-specific checks and output distro-install commands
+ cmd = self.get_install()
+ if cmd:
+ print(cmd)
+
+ # If distro requires some special instructions, print here.
+ # Please notice that get_install() needs to be called first.
+ if self.distro_msg:
+ print("\n" + self.distro_msg)
+
+ if not self.python_cmd:
+ if self.deps.need == 1:
+ sys.exit("Can't build as 1 mandatory dependency is missing")
+ elif self.deps.need:
+ sys.exit(f"Can't build as {self.deps.need} mandatory dependencies are missing")
+
+ # Check if sphinx-build is called sphinx-build-3
+ if self.need_symlink:
+ sphinx_path = self.which("sphinx-build-3")
+ if sphinx_path:
+ print(f"\tsudo ln -sf {sphinx_path} /usr/bin/sphinx-build\n")
+
+ self.recommend_sphinx_version(virtualenv_cmd)
+ print("")
+
+ if not self.deps.optional:
+ print("All optional dependencies are met.")
+
+ if self.deps.need == 1:
+ sys.exit("Can't build as 1 mandatory dependency is missing")
+ elif self.deps.need:
+ sys.exit(f"Can't build as {self.deps.need} mandatory dependencies are missing")
+
+ print("Needed package dependencies are met.")
+
+DESCRIPTION = """
+Process some flags related to Sphinx installation and documentation build.
+"""
+
+
+def main():
+ """Main function"""
+ parser = argparse.ArgumentParser(description=DESCRIPTION)
+
+ parser.add_argument(
+ "--no-virtualenv",
+ action="store_false",
+ dest="virtualenv",
+ help="Recommend installing Sphinx instead of using a virtualenv",
+ )
+
+ parser.add_argument(
+ "--no-pdf",
+ action="store_false",
+ dest="pdf",
+ help="Don't check for dependencies required to build PDF docs",
+ )
+
+ parser.add_argument(
+ "--version-check",
+ action="store_true",
+ dest="version_check",
+ help="If version is compatible, don't check for missing dependencies",
+ )
+
+ args = parser.parse_args()
+
+ checker = SphinxDependencyChecker(args)
+
+ PythonVersion.check_python(MIN_PYTHON_VERSION,
+ bail_out=True, success_on_error=True)
+ checker.check_needs()
+
+# Call main if not used as module
+if __name__ == "__main__":
+ main()
diff --git a/tools/docs/test_doc_build.py b/tools/docs/test_doc_build.py
new file mode 100755
index 000000000000..47b4606569f9
--- /dev/null
+++ b/tools/docs/test_doc_build.py
@@ -0,0 +1,513 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+#
+# pylint: disable=R0903,R0912,R0913,R0914,R0917,C0301
+
+"""
+Install minimal supported requirements for different Sphinx versions
+and optionally test the build.
+"""
+
+import argparse
+import asyncio
+import os.path
+import shutil
+import sys
+import time
+import subprocess
+
+# Minimal python version supported by the building system.
+
+PYTHON = os.path.basename(sys.executable)
+
+min_python_bin = None
+
+for i in range(9, 13):
+ p = f"python3.{i}"
+ if shutil.which(p):
+ min_python_bin = p
+ break
+
+if not min_python_bin:
+ min_python_bin = PYTHON
+
+# Starting from 8.0, Python 3.9 is not supported anymore.
+PYTHON_VER_CHANGES = {(8, 0, 0): PYTHON}
+
+DEFAULT_VERSIONS_TO_TEST = [
+ (3, 4, 3), # Minimal supported version
+ (5, 3, 0), # CentOS Stream 9 / AlmaLinux 9
+ (6, 1, 1), # Debian 12
+ (7, 2, 1), # openSUSE Leap 15.6
+ (7, 2, 6), # Ubuntu 24.04 LTS
+ (7, 3, 0), # openSUSE Tumbleweed
+ (7, 4, 7), # Ubuntu 24.10
+ (8, 1, 3), # Fedora 42
+ (8, 2, 3) # Latest version - covers rolling distros
+]
+
+# Sphinx versions to be installed and their incremental requirements
+SPHINX_REQUIREMENTS = {
+ # Oldest versions we support for each package required by Sphinx 3.4.3
+ (3, 4, 3): {
+ "docutils": "0.16",
+ "alabaster": "0.7.12",
+ "babel": "2.8.0",
+ "certifi": "2020.6.20",
+ "docutils": "0.16",
+ "idna": "2.10",
+ "imagesize": "1.2.0",
+ "Jinja2": "2.11.2",
+ "MarkupSafe": "1.1.1",
+ "packaging": "20.4",
+ "Pygments": "2.6.1",
+ "PyYAML": "5.1",
+ "requests": "2.24.0",
+ "snowballstemmer": "2.0.0",
+ "sphinxcontrib-applehelp": "1.0.2",
+ "sphinxcontrib-devhelp": "1.0.2",
+ "sphinxcontrib-htmlhelp": "1.0.3",
+ "sphinxcontrib-jsmath": "1.0.1",
+ "sphinxcontrib-qthelp": "1.0.3",
+ "sphinxcontrib-serializinghtml": "1.1.4",
+ "urllib3": "1.25.9",
+ },
+
+ # Update package dependencies to a more modern base. The goal here
+ # is to avoid too many incremental changes for the next entries
+ (3, 5, 0): {
+ "alabaster": "0.7.13",
+ "babel": "2.17.0",
+ "certifi": "2025.6.15",
+ "idna": "3.10",
+ "imagesize": "1.4.1",
+ "packaging": "25.0",
+ "Pygments": "2.8.1",
+ "requests": "2.32.4",
+ "snowballstemmer": "3.0.1",
+ "sphinxcontrib-applehelp": "1.0.4",
+ "sphinxcontrib-htmlhelp": "2.0.1",
+ "sphinxcontrib-serializinghtml": "1.1.5",
+ "urllib3": "2.0.0",
+ },
+
+ # Starting from here, ensure all docutils versions are covered with
+ # supported Sphinx versions. Other packages are upgraded only when
+ # required by pip
+ (4, 0, 0): {
+ "PyYAML": "5.1",
+ },
+ (4, 1, 0): {
+ "docutils": "0.17",
+ "Pygments": "2.19.1",
+ "Jinja2": "3.0.3",
+ "MarkupSafe": "2.0",
+ },
+ (4, 3, 0): {},
+ (4, 4, 0): {},
+ (4, 5, 0): {
+ "docutils": "0.17.1",
+ },
+ (5, 0, 0): {},
+ (5, 1, 0): {},
+ (5, 2, 0): {
+ "docutils": "0.18",
+ "Jinja2": "3.1.2",
+ "MarkupSafe": "2.0",
+ "PyYAML": "5.3.1",
+ },
+ (5, 3, 0): {
+ "docutils": "0.18.1",
+ },
+ (6, 0, 0): {},
+ (6, 1, 0): {},
+ (6, 2, 0): {
+ "PyYAML": "5.4.1",
+ },
+ (7, 0, 0): {},
+ (7, 1, 0): {},
+ (7, 2, 0): {
+ "docutils": "0.19",
+ "PyYAML": "6.0.1",
+ "sphinxcontrib-serializinghtml": "1.1.9",
+ },
+ (7, 2, 6): {
+ "docutils": "0.20",
+ },
+ (7, 3, 0): {
+ "alabaster": "0.7.14",
+ "PyYAML": "6.0.1",
+ "tomli": "2.0.1",
+ },
+ (7, 4, 0): {
+ "docutils": "0.20.1",
+ "PyYAML": "6.0.1",
+ },
+ (8, 0, 0): {
+ "docutils": "0.21",
+ },
+ (8, 1, 0): {
+ "docutils": "0.21.1",
+ "PyYAML": "6.0.1",
+ "sphinxcontrib-applehelp": "1.0.7",
+ "sphinxcontrib-devhelp": "1.0.6",
+ "sphinxcontrib-htmlhelp": "2.0.6",
+ "sphinxcontrib-qthelp": "1.0.6",
+ },
+ (8, 2, 0): {
+ "docutils": "0.21.2",
+ "PyYAML": "6.0.1",
+ "sphinxcontrib-serializinghtml": "1.1.9",
+ },
+}
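+
+# The table above is incremental: the package set for a given Sphinx
+# version is the merge of all entries up to that version, in ascending
+# order. A minimal sketch of that logic (an assumed helper, not part of
+# this script):
+#
+#   def requirements_for(target):
+#       reqs = {}
+#       for ver in sorted(SPHINX_REQUIREMENTS):
+#           if ver > target:
+#               break
+#           reqs.update(SPHINX_REQUIREMENTS[ver])
+#       return reqs
+#
+#   requirements_for((5, 3, 0))["docutils"]   # -> "0.18.1"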
+
+
+class AsyncCommands:
+ """Excecute command synchronously"""
+
+ def __init__(self, fp=None):
+
+ self.stdout = None
+ self.stderr = None
+ self.output = None
+ self.fp = fp
+
+ def log(self, out, verbose, is_info=True):
+ out = out.removesuffix('\n')
+
+ if verbose:
+ if is_info:
+ print(out)
+ else:
+ print(out, file=sys.stderr)
+
+ if self.fp:
+ self.fp.write(out + "\n")
+
+ async def _read(self, stream, verbose, is_info):
+ """Ancillary routine to capture while displaying"""
+
+ while stream is not None:
+ line = await stream.readline()
+ if line:
+ out = line.decode("utf-8", errors="backslashreplace")
+ self.log(out, verbose, is_info)
+ if is_info:
+ self.stdout += out
+ else:
+ self.stderr += out
+ else:
+ break
+
+ async def run(self, cmd, capture_output=False, check=False,
+ env=None, verbose=True):
+
+ """
+ Execute an arbitrary command, handling errors.
+
+ Note that this class is not thread-safe.
+ """
+
+ self.stdout = ""
+ self.stderr = ""
+
+ self.log("$ " + " ".join(cmd), verbose)
+
+ proc = await asyncio.create_subprocess_exec(cmd[0],
+ *cmd[1:],
+ env=env,
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE)
+
+ # Handle input and output in realtime
+ await asyncio.gather(
+ self._read(proc.stdout, verbose, True),
+ self._read(proc.stderr, verbose, False),
+ )
+
+ await proc.wait()
+
+ if check and proc.returncode != 0:
+ raise subprocess.CalledProcessError(returncode=proc.returncode,
+ cmd=" ".join(cmd),
+ output=self.stdout,
+ stderr=self.stderr)
+
+ if capture_output:
+ if proc.returncode != 0:
+ self.log(f"Error {proc.returncode}", verbose=True, is_info=False)
+ return ""
+
+ return self.stdout
+
+ ret = subprocess.CompletedProcess(args=cmd,
+ returncode=proc.returncode,
+ stdout=self.stdout,
+ stderr=self.stderr)
+
+ return ret
+
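+# A minimal usage sketch for AsyncCommands (illustrative only):
+#
+#     async def demo():
+#         cmd = AsyncCommands()
+#         ret = await cmd.run(["echo", "hello"], check=True)
+#         return ret.stdout        # "hello\n"
+#
+#     asyncio.run(demo())
+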
+
+class SphinxVenv:
+ """
+ Installs Sphinx on one virtual env per Sphinx version with a minimal
+ set of dependencies, adjusting them to each specific version.
+ """
+
+ def __init__(self):
+ """Initialize instance variables"""
+
+ self.built_time = {}
+ self.first_run = True
+
+ async def _handle_version(self, args, fp,
+ cur_ver, cur_requirements, python_bin):
+ """Handle a single Sphinx version"""
+
+ cmd = AsyncCommands(fp)
+
+ ver = ".".join(map(str, cur_ver))
+
+ if not self.first_run and args.wait_input and args.build:
+ ret = input("Press Enter to continue or 'a' to abort: ").strip().lower()
+ if ret == "a":
+ print("Aborted.")
+ sys.exit()
+ else:
+ self.first_run = False
+
+ venv_dir = f"Sphinx_{ver}"
+ req_file = f"requirements_{ver}.txt"
+
+ cmd.log(f"\nSphinx {ver} with {python_bin}", verbose=True)
+
+ # Create venv
+ await cmd.run([python_bin, "-m", "venv", venv_dir],
+ verbose=args.verbose, check=True)
+ pip = os.path.join(venv_dir, "bin/pip")
+
+ # Create install list
+ reqs = []
+ for pkg, verstr in cur_requirements.items():
+ reqs.append(f"{pkg}=={verstr}")
+
+ reqs.append(f"Sphinx=={ver}")
+
+ await cmd.run([pip, "install"] + reqs, check=True, verbose=args.verbose)
+
+ # Freeze environment
+ result = await cmd.run([pip, "freeze"], verbose=False, check=True)
+
+ # Pip install succeeded. Write requirements file
+ if args.req_file:
+ with open(req_file, "w", encoding="utf-8") as f:
+ f.write(result.stdout)
+
+ if args.build:
+ start_time = time.time()
+
+ # Prepare a venv environment
+ env = os.environ.copy()
+ bin_dir = os.path.join(venv_dir, "bin")
+ env["PATH"] = bin_dir + ":" + env["PATH"]
+ env["VIRTUAL_ENV"] = venv_dir
+ if "PYTHONHOME" in env:
+ del env["PYTHONHOME"]
+
+ # Test doc build
+ await cmd.run(["make", "cleandocs"], env=env, check=True)
+ make = ["make"]
+
+ if args.output:
+ sphinx_build = os.path.realpath(f"{bin_dir}/sphinx-build")
+ make += [f"O={args.output}", f"SPHINXBUILD={sphinx_build}"]
+
+ if args.make_args:
+ make += args.make_args
+
+ make += args.targets
+
+ if args.verbose:
+ cmd.log(f". {bin_dir}/activate", verbose=True)
+ await cmd.run(make, env=env, check=True, verbose=True)
+ if args.verbose:
+ cmd.log("deactivate", verbose=True)
+
+ end_time = time.time()
+ elapsed_time = end_time - start_time
+ hours, rem = divmod(elapsed_time, 3600)
+ minutes, seconds = divmod(rem, 60)
+
+ hours = int(hours)
+ minutes = int(minutes)
+ seconds = int(seconds)
+
+ self.built_time[ver] = f"{hours:02d}:{minutes:02d}:{seconds:02d}"
+
+ cmd.log(f"Finished doc build for Sphinx {ver}. Elapsed time: {self.built_time[ver]}", verbose=True)
+
+ async def run(self, args):
+ """
+ Navigate through multiple Sphinx versions, handling each of them
+ in a loop.
+ """
+
+ if args.log:
+ fp = open(args.log, "w", encoding="utf-8")
+ else:
+ fp = None
+ if not args.verbose:
+ args.verbose = True
+
+ cur_requirements = {}
+ python_bin = min_python_bin
+
+ vers = set(SPHINX_REQUIREMENTS.keys()) | set(args.versions)
+
+ for cur_ver in sorted(vers):
+ if cur_ver in SPHINX_REQUIREMENTS:
+ new_reqs = SPHINX_REQUIREMENTS[cur_ver]
+ cur_requirements.update(new_reqs)
+
+ if cur_ver in PYTHON_VER_CHANGES: # pylint: disable=R1715
+ python_bin = PYTHON_VER_CHANGES[cur_ver]
+
+ if cur_ver not in args.versions:
+ continue
+
+ if args.min_version:
+ if cur_ver < args.min_version:
+ continue
+
+ if args.max_version:
+ if cur_ver > args.max_version:
+ break
+
+ await self._handle_version(args, fp, cur_ver, cur_requirements,
+ python_bin)
+
+ if args.build:
+ cmd = AsyncCommands(fp)
+ cmd.log("\nSummary:", verbose=True)
+ for ver, elapsed_time in sorted(self.built_time.items()):
+ cmd.log(f"\tSphinx {ver} elapsed time: {elapsed_time}",
+ verbose=True)
+
+ if fp:
+ fp.close()
+
+def parse_version(ver_str):
+ """Convert a version string into a tuple."""
+
+ return tuple(map(int, ver_str.split(".")))
+
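+# Version tuples compare element by element, so parsed versions order
+# naturally, e.g. parse_version("7.4.7") < parse_version("8.1.3").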
+
+DEFAULT_VERS = " - "
+DEFAULT_VERS += "\n - ".join(map(lambda v: f"{v[0]}.{v[1]}.{v[2]}",
+ DEFAULT_VERSIONS_TO_TEST))
+
+SCRIPT = os.path.relpath(__file__)
+
+DESCRIPTION = f"""
+This tool allows creating Python virtual environments for different
+Sphinx versions that are supported by the Linux Kernel build system.
+
+Besides creating the virtual environment, it can also test building
+the documentation using "make htmldocs" (and/or other doc targets).
+
+If called without the "--versions" argument, it covers the versions shipped
+on major distros, plus the lowest supported version:
+
+{DEFAULT_VERS}
+
+A typical usage is to run:
+
+ {SCRIPT} -m -l sphinx_builds.log
+
+This will create one virtual env for the default version set and run
+"make htmldocs" for each version, creating a log file with the
+executed commands in it.
+
+NOTE: The build time can be very long, especially on old versions. Also, there
+is a known bug with Sphinx version 6.0.x: each subprocess uses a lot of
+memory. That, together with "-jauto", may cause the OOM killer to trigger
+failures during doc generation. To minimize the risk, you may use the
+"-a" command line parameter to restrict the directories to be built and/or
+reduce the number of threads from "-jauto" to, for instance, "-j4":
+
+ {SCRIPT} -m -V 6.0.1 -a "SPHINXDIRS=process" "SPHINXOPTS='-j4'"
+
+"""
+
+MAKE_TARGETS = [
+ "htmldocs",
+ "texinfodocs",
+ "infodocs",
+ "latexdocs",
+ "pdfdocs",
+ "epubdocs",
+ "xmldocs",
+]
+
+async def main():
+ """Main program"""
+
+ parser = argparse.ArgumentParser(description=DESCRIPTION,
+ formatter_class=argparse.RawDescriptionHelpFormatter)
+
+ ver_group = parser.add_argument_group("Version range options")
+
+ ver_group.add_argument('-V', '--versions', nargs="*",
+ default=DEFAULT_VERSIONS_TO_TEST, type=parse_version,
+ help='Sphinx versions to test')
+ ver_group.add_argument('--min-version', "--min", type=parse_version,
+ help='Sphinx minimal version')
+ ver_group.add_argument('--max-version', "--max", type=parse_version,
+ help='Sphinx maximum version')
+ ver_group.add_argument('-f', '--full', action='store_true',
+ help='Add all supported Sphinx (major,minor) versions to the version range')
+
+ build_group = parser.add_argument_group("Build options")
+
+ build_group.add_argument('-b', '--build', action='store_true',
+ help='Build documentation')
+ build_group.add_argument('-a', '--make-args', nargs="*",
+ help='extra arguments for make, like SPHINXDIRS=netlink/specs',
+ )
+ build_group.add_argument('-t', '--targets', nargs="+", choices=MAKE_TARGETS,
+ default=[MAKE_TARGETS[0]],
+ help="make build targets. Default: htmldocs.")
+ build_group.add_argument("-o", '--output',
+ help="output directory for the make O=OUTPUT")
+
+ other_group = parser.add_argument_group("Other options")
+
+ other_group.add_argument('-r', '--req-file', action='store_true',
+ help='write a requirements.txt file')
+ other_group.add_argument('-l', '--log',
+ help='Log command output to a file')
+ other_group.add_argument('-v', '--verbose', action='store_true',
+ help='Verbose output for all commands')
+ other_group.add_argument('-i', '--wait-input', action='store_true',
+ help='Wait for Enter before going to the next version')
+
+ args = parser.parse_args()
+
+ if not args.make_args:
+ args.make_args = []
+
+ if args.full:
+ args.versions += list(SPHINX_REQUIREMENTS.keys())
+
+ venv = SphinxVenv()
+ await venv.run(args)
+
+
+# Call the main function
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/tools/include/nolibc/Makefile b/tools/include/nolibc/Makefile
index 143c2d2c2ba6..8118e22844f1 100644
--- a/tools/include/nolibc/Makefile
+++ b/tools/include/nolibc/Makefile
@@ -23,7 +23,7 @@ else
Q=@
endif
-arch_file := arch-$(ARCH).h
+arch_files := arch.h $(wildcard arch-*.h)
all_files := \
compiler.h \
crt.h \
@@ -33,6 +33,7 @@ all_files := \
errno.h \
fcntl.h \
getopt.h \
+ inttypes.h \
limits.h \
math.h \
nolibc.h \
@@ -56,12 +57,14 @@ all_files := \
sys/random.h \
sys/reboot.h \
sys/resource.h \
+ sys/select.h \
sys/stat.h \
sys/syscall.h \
sys/sysmacros.h \
sys/time.h \
sys/timerfd.h \
sys/types.h \
+ sys/uio.h \
sys/utsname.h \
sys/wait.h \
time.h \
@@ -79,7 +82,7 @@ help:
@echo "Supported targets under nolibc:"
@echo " all call \"headers\""
@echo " clean clean the sysroot"
- @echo " headers prepare a sysroot in tools/include/nolibc/sysroot"
+ @echo " headers prepare a multi-arch sysroot in \$${OUTPUT}sysroot"
@echo " headers_standalone like \"headers\", and also install kernel headers"
@echo " help this help"
@echo ""
@@ -90,18 +93,11 @@ help:
@echo " OUTPUT = $(OUTPUT)"
@echo ""
+# installs headers for all archs at once.
headers:
- $(Q)mkdir -p $(OUTPUT)sysroot
- $(Q)mkdir -p $(OUTPUT)sysroot/include
- $(Q)cp --parents $(all_files) $(OUTPUT)sysroot/include/
- $(Q)if [ "$(ARCH)" = "i386" -o "$(ARCH)" = "x86_64" ]; then \
- cat arch-x86.h; \
- elif [ -e "$(arch_file)" ]; then \
- cat $(arch_file); \
- else \
- echo "Fatal: architecture $(ARCH) not yet supported by nolibc." >&2; \
- exit 1; \
- fi > $(OUTPUT)sysroot/include/arch.h
+ $(Q)mkdir -p "$(OUTPUT)sysroot"
+ $(Q)mkdir -p "$(OUTPUT)sysroot/include"
+ $(Q)cp --parents $(arch_files) $(all_files) "$(OUTPUT)sysroot/include/"
headers_standalone: headers
$(Q)$(MAKE) -C $(srctree) headers
diff --git a/tools/include/nolibc/arch-arm.h b/tools/include/nolibc/arch-arm.h
index 1f66e7e5a444..251c42579028 100644
--- a/tools/include/nolibc/arch-arm.h
+++ b/tools/include/nolibc/arch-arm.h
@@ -184,6 +184,7 @@
_arg1; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -193,5 +194,6 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_ARM_H */
diff --git a/tools/include/nolibc/arch-arm64.h b/tools/include/nolibc/arch-arm64.h
index 02a3f74c8ec8..080a55a7144e 100644
--- a/tools/include/nolibc/arch-arm64.h
+++ b/tools/include/nolibc/arch-arm64.h
@@ -141,6 +141,7 @@
_arg1; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -150,4 +151,5 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_ARM64_H */
diff --git a/tools/include/nolibc/arch-loongarch.h b/tools/include/nolibc/arch-loongarch.h
index 5511705303ea..c894176c3f89 100644
--- a/tools/include/nolibc/arch-loongarch.h
+++ b/tools/include/nolibc/arch-loongarch.h
@@ -142,6 +142,7 @@
_arg1; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -151,5 +152,6 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_LOONGARCH_H */
diff --git a/tools/include/nolibc/arch-m68k.h b/tools/include/nolibc/arch-m68k.h
index 6dac1845f298..2a4fbada5e79 100644
--- a/tools/include/nolibc/arch-m68k.h
+++ b/tools/include/nolibc/arch-m68k.h
@@ -128,6 +128,7 @@
_num; \
})
+#ifndef NOLIBC_NO_RUNTIME
void _start(void);
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -137,5 +138,6 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_M68K_H */
diff --git a/tools/include/nolibc/arch-mips.h b/tools/include/nolibc/arch-mips.h
index 0cbac63b249a..a72506ceec6b 100644
--- a/tools/include/nolibc/arch-mips.h
+++ b/tools/include/nolibc/arch-mips.h
@@ -245,6 +245,7 @@
#endif /* _ABIO32 */
+#ifndef NOLIBC_NO_RUNTIME
/* startup code, note that it's called __start on MIPS */
void __start(void);
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector __start(void)
@@ -266,5 +267,6 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector __
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_MIPS_H */
diff --git a/tools/include/nolibc/arch-powerpc.h b/tools/include/nolibc/arch-powerpc.h
index 204564bbcd32..e0c7e0b81f7c 100644
--- a/tools/include/nolibc/arch-powerpc.h
+++ b/tools/include/nolibc/arch-powerpc.h
@@ -183,6 +183,7 @@
#endif
#endif /* !__powerpc64__ */
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -215,5 +216,6 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
#endif
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_POWERPC_H */
diff --git a/tools/include/nolibc/arch-riscv.h b/tools/include/nolibc/arch-riscv.h
index 885383a86c38..1c00cacf57e1 100644
--- a/tools/include/nolibc/arch-riscv.h
+++ b/tools/include/nolibc/arch-riscv.h
@@ -139,6 +139,7 @@
_arg1; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -152,5 +153,6 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_RISCV_H */
diff --git a/tools/include/nolibc/arch-s390.h b/tools/include/nolibc/arch-s390.h
index 0a39bee261b9..74125a254ce3 100644
--- a/tools/include/nolibc/arch-s390.h
+++ b/tools/include/nolibc/arch-s390.h
@@ -139,6 +139,7 @@
_arg1; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -150,6 +151,7 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
struct s390_mmap_arg_struct {
unsigned long addr;
diff --git a/tools/include/nolibc/arch-sh.h b/tools/include/nolibc/arch-sh.h
index a96b8914607e..7a421197d104 100644
--- a/tools/include/nolibc/arch-sh.h
+++ b/tools/include/nolibc/arch-sh.h
@@ -140,6 +140,7 @@
_ret; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void _start_wrapper(void);
void __attribute__((weak,noreturn)) __nolibc_entrypoint __no_stack_protector _start_wrapper(void)
@@ -158,5 +159,6 @@ void __attribute__((weak,noreturn)) __nolibc_entrypoint __no_stack_protector _st
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_SH_H */
diff --git a/tools/include/nolibc/arch-sparc.h b/tools/include/nolibc/arch-sparc.h
index ca420d843e25..2ebb5686e105 100644
--- a/tools/include/nolibc/arch-sparc.h
+++ b/tools/include/nolibc/arch-sparc.h
@@ -152,6 +152,7 @@
_arg1; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -169,6 +170,7 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
static pid_t getpid(void);
diff --git a/tools/include/nolibc/arch-x86.h b/tools/include/nolibc/arch-x86.h
index d3efc0c3b8ad..f6c43ac5377b 100644
--- a/tools/include/nolibc/arch-x86.h
+++ b/tools/include/nolibc/arch-x86.h
@@ -157,6 +157,7 @@
_eax; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
/*
* i386 System V ABI mandates:
@@ -176,6 +177,7 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#else /* !defined(__x86_64__) */
@@ -323,6 +325,7 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
_ret; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
/*
* x86-64 System V ABI mandates:
@@ -340,6 +343,7 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#define NOLIBC_ARCH_HAS_MEMMOVE
void *memmove(void *dst, const void *src, size_t len);
@@ -351,7 +355,7 @@ void *memcpy(void *dst, const void *src, size_t len);
void *memset(void *dst, int c, size_t len);
__asm__ (
-".section .text.nolibc_memmove_memcpy\n"
+".pushsection .text.nolibc_memmove_memcpy\n"
".weak memmove\n"
".weak memcpy\n"
"memmove:\n"
@@ -371,8 +375,9 @@ __asm__ (
"rep movsb\n\t"
"cld\n\t"
"retq\n"
+".popsection\n"
-".section .text.nolibc_memset\n"
+".pushsection .text.nolibc_memset\n"
".weak memset\n"
"memset:\n"
"xchgl %eax, %esi\n\t"
@@ -381,6 +386,7 @@ __asm__ (
"rep stosb\n\t"
"popq %rax\n\t"
"retq\n"
+".popsection\n"
);
#endif /* !defined(__x86_64__) */
diff --git a/tools/include/nolibc/arch.h b/tools/include/nolibc/arch.h
index ef4743aad188..a3adaf433f2c 100644
--- a/tools/include/nolibc/arch.h
+++ b/tools/include/nolibc/arch.h
@@ -3,15 +3,6 @@
* Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
*/
-/* Below comes the architecture-specific code. For each architecture, we have
- * the syscall declarations and the _start code definition. This is the only
- * global part. On all architectures the kernel puts everything in the stack
- * before jumping to _start just above us, without any return address (_start
- * is not a function but an entry point). So at the stack pointer we find argc.
- * Then argv[] begins, and ends at the first NULL. Then we have envp which
- * starts and ends with a NULL as well. So envp=argv+argc+1.
- */
-
#ifndef _NOLIBC_ARCH_H
#define _NOLIBC_ARCH_H
diff --git a/tools/include/nolibc/compiler.h b/tools/include/nolibc/compiler.h
index 369cfb5a0e78..87090bbc53e0 100644
--- a/tools/include/nolibc/compiler.h
+++ b/tools/include/nolibc/compiler.h
@@ -41,8 +41,8 @@
# define __no_stack_protector __attribute__((__optimize__("-fno-stack-protector")))
#endif /* __nolibc_has_attribute(no_stack_protector) */
-#if __nolibc_has_attribute(fallthrough)
-# define __nolibc_fallthrough do { } while (0); __attribute__((fallthrough))
+#if __nolibc_has_attribute(__fallthrough__)
+# define __nolibc_fallthrough do { } while (0); __attribute__((__fallthrough__))
#else
# define __nolibc_fallthrough do { } while (0)
-#endif /* __nolibc_has_attribute(fallthrough) */
+#endif /* __nolibc_has_attribute(__fallthrough__) */
diff --git a/tools/include/nolibc/crt.h b/tools/include/nolibc/crt.h
index 961cfe777c35..d9262998dae9 100644
--- a/tools/include/nolibc/crt.h
+++ b/tools/include/nolibc/crt.h
@@ -7,6 +7,8 @@
#ifndef _NOLIBC_CRT_H
#define _NOLIBC_CRT_H
+#ifndef NOLIBC_NO_RUNTIME
+
#include "compiler.h"
char **environ __attribute__((weak));
@@ -88,4 +90,5 @@ void _start_c(long *sp)
exit(exitcode);
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_CRT_H */
diff --git a/tools/include/nolibc/dirent.h b/tools/include/nolibc/dirent.h
index 758b95c48e7a..61a122a60327 100644
--- a/tools/include/nolibc/dirent.h
+++ b/tools/include/nolibc/dirent.h
@@ -86,9 +86,9 @@ int readdir_r(DIR *dirp, struct dirent *entry, struct dirent **result)
* readdir() can only return one entry at a time.
* Make sure the non-returned ones are not skipped.
*/
- ret = lseek(fd, ldir->d_off, SEEK_SET);
- if (ret == -1)
- return errno;
+ ret = sys_lseek(fd, ldir->d_off, SEEK_SET);
+ if (ret < 0)
+ return -ret;
entry->d_ino = ldir->d_ino;
/* the destination should always be big enough */
diff --git a/tools/include/nolibc/getopt.h b/tools/include/nolibc/getopt.h
index 217abb95264b..87565e3b6a33 100644
--- a/tools/include/nolibc/getopt.h
+++ b/tools/include/nolibc/getopt.h
@@ -78,7 +78,7 @@ int getopt(int argc, char * const argv[], const char *optstring)
return '?';
}
if (optstring[i] == ':') {
- optarg = 0;
+ optarg = NULL;
if (optstring[i + 1] != ':' || __optpos) {
optarg = argv[optind++];
if (__optpos)
diff --git a/tools/include/nolibc/inttypes.h b/tools/include/nolibc/inttypes.h
new file mode 100644
index 000000000000..1977bd74bfeb
--- /dev/null
+++ b/tools/include/nolibc/inttypes.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+
+#include "nolibc.h"
diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h
index d2f5aa085f8e..272dfc961158 100644
--- a/tools/include/nolibc/nolibc.h
+++ b/tools/include/nolibc/nolibc.h
@@ -104,11 +104,13 @@
#include "sys/random.h"
#include "sys/reboot.h"
#include "sys/resource.h"
+#include "sys/select.h"
#include "sys/stat.h"
#include "sys/syscall.h"
#include "sys/sysmacros.h"
#include "sys/time.h"
#include "sys/timerfd.h"
+#include "sys/uio.h"
#include "sys/utsname.h"
#include "sys/wait.h"
#include "ctype.h"
diff --git a/tools/include/nolibc/stackprotector.h b/tools/include/nolibc/stackprotector.h
index c71a2c257177..7123aa056cb0 100644
--- a/tools/include/nolibc/stackprotector.h
+++ b/tools/include/nolibc/stackprotector.h
@@ -9,6 +9,7 @@
#include "compiler.h"
+#ifndef NOLIBC_NO_RUNTIME
#if defined(_NOLIBC_STACKPROTECTOR)
#include "sys.h"
@@ -49,5 +50,6 @@ static __no_stack_protector void __stack_chk_init(void)
#else /* !defined(_NOLIBC_STACKPROTECTOR) */
static void __stack_chk_init(void) {}
#endif /* defined(_NOLIBC_STACKPROTECTOR) */
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_STACKPROTECTOR_H */
diff --git a/tools/include/nolibc/std.h b/tools/include/nolibc/std.h
index 2c1ad23b9b5c..392f4dd94158 100644
--- a/tools/include/nolibc/std.h
+++ b/tools/include/nolibc/std.h
@@ -20,13 +20,13 @@
/* those are commonly provided by sys/types.h */
typedef unsigned int dev_t;
-typedef unsigned long ino_t;
+typedef uint64_t ino_t;
typedef unsigned int mode_t;
typedef signed int pid_t;
typedef unsigned int uid_t;
typedef unsigned int gid_t;
typedef unsigned long nlink_t;
-typedef signed long off_t;
+typedef int64_t off_t;
typedef signed long blksize_t;
typedef signed long blkcnt_t;
typedef __kernel_time_t time_t;
diff --git a/tools/include/nolibc/stdio.h b/tools/include/nolibc/stdio.h
index 7630234408c5..1f16dab2ac88 100644
--- a/tools/include/nolibc/stdio.h
+++ b/tools/include/nolibc/stdio.h
@@ -321,11 +321,13 @@ int __nolibc_printf(__nolibc_printf_cb cb, intptr_t state, size_t n, const char
if (!outstr)
outstr="(null)";
}
-#ifndef NOLIBC_IGNORE_ERRNO
else if (c == 'm') {
+#ifdef NOLIBC_IGNORE_ERRNO
+ outstr = "unknown error";
+#else
outstr = strerror(errno);
- }
#endif /* NOLIBC_IGNORE_ERRNO */
+ }
else if (c == '%') {
/* queue it verbatim */
continue;
@@ -600,7 +602,11 @@ int sscanf(const char *str, const char *format, ...)
static __attribute__((unused))
void perror(const char *msg)
{
+#ifdef NOLIBC_IGNORE_ERRNO
+ fprintf(stderr, "%s%sunknown error\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "");
+#else
fprintf(stderr, "%s%serrno=%d\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "", errno);
+#endif
}
static __attribute__((unused))
diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
index 5fd99a480f82..f184e108ed0a 100644
--- a/tools/include/nolibc/stdlib.h
+++ b/tools/include/nolibc/stdlib.h
@@ -100,6 +100,7 @@ void free(void *ptr)
munmap(heap, heap->len);
}
+#ifndef NOLIBC_NO_RUNTIME
/* getenv() tries to find the environment variable named <name> in the
* environment array pointed to by global variable "environ" which must be
* declared as a char **, and must be terminated by a NULL (it is recommended
@@ -122,6 +123,7 @@ char *getenv(const char *name)
}
return NULL;
}
+#endif /* NOLIBC_NO_RUNTIME */
static __attribute__((unused))
void *malloc(size_t len)
diff --git a/tools/include/nolibc/string.h b/tools/include/nolibc/string.h
index 163a17e7dd38..4000926f44ac 100644
--- a/tools/include/nolibc/string.h
+++ b/tools/include/nolibc/string.h
@@ -93,6 +93,21 @@ void *memset(void *dst, int b, size_t len)
}
#endif /* #ifndef NOLIBC_ARCH_HAS_MEMSET */
+#ifndef NOLIBC_ARCH_HAS_MEMCHR
+static __attribute__((unused))
+void *memchr(const void *s, int c, size_t len)
+{
+ char *p = (char *)s;
+
+ while (len--) {
+ if (*p == (char)c)
+ return p;
+ p++;
+ }
+ return NULL;
+}
+#endif /* #ifndef NOLIBC_ARCH_HAS_MEMCHR */
+
static __attribute__((unused))
char *strchr(const char *s, int c)
{
diff --git a/tools/include/nolibc/sys.h b/tools/include/nolibc/sys.h
index c5564f57deec..847af1ccbdc9 100644
--- a/tools/include/nolibc/sys.h
+++ b/tools/include/nolibc/sys.h
@@ -106,7 +106,7 @@ static __attribute__((unused))
void *sbrk(intptr_t inc)
{
/* first call to find current end */
- void *ret = sys_brk(0);
+ void *ret = sys_brk(NULL);
if (ret && sys_brk(ret + inc) == ret + inc)
return ret + inc;
@@ -118,6 +118,7 @@ void *sbrk(intptr_t inc)
/*
* int chdir(const char *path);
+ * int fchdir(int fildes);
*/
static __attribute__((unused))
@@ -132,6 +133,18 @@ int chdir(const char *path)
return __sysret(sys_chdir(path));
}
+static __attribute__((unused))
+int sys_fchdir(int fildes)
+{
+ return my_syscall1(__NR_fchdir, fildes);
+}
+
+static __attribute__((unused))
+int fchdir(int fildes)
+{
+ return __sysret(sys_fchdir(fildes));
+}
+
/*
* int chmod(const char *path, mode_t mode);
@@ -512,6 +525,7 @@ pid_t gettid(void)
return sys_gettid();
}
+#ifndef NOLIBC_NO_RUNTIME
static unsigned long getauxval(unsigned long key);
/*
@@ -523,7 +537,7 @@ int getpagesize(void)
{
return __sysret((int)getauxval(AT_PAGESZ) ?: -ENOENT);
}
-
+#endif /* NOLIBC_NO_RUNTIME */
/*
* uid_t getuid(void);
@@ -591,23 +605,20 @@ int link(const char *old, const char *new)
static __attribute__((unused))
off_t sys_lseek(int fd, off_t offset, int whence)
{
-#if defined(__NR_lseek)
- return my_syscall3(__NR_lseek, fd, offset, whence);
-#else
+#if defined(__NR_llseek)
__kernel_loff_t loff = 0;
off_t result;
int ret;
- /* Only exists on 32bit where nolibc off_t is also 32bit */
- ret = my_syscall5(__NR_llseek, fd, 0, offset, &loff, whence);
+ ret = my_syscall5(__NR_llseek, fd, offset >> 32, (uint32_t)offset, &loff, whence);
if (ret < 0)
result = ret;
- else if (loff != (off_t)loff)
- result = -EOVERFLOW;
else
result = loff;
return result;
+#else
+ return my_syscall3(__NR_lseek, fd, offset, whence);
#endif
}
@@ -756,51 +767,6 @@ int sched_yield(void)
/*
- * int select(int nfds, fd_set *read_fds, fd_set *write_fds,
- * fd_set *except_fds, struct timeval *timeout);
- */
-
-static __attribute__((unused))
-int sys_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
-{
-#if defined(__ARCH_WANT_SYS_OLD_SELECT) && !defined(__NR__newselect)
- struct sel_arg_struct {
- unsigned long n;
- fd_set *r, *w, *e;
- struct timeval *t;
- } arg = { .n = nfds, .r = rfds, .w = wfds, .e = efds, .t = timeout };
- return my_syscall1(__NR_select, &arg);
-#elif defined(__NR__newselect)
- return my_syscall5(__NR__newselect, nfds, rfds, wfds, efds, timeout);
-#elif defined(__NR_select)
- return my_syscall5(__NR_select, nfds, rfds, wfds, efds, timeout);
-#elif defined(__NR_pselect6)
- struct timespec t;
-
- if (timeout) {
- t.tv_sec = timeout->tv_sec;
- t.tv_nsec = timeout->tv_usec * 1000;
- }
- return my_syscall6(__NR_pselect6, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
-#else
- struct __kernel_timespec t;
-
- if (timeout) {
- t.tv_sec = timeout->tv_sec;
- t.tv_nsec = timeout->tv_usec * 1000;
- }
- return my_syscall6(__NR_pselect6_time64, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
-#endif
-}
-
-static __attribute__((unused))
-int select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
-{
- return __sysret(sys_select(nfds, rfds, wfds, efds, timeout));
-}
-
-
-/*
* int setpgid(pid_t pid, pid_t pgid);
*/
diff --git a/tools/include/nolibc/sys/auxv.h b/tools/include/nolibc/sys/auxv.h
index c52463d6c18d..0e98325e7347 100644
--- a/tools/include/nolibc/sys/auxv.h
+++ b/tools/include/nolibc/sys/auxv.h
@@ -10,6 +10,8 @@
#ifndef _NOLIBC_SYS_AUXV_H
#define _NOLIBC_SYS_AUXV_H
+#ifndef NOLIBC_NO_RUNTIME
+
#include "../crt.h"
static __attribute__((unused))
@@ -38,4 +40,5 @@ unsigned long getauxval(unsigned long type)
return ret;
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_SYS_AUXV_H */
diff --git a/tools/include/nolibc/sys/mman.h b/tools/include/nolibc/sys/mman.h
index 5228751b458c..77084ac3405a 100644
--- a/tools/include/nolibc/sys/mman.h
+++ b/tools/include/nolibc/sys/mman.h
@@ -31,11 +31,6 @@ void *sys_mmap(void *addr, size_t length, int prot, int flags, int fd,
}
#endif
-/* Note that on Linux, MAP_FAILED is -1 so we can use the generic __sysret()
- * which returns -1 upon error and still satisfy user land that checks for
- * MAP_FAILED.
- */
-
static __attribute__((unused))
void *mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
{
diff --git a/tools/include/nolibc/sys/reboot.h b/tools/include/nolibc/sys/reboot.h
index 4a1e435be669..38274c64a722 100644
--- a/tools/include/nolibc/sys/reboot.h
+++ b/tools/include/nolibc/sys/reboot.h
@@ -28,7 +28,7 @@ ssize_t sys_reboot(int magic1, int magic2, int cmd, void *arg)
static __attribute__((unused))
int reboot(int cmd)
{
- return __sysret(sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, 0));
+ return __sysret(sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, NULL));
}
#endif /* _NOLIBC_SYS_REBOOT_H */
diff --git a/tools/include/nolibc/sys/select.h b/tools/include/nolibc/sys/select.h
new file mode 100644
index 000000000000..2a5619c01277
--- /dev/null
+++ b/tools/include/nolibc/sys/select.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+
+#include "../nolibc.h"
+
+#ifndef _NOLIBC_SYS_SELECT_H
+#define _NOLIBC_SYS_SELECT_H
+
+#include <linux/time.h>
+#include <linux/unistd.h>
+
+/* commonly an fd_set represents 256 FDs */
+#ifndef FD_SETSIZE
+#define FD_SETSIZE 256
+#endif
+
+#define FD_SETIDXMASK (8 * sizeof(unsigned long))
+#define FD_SETBITMASK (8 * sizeof(unsigned long) - 1)
+
+/* for select() */
+typedef struct {
+ unsigned long fds[(FD_SETSIZE + FD_SETBITMASK) / FD_SETIDXMASK];
+} fd_set;
+
+#define FD_CLR(fd, set) do { \
+ fd_set *__set = (set); \
+ int __fd = (fd); \
+ if (__fd >= 0) \
+ __set->fds[__fd / FD_SETIDXMASK] &= \
+ ~(1UL << (__fd & FD_SETBITMASK)); \
+ } while (0)
+
+#define FD_SET(fd, set) do { \
+ fd_set *__set = (set); \
+ int __fd = (fd); \
+ if (__fd >= 0) \
+ __set->fds[__fd / FD_SETIDXMASK] |= \
+ 1UL << (__fd & FD_SETBITMASK); \
+ } while (0)
+
+#define FD_ISSET(fd, set) ({ \
+ fd_set *__set = (set); \
+ int __fd = (fd); \
+ int __r = 0; \
+ if (__fd >= 0) \
+ __r = !!(__set->fds[__fd / FD_SETIDXMASK] & \
+ 1UL << (__fd & FD_SETBITMASK)); \
+ __r; \
+ })
+
+#define FD_ZERO(set) do { \
+ fd_set *__set = (set); \
+ int __idx; \
+ int __size = (FD_SETSIZE + FD_SETBITMASK) / FD_SETIDXMASK; \
+ for (__idx = 0; __idx < __size; __idx++) \
+ __set->fds[__idx] = 0; \
+ } while (0)
+
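+/*
+ * Example of the bit layout implemented above, assuming 64-bit longs:
+ * fd 65 lives in fds[65 / 64] = fds[1], at bit 65 & 63 = 1, so
+ * FD_SET(65, &set) performs set.fds[1] |= 1UL << 1.
+ */
+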
+/*
+ * int select(int nfds, fd_set *read_fds, fd_set *write_fds,
+ * fd_set *except_fds, struct timeval *timeout);
+ */
+
+static __attribute__((unused))
+int sys_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
+{
+#if defined(__ARCH_WANT_SYS_OLD_SELECT) && !defined(__NR__newselect)
+ struct sel_arg_struct {
+ unsigned long n;
+ fd_set *r, *w, *e;
+ struct timeval *t;
+ } arg = { .n = nfds, .r = rfds, .w = wfds, .e = efds, .t = timeout };
+ return my_syscall1(__NR_select, &arg);
+#elif defined(__NR__newselect)
+ return my_syscall5(__NR__newselect, nfds, rfds, wfds, efds, timeout);
+#elif defined(__NR_select)
+ return my_syscall5(__NR_select, nfds, rfds, wfds, efds, timeout);
+#elif defined(__NR_pselect6)
+ struct timespec t;
+
+ if (timeout) {
+ t.tv_sec = timeout->tv_sec;
+ t.tv_nsec = timeout->tv_usec * 1000;
+ }
+ return my_syscall6(__NR_pselect6, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
+#else
+ struct __kernel_timespec t;
+
+ if (timeout) {
+ t.tv_sec = timeout->tv_sec;
+ t.tv_nsec = timeout->tv_usec * 1000;
+ }
+ return my_syscall6(__NR_pselect6_time64, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
+#endif
+}
+
+static __attribute__((unused))
+int select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
+{
+ return __sysret(sys_select(nfds, rfds, wfds, efds, timeout));
+}
+
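+/*
+ * Usage sketch (illustrative): wait up to one second for stdin
+ * to become readable.
+ *
+ *   char buf[64];
+ *   fd_set rfds;
+ *   struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
+ *
+ *   FD_ZERO(&rfds);
+ *   FD_SET(0, &rfds);
+ *   if (select(1, &rfds, NULL, NULL, &tv) > 0 && FD_ISSET(0, &rfds))
+ *           read(0, buf, sizeof(buf));
+ */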
+
+#endif /* _NOLIBC_SYS_SELECT_H */
diff --git a/tools/include/nolibc/sys/uio.h b/tools/include/nolibc/sys/uio.h
new file mode 100644
index 000000000000..7ad42b927d2f
--- /dev/null
+++ b/tools/include/nolibc/sys/uio.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * uio for NOLIBC
+ * Copyright (C) 2017-2021 Willy Tarreau <w@1wt.eu>
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+/* make sure to include all global symbols */
+#include "../nolibc.h"
+
+#ifndef _NOLIBC_SYS_UIO_H
+#define _NOLIBC_SYS_UIO_H
+
+#include "../sys.h"
+#include <linux/uio.h>
+
+
+/*
+ * ssize_t readv(int fd, const struct iovec *iovec, int count);
+ */
+static __attribute__((unused))
+ssize_t sys_readv(int fd, const struct iovec *iovec, int count)
+{
+ return my_syscall3(__NR_readv, fd, iovec, count);
+}
+
+static __attribute__((unused))
+ssize_t readv(int fd, const struct iovec *iovec, int count)
+{
+ return __sysret(sys_readv(fd, iovec, count));
+}
+
+/*
+ * ssize_t writev(int fd, const struct iovec *iovec, int count);
+ */
+static __attribute__((unused))
+ssize_t sys_writev(int fd, const struct iovec *iovec, int count)
+{
+ return my_syscall3(__NR_writev, fd, iovec, count);
+}
+
+static __attribute__((unused))
+ssize_t writev(int fd, const struct iovec *iovec, int count)
+{
+ return __sysret(sys_writev(fd, iovec, count));
+}
+
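+/*
+ * Usage sketch (illustrative): gather two buffers into a single write.
+ *
+ *   struct iovec iov[2] = {
+ *           { .iov_base = "hello ", .iov_len = 6 },
+ *           { .iov_base = "world\n", .iov_len = 6 },
+ *   };
+ *   writev(1, iov, 2);
+ */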
+
+#endif /* _NOLIBC_SYS_UIO_H */
diff --git a/tools/include/nolibc/sys/wait.h b/tools/include/nolibc/sys/wait.h
index 4e66e1f7a03e..9d9319ba92cb 100644
--- a/tools/include/nolibc/sys/wait.h
+++ b/tools/include/nolibc/sys/wait.h
@@ -65,23 +65,29 @@ pid_t waitpid(pid_t pid, int *status, int options)
switch (info.si_code) {
case 0:
- *status = 0;
+ if (status)
+ *status = 0;
break;
case CLD_EXITED:
- *status = (info.si_status & 0xff) << 8;
+ if (status)
+ *status = (info.si_status & 0xff) << 8;
break;
case CLD_KILLED:
- *status = info.si_status & 0x7f;
+ if (status)
+ *status = info.si_status & 0x7f;
break;
case CLD_DUMPED:
- *status = (info.si_status & 0x7f) | 0x80;
+ if (status)
+ *status = (info.si_status & 0x7f) | 0x80;
break;
case CLD_STOPPED:
case CLD_TRAPPED:
- *status = (info.si_status << 8) + 0x7f;
+ if (status)
+ *status = (info.si_status << 8) + 0x7f;
break;
case CLD_CONTINUED:
- *status = 0xffff;
+ if (status)
+ *status = 0xffff;
break;
default:
return -1;
diff --git a/tools/include/nolibc/time.h b/tools/include/nolibc/time.h
index 6c276b8d646a..48e78f8becf9 100644
--- a/tools/include/nolibc/time.h
+++ b/tools/include/nolibc/time.h
@@ -89,13 +89,11 @@ int sys_clock_settime(clockid_t clockid, struct timespec *tp)
{
#if defined(__NR_clock_settime)
return my_syscall2(__NR_clock_settime, clockid, tp);
-#elif defined(__NR_clock_settime64)
+#else
struct __kernel_timespec ktp;
__nolibc_timespec_user_to_kernel(tp, &ktp);
return my_syscall2(__NR_clock_settime64, clockid, &ktp);
-#else
- return __nolibc_enosys(__func__, clockid, tp);
#endif
}
@@ -111,7 +109,7 @@ int sys_clock_nanosleep(clockid_t clockid, int flags, const struct timespec *rqt
{
#if defined(__NR_clock_nanosleep)
return my_syscall4(__NR_clock_nanosleep, clockid, flags, rqtp, rmtp);
-#elif defined(__NR_clock_nanosleep_time64)
+#else
struct __kernel_timespec krqtp, krmtp;
int ret;
@@ -120,8 +118,6 @@ int sys_clock_nanosleep(clockid_t clockid, int flags, const struct timespec *rqt
if (rmtp)
__nolibc_timespec_kernel_to_user(&krmtp, rmtp);
return ret;
-#else
- return __nolibc_enosys(__func__, clockid, flags, rqtp, rmtp);
#endif
}
@@ -195,7 +191,7 @@ int sys_timer_gettime(timer_t timerid, struct itimerspec *curr_value)
{
#if defined(__NR_timer_gettime)
return my_syscall2(__NR_timer_gettime, timerid, curr_value);
-#elif defined(__NR_timer_gettime64)
+#else
struct __kernel_itimerspec kcurr_value;
int ret;
@@ -203,8 +199,6 @@ int sys_timer_gettime(timer_t timerid, struct itimerspec *curr_value)
__nolibc_timespec_kernel_to_user(&kcurr_value.it_interval, &curr_value->it_interval);
__nolibc_timespec_kernel_to_user(&kcurr_value.it_value, &curr_value->it_value);
return ret;
-#else
- return __nolibc_enosys(__func__, timerid, curr_value);
#endif
}
@@ -220,7 +214,7 @@ int sys_timer_settime(timer_t timerid, int flags,
{
#if defined(__NR_timer_settime)
return my_syscall4(__NR_timer_settime, timerid, flags, new_value, old_value);
-#elif defined(__NR_timer_settime64)
+#else
struct __kernel_itimerspec knew_value, kold_value;
int ret;
@@ -232,8 +226,6 @@ int sys_timer_settime(timer_t timerid, int flags,
__nolibc_timespec_kernel_to_user(&kold_value.it_value, &old_value->it_value);
}
return ret;
-#else
- return __nolibc_enosys(__func__, timerid, flags, new_value, old_value);
#endif
}
diff --git a/tools/include/nolibc/types.h b/tools/include/nolibc/types.h
index 16c6e9ec9451..470a5f77bc0f 100644
--- a/tools/include/nolibc/types.h
+++ b/tools/include/nolibc/types.h
@@ -70,11 +70,6 @@
#define DT_LNK 0xa
#define DT_SOCK 0xc
-/* commonly an fd_set represents 256 FDs */
-#ifndef FD_SETSIZE
-#define FD_SETSIZE 256
-#endif
-
/* PATH_MAX and MAXPATHLEN are often used and found with plenty of different
* values.
*/
@@ -115,48 +110,6 @@
#define EXIT_SUCCESS 0
#define EXIT_FAILURE 1
-#define FD_SETIDXMASK (8 * sizeof(unsigned long))
-#define FD_SETBITMASK (8 * sizeof(unsigned long)-1)
-
-/* for select() */
-typedef struct {
- unsigned long fds[(FD_SETSIZE + FD_SETBITMASK) / FD_SETIDXMASK];
-} fd_set;
-
-#define FD_CLR(fd, set) do { \
- fd_set *__set = (set); \
- int __fd = (fd); \
- if (__fd >= 0) \
- __set->fds[__fd / FD_SETIDXMASK] &= \
- ~(1U << (__fd & FD_SETBITMASK)); \
- } while (0)
-
-#define FD_SET(fd, set) do { \
- fd_set *__set = (set); \
- int __fd = (fd); \
- if (__fd >= 0) \
- __set->fds[__fd / FD_SETIDXMASK] |= \
- 1 << (__fd & FD_SETBITMASK); \
- } while (0)
-
-#define FD_ISSET(fd, set) ({ \
- fd_set *__set = (set); \
- int __fd = (fd); \
- int __r = 0; \
- if (__fd >= 0) \
- __r = !!(__set->fds[__fd / FD_SETIDXMASK] & \
-1U << (__fd & FD_SETBITMASK)); \
- __r; \
- })
-
-#define FD_ZERO(set) do { \
- fd_set *__set = (set); \
- int __idx; \
- int __size = (FD_SETSIZE+FD_SETBITMASK) / FD_SETIDXMASK;\
- for (__idx = 0; __idx < __size; __idx++) \
- __set->fds[__idx] = 0; \
- } while (0)
-
/* for getdents64() */
struct linux_dirent64 {
uint64_t d_ino;
diff --git a/tools/include/nolibc/unistd.h b/tools/include/nolibc/unistd.h
index 7405fa2b89ba..bb5e80f3f05d 100644
--- a/tools/include/nolibc/unistd.h
+++ b/tools/include/nolibc/unistd.h
@@ -54,7 +54,7 @@ int msleep(unsigned int msecs)
{
struct timeval my_timeval = { msecs / 1000, (msecs % 1000) * 1000 };
- if (sys_select(0, 0, 0, 0, &my_timeval) < 0)
+ if (sys_select(0, NULL, NULL, NULL, &my_timeval) < 0)
return (my_timeval.tv_sec * 1000) +
(my_timeval.tv_usec / 1000) +
!!(my_timeval.tv_usec % 1000);
@@ -67,7 +67,7 @@ unsigned int sleep(unsigned int seconds)
{
struct timeval my_timeval = { seconds, 0 };
- if (sys_select(0, 0, 0, 0, &my_timeval) < 0)
+ if (sys_select(0, NULL, NULL, NULL, &my_timeval) < 0)
return my_timeval.tv_sec + !!my_timeval.tv_usec;
else
return 0;
@@ -78,7 +78,7 @@ int usleep(unsigned int usecs)
{
struct timeval my_timeval = { usecs / 1000000, usecs % 1000000 };
- return sys_select(0, 0, 0, 0, &my_timeval);
+ return sys_select(0, NULL, NULL, NULL, &my_timeval);
}
static __attribute__((unused))
diff --git a/tools/docs/lib/__init__.py b/tools/lib/python/__init__.py
index e69de29bb2d1..e69de29bb2d1 100644
--- a/tools/docs/lib/__init__.py
+++ b/tools/lib/python/__init__.py
diff --git a/tools/lib/python/abi/__init__.py b/tools/lib/python/abi/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/lib/python/abi/__init__.py
diff --git a/tools/lib/python/abi/abi_parser.py b/tools/lib/python/abi/abi_parser.py
new file mode 100644
index 000000000000..9b8db70067ef
--- /dev/null
+++ b/tools/lib/python/abi/abi_parser.py
@@ -0,0 +1,628 @@
+#!/usr/bin/env python3
+# pylint: disable=R0902,R0903,R0911,R0912,R0913,R0914,R0915,R0917,C0302
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Parse ABI documentation and produce results from it.
+"""
+
+from argparse import Namespace
+import logging
+import os
+import re
+
+from pprint import pformat
+from random import randrange, seed
+
+# Import Python modules
+
+from abi.helpers import AbiDebug, ABI_DIR
+
+
+class AbiParser:
+ """Main class to parse ABI files"""
+
+ TAGS = r"(what|where|date|kernelversion|contact|description|users)"
+ XREF = r"(?:^|\s|\()(\/(?:sys|config|proc|dev|kvd)\/[^,.:;\)\s]+)(?:[,.:;\)\s]|\Z)"
+
+ def __init__(self, directory, logger=None,
+ enable_lineno=False, show_warnings=True, debug=0):
+ """Stores arguments for the class and initialize class vars"""
+
+ self.directory = directory
+ self.enable_lineno = enable_lineno
+ self.show_warnings = show_warnings
+ self.debug = debug
+
+ if not logger:
+ self.log = logging.getLogger("get_abi")
+ else:
+ self.log = logger
+
+ self.data = {}
+ self.what_symbols = {}
+ self.file_refs = {}
+ self.what_refs = {}
+
+ # Ignore files that contain such suffixes
+ self.ignore_suffixes = (".rej", ".org", ".orig", ".bak", "~")
+
+ # Regular expressions used on parser
+ self.re_abi_dir = re.compile(r"(.*)" + ABI_DIR)
+ self.re_tag = re.compile(r"(\S+)(:\s*)(.*)", re.I)
+ self.re_valid = re.compile(self.TAGS)
+ self.re_start_spc = re.compile(r"(\s*)(\S.*)")
+ self.re_whitespace = re.compile(r"^\s+")
+
+ # Regular expressions used on print
+ self.re_what = re.compile(r"(\/?(?:[\w\-]+\/?){1,2})")
+ self.re_escape = re.compile(r"([\.\x01-\x08\x0e-\x1f\x21-\x2f\x3a-\x40\x7b-\xff])")
+ self.re_unprintable = re.compile(r"([\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\xff]+)")
+ self.re_title_mark = re.compile(r"\n[\-\*\=\^\~]+\n")
+ self.re_doc = re.compile(r"Documentation/(?!devicetree)(\S+)\.rst")
+ self.re_abi = re.compile(r"(Documentation/ABI/)([\w\/\-]+)")
+ self.re_xref_node = re.compile(self.XREF)
+
+ def warn(self, fdata, msg, extra=None):
+ """Displays a parse error if warning is enabled"""
+
+ if not self.show_warnings:
+ return
+
+ msg = f"{fdata.fname}:{fdata.ln}: {msg}"
+ if extra:
+ msg += "\n\t\t" + extra
+
+ self.log.warning(msg)
+
+ def add_symbol(self, what, fname, ln=None, xref=None):
+ """Create a reference table describing where each 'what' is located"""
+
+ if what not in self.what_symbols:
+ self.what_symbols[what] = {"file": {}}
+
+ if fname not in self.what_symbols[what]["file"]:
+ self.what_symbols[what]["file"][fname] = []
+
+ if ln and ln not in self.what_symbols[what]["file"][fname]:
+ self.what_symbols[what]["file"][fname].append(ln)
+
+ if xref:
+ self.what_symbols[what]["xref"] = xref
+
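+    # After parsing, the table has this shape (names are illustrative):
+    #
+    #   what_symbols["/sys/class/foo"] = {
+    #       "file": {"Documentation/ABI/testing/sysfs-foo": [12]},
+    #       "xref": "abi_sys_class_foo",
+    #   }
+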
+ def _parse_line(self, fdata, line):
+ """Parse a single line of an ABI file"""
+
+ new_what = False
+ new_tag = False
+ content = None
+
+ match = self.re_tag.match(line)
+ if match:
+ new = match.group(1).lower()
+ sep = match.group(2)
+ content = match.group(3)
+
+ match = self.re_valid.search(new)
+ if match:
+ new_tag = match.group(1)
+ else:
+ if fdata.tag == "description":
+ # New "tag" is actually part of description.
+ # Don't consider it a tag
+ new_tag = False
+ elif fdata.tag != "":
+ self.warn(fdata, f"tag '{fdata.tag}' is invalid", line)
+
+ if new_tag:
+ # "where" is Invalid, but was a common mistake. Warn if found
+ if new_tag == "where":
+ self.warn(fdata, "tag 'Where' is invalid. Should be 'What:' instead")
+ new_tag = "what"
+
+ if new_tag == "what":
+ fdata.space = None
+
+ if content not in self.what_symbols:
+ self.add_symbol(what=content, fname=fdata.fname, ln=fdata.ln)
+
+ if fdata.tag == "what":
+ fdata.what.append(content.strip("\n"))
+ else:
+ if fdata.key:
+ if "description" not in self.data.get(fdata.key, {}):
+ self.warn(fdata, f"{fdata.key} doesn't have a description")
+
+ for w in fdata.what:
+ self.add_symbol(what=w, fname=fdata.fname,
+ ln=fdata.what_ln, xref=fdata.key)
+
+ fdata.label = content
+ new_what = True
+
+ key = "abi_" + content.lower()
+ fdata.key = self.re_unprintable.sub("_", key).strip("_")
+
+ # Avoid duplicated keys by using a fixed seed, to keep
+ # the namespace identical if there are no changes to the
+ # ABI symbols
+ seed(42)
+
+ while fdata.key in self.data:
+ char = randrange(0, 51) + ord("A")
+ if char > ord("Z"):
+ char += ord("a") - ord("Z") - 1
+
+ fdata.key += chr(char)
+
+ if fdata.key and fdata.key not in self.data:
+ self.data[fdata.key] = {
+ "what": [content],
+ "file": [fdata.file_ref],
+ "path": fdata.ftype,
+ "line_no": fdata.ln,
+ }
+
+ fdata.what = self.data[fdata.key]["what"]
+
+ self.what_refs[content] = fdata.key
+ fdata.tag = new_tag
+ fdata.what_ln = fdata.ln
+
+ if fdata.nametag["what"]:
+ t = (content, fdata.key)
+ if t not in fdata.nametag["symbols"]:
+ fdata.nametag["symbols"].append(t)
+
+ return
+
+ if fdata.tag and new_tag:
+ fdata.tag = new_tag
+
+ if new_what:
+ fdata.label = ""
+
+ if "description" in self.data[fdata.key]:
+ self.data[fdata.key]["description"] += "\n\n"
+
+ if fdata.file_ref not in self.data[fdata.key]["file"]:
+ self.data[fdata.key]["file"].append(fdata.file_ref)
+
+ if self.debug == AbiDebug.WHAT_PARSING:
+ self.log.debug("what: %s", fdata.what)
+
+ if not fdata.what:
+ self.warn(fdata, "'What:' should come first:", line)
+ return
+
+ if new_tag == "description":
+ fdata.space = None
+
+ if content:
+ sep = sep.replace(":", " ")
+
+ c = " " * len(new_tag) + sep + content
+ c = c.expandtabs()
+
+ match = self.re_start_spc.match(c)
+ if match:
+ # Preserve initial spaces for the first line
+ fdata.space = match.group(1)
+ content = match.group(2) + "\n"
+
+ self.data[fdata.key][fdata.tag] = content
+
+ return
+
+ # Store any contents that come before tags in the database
+ if not fdata.tag and "what" in fdata.nametag:
+ fdata.nametag["description"] += line
+ return
+
+ if fdata.tag == "description":
+ content = line.expandtabs()
+
+ if self.re_whitespace.sub("", content) == "":
+ self.data[fdata.key][fdata.tag] += "\n"
+ return
+
+ if fdata.space is None:
+ match = self.re_start_spc.match(content)
+ if match:
+ # Preserve initial spaces for the first line
+ fdata.space = match.group(1)
+
+ content = match.group(2) + "\n"
+ else:
+ if content.startswith(fdata.space):
+ content = content[len(fdata.space):]
+
+ else:
+ fdata.space = ""
+
+ if fdata.tag == "what":
+ w = content.strip("\n")
+ if w:
+ self.data[fdata.key][fdata.tag].append(w)
+ else:
+ self.data[fdata.key][fdata.tag] += content
+ return
+
+ content = line.strip()
+ if fdata.tag:
+ if fdata.tag == "what":
+ w = content.strip("\n")
+ if w:
+ self.data[fdata.key][fdata.tag].append(w)
+ else:
+ self.data[fdata.key][fdata.tag] += "\n" + content.rstrip("\n")
+ return
+
+ # Everything else is an error
+ if content:
+ self.warn(fdata, "Unexpected content", line)
+
+ def parse_readme(self, nametag, fname):
+ """Parse ABI README file"""
+
+ nametag["what"] = ["Introduction"]
+ nametag["path"] = "README"
+ with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp:
+ for line in fp:
+ match = self.re_tag.match(line)
+ if match:
+ new = match.group(1).lower()
+
+ match = self.re_valid.search(new)
+ if match:
+ nametag["description"] += "\n:" + line
+ continue
+
+ nametag["description"] += line
+
+ def parse_file(self, fname, path, basename):
+ """Parse a single file"""
+
+ ref = f"abi_file_{path}_{basename}"
+ ref = self.re_unprintable.sub("_", ref).strip("_")
+
+ # Store per-file state into a namespace variable. This will be used
+ # by the per-line parser state machine and by the warning function.
+ fdata = Namespace()
+
+ fdata.fname = fname
+ fdata.name = basename
+
+ pos = fname.find(ABI_DIR)
+ if pos > 0:
+ f = fname[pos:]
+ else:
+ f = fname
+
+ fdata.file_ref = (f, ref)
+ self.file_refs[f] = ref
+
+ fdata.ln = 0
+ fdata.what_ln = 0
+ fdata.tag = ""
+ fdata.label = ""
+ fdata.what = []
+ fdata.key = None
+ fdata.xrefs = None
+ fdata.space = None
+ fdata.ftype = path.split("/")[0]
+
+ fdata.nametag = {}
+ fdata.nametag["what"] = [f"ABI file {path}/{basename}"]
+ fdata.nametag["type"] = "File"
+ fdata.nametag["path"] = fdata.ftype
+ fdata.nametag["file"] = [fdata.file_ref]
+ fdata.nametag["line_no"] = 1
+ fdata.nametag["description"] = ""
+ fdata.nametag["symbols"] = []
+
+ self.data[ref] = fdata.nametag
+
+ if self.debug & AbiDebug.WHAT_OPEN:
+ self.log.debug("Opening file %s", fname)
+
+ if basename == "README":
+ self.parse_readme(fdata.nametag, fname)
+ return
+
+ with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp:
+ for line in fp:
+ fdata.ln += 1
+
+ self._parse_line(fdata, line)
+
+ if "description" in fdata.nametag:
+ fdata.nametag["description"] = fdata.nametag["description"].lstrip("\n")
+
+ if fdata.key:
+ if "description" not in self.data.get(fdata.key, {}):
+ self.warn(fdata, f"{fdata.key} doesn't have a description")
+
+ for w in fdata.what:
+ self.add_symbol(what=w, fname=fname, xref=fdata.key)
+
+ def _parse_abi(self, root=None):
+ """Internal function to parse documentation ABI recursively"""
+
+ if not root:
+ root = self.directory
+
+ with os.scandir(root) as obj:
+ for entry in obj:
+ name = os.path.join(root, entry.name)
+
+ if entry.is_dir():
+ self._parse_abi(name)
+ continue
+
+ if not entry.is_file():
+ continue
+
+ basename = os.path.basename(name)
+
+ if basename.startswith("."):
+ continue
+
+ if basename.endswith(self.ignore_suffixes):
+ continue
+
+ path = self.re_abi_dir.sub("", os.path.dirname(name))
+
+ self.parse_file(name, path, basename)
+
+ def parse_abi(self, root=None):
+ """Parse documentation ABI"""
+
+ self._parse_abi(root)
+
+ if self.debug & AbiDebug.DUMP_ABI_STRUCTS:
+ self.log.debug(pformat(self.data))
+
+ def desc_txt(self, desc):
+ """Print description as found inside ABI files"""
+
+ desc = desc.strip(" \t\n")
+
+ return desc + "\n\n"
+
+ def xref(self, fname):
+ """
+ Converts a Documentation/ABI + basename into a ReST cross-reference
+ """
+
+ return self.file_refs.get(fname)
+
+ def desc_rst(self, desc):
+ """Enrich ReST output by creating cross-references"""
+
+ # Remove title markups from the description.
+ # Having titles inside ABI files would only work if extra
+ # care were taken to strictly follow the same
+ # level order for each markup.
+ desc = self.re_title_mark.sub("\n\n", "\n" + desc)
+ desc = desc.rstrip(" \t\n").lstrip("\n")
+
+ # Python's regex performance for non-compiled expressions is a lot
+ # worse than Perl's, as Perl automatically caches them at their
+ # first usage. Here, we need to do the same, as otherwise the
+ # performance penalty would be high
+
+ new_desc = ""
+ for d in desc.split("\n"):
+ if d == "":
+ new_desc += "\n"
+ continue
+
+ # Use cross-references for doc files where needed
+ d = self.re_doc.sub(r":doc:`/\1`", d)
+
+ # Use cross-references for ABI generated docs where needed
+ matches = self.re_abi.findall(d)
+ for m in matches:
+ abi = m[0] + m[1]
+
+ xref = self.file_refs.get(abi)
+ if not xref:
+ # This may happen if the ABI file is in a separate directory,
+ # e.g. when parsing ABI/testing while the symbol is at ABI/stable.
+ # The proper solution is to move this part of the code
+ # so that it lives inside sphinx/kernel_abi.py
+ self.log.info("Didn't find ABI reference for '%s'", abi)
+ else:
+ new = self.re_escape.sub(r"\\\1", m[1])
+ d = re.sub(fr"\b{abi}\b", f":ref:`{new} <{xref}>`", d)
+
+ # Seek for cross reference symbols like /sys/...
+ # Need to be careful to avoid doing it on a code block
+ if d[0] not in [" ", "\t"]:
+ matches = self.re_xref_node.findall(d)
+ for m in matches:
+ # Finding ABI here is more complex due to wildcards
+ xref = self.what_refs.get(m)
+ if xref:
+ new = self.re_escape.sub(r"\\\1", m)
+ d = re.sub(fr"\b{m}\b", f":ref:`{new} <{xref}>`", d)
+
+ new_desc += d + "\n"
+
+ return new_desc + "\n\n"
+
+ def doc(self, output_in_txt=False, show_symbols=True, show_file=True,
+ filter_path=None):
+ """Print ABI at stdout"""
+
+ part = None
+ for key, v in sorted(self.data.items(),
+ key=lambda x: (x[1].get("type", ""),
+ x[1].get("what"))):
+
+ wtype = v.get("type", "Symbol")
+ file_ref = v.get("file")
+ names = v.get("what", [""])
+
+ if wtype == "File":
+ if not show_file:
+ continue
+ else:
+ if not show_symbols:
+ continue
+
+ if filter_path:
+ if v.get("path") != filter_path:
+ continue
+
+ msg = ""
+
+ if wtype != "File":
+ cur_part = names[0]
+ if cur_part.find("/") >= 0:
+ match = self.re_what.match(cur_part)
+ if match:
+ symbol = match.group(1).rstrip("/")
+ cur_part = "Symbols under " + symbol
+
+ if cur_part and cur_part != part:
+ part = cur_part
+ msg += part + "\n"+ "-" * len(part) +"\n\n"
+
+ msg += f".. _{key}:\n\n"
+
+ max_len = 0
+ for i in range(0, len(names)): # pylint: disable=C0200
+ names[i] = "**" + self.re_escape.sub(r"\\\1", names[i]) + "**"
+
+ max_len = max(max_len, len(names[i]))
+
+ msg += "+-" + "-" * max_len + "-+\n"
+ for name in names:
+ msg += f"| {name}" + " " * (max_len - len(name)) + " |\n"
+ msg += "+-" + "-" * max_len + "-+\n"
+ msg += "\n"
+
+ for ref in file_ref:
+ if wtype == "File":
+ msg += f".. _{ref[1]}:\n\n"
+ else:
+ base = os.path.basename(ref[0])
+ msg += f"Defined on file :ref:`{base} <{ref[1]}>`\n\n"
+
+ if wtype == "File":
+ msg += names[0] +"\n" + "-" * len(names[0]) +"\n\n"
+
+ desc = v.get("description")
+ if not desc and wtype != "File":
+ msg += f"DESCRIPTION MISSING for {names[0]}\n\n"
+
+ if desc:
+ if output_in_txt:
+ msg += self.desc_txt(desc)
+ else:
+ msg += self.desc_rst(desc)
+
+ symbols = v.get("symbols")
+ if symbols:
+ msg += "Has the following ABI:\n\n"
+
+ for w, label in symbols:
+ # Escape special chars from content
+ content = self.re_escape.sub(r"\\\1", w)
+
+ msg += f"- :ref:`{content} <{label}>`\n\n"
+
+ users = v.get("users")
+ if users and users.strip(" \t\n"):
+ users = users.strip("\n").replace('\n', '\n\t')
+ msg += f"Users:\n\t{users}\n\n"
+
+ ln = v.get("line_no", 1)
+
+ yield (msg, file_ref[0][0], ln)
+
+ def check_issues(self):
+ """Warn about duplicated ABI entries"""
+
+ for what, v in self.what_symbols.items():
+ files = v.get("file")
+ if not files:
+ # Should never happen if the parser works properly
+ self.log.warning("%s doesn't have a file associated", what)
+ continue
+
+ if len(files) == 1:
+ continue
+
+ f = []
+ for fname, lines in sorted(files.items()):
+ if not lines:
+ f.append(f"{fname}")
+ elif len(lines) == 1:
+ f.append(f"{fname}:{lines[0]}")
+ else:
+ m = fname + "lines "
+ m += ", ".join(str(x) for x in lines)
+ f.append(m)
+
+ self.log.warning("%s is defined %d times: %s", what, len(f), "; ".join(f))
+
+ def search_symbols(self, expr):
+ """ Searches for ABI symbols """
+
+ regex = re.compile(expr, re.I)
+
+ found_keys = 0
+ for t in sorted(self.data.items(), key=lambda x: x[0]):
+ v = t[1]
+
+ wtype = v.get("type", "")
+ if wtype == "File":
+ continue
+
+ for what in v.get("what", [""]):
+ if regex.search(what):
+ found_keys += 1
+
+ kernelversion = v.get("kernelversion", "").strip(" \t\n")
+ date = v.get("date", "").strip(" \t\n")
+ contact = v.get("contact", "").strip(" \t\n")
+ users = v.get("users", "").strip(" \t\n")
+ desc = v.get("description", "").strip(" \t\n")
+
+ files = []
+ for f in v.get("file", ()):
+ files.append(f[0])
+
+ what = str(found_keys) + ". " + what
+ title_tag = "-" * len(what)
+
+ print(f"\n{what}\n{title_tag}\n")
+
+ if kernelversion:
+ print(f"Kernel version:\t\t{kernelversion}")
+
+ if date:
+ print(f"Date:\t\t\t{date}")
+
+ if contact:
+ print(f"Contact:\t\t{contact}")
+
+ if users:
+ print(f"Users:\t\t\t{users}")
+
+ print("Defined on file(s):\t" + ", ".join(files))
+
+ if desc:
+ desc = desc.strip("\n")
+ print(f"\n{desc}\n")
+
+ if not found_keys:
+ print(f"Regular expression /{expr}/ not found.")
diff --git a/tools/lib/python/abi/abi_regex.py b/tools/lib/python/abi/abi_regex.py
new file mode 100644
index 000000000000..d5553206de3c
--- /dev/null
+++ b/tools/lib/python/abi/abi_regex.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python3
+# xxpylint: disable=R0903
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Convert ABI what into regular expressions
+"""
+
+import re
+import sys
+
+from pprint import pformat
+
+from abi.abi_parser import AbiParser
+from abi.helpers import AbiDebug
+
+class AbiRegex(AbiParser):
+ """Extends AbiParser to search ABI nodes with regular expressions"""
+
+ # Escape only ASCII visible characters
+ escape_symbols = r"([\x21-\x29\x2b-\x2d\x3a-\x40\x5c\x60\x7b-\x7e])"
+ leave_others = "others"
+
+ # Tuples with regular expressions to be compiled and replacement data
+ re_whats = [
+ # Drop escape characters that might exist
+ (re.compile("\\\\"), ""),
+
+ # Temporarily escape dot characters
+ (re.compile(r"\."), "\xf6"),
+
+ # Temporarily change [0-9]+ type of patterns
+ (re.compile(r"\[0\-9\]\+"), "\xff"),
+
+ # Temporarily change [\d+-\d+] type of patterns
+ (re.compile(r"\[0\-\d+\]"), "\xff"),
+ (re.compile(r"\[0:\d+\]"), "\xff"),
+ (re.compile(r"\[(\d+)\]"), "\xf4\\\\d+\xf5"),
+
+ # Temporarily change [0-9] type of patterns
+ (re.compile(r"\[(\d)\-(\d)\]"), "\xf4\1-\2\xf5"),
+
+ # Handle multiple option patterns
+ (re.compile(r"[\{\<\[]([\w_]+)(?:[,|]+([\w_]+)){1,}[\}\>\]]"), r"(\1|\2)"),
+
+ # Handle wildcards
+ (re.compile(r"([^\/])\*"), "\\1\\\\w\xf7"),
+ (re.compile(r"/\*/"), "/.*/"),
+ (re.compile(r"/\xf6\xf6\xf6"), "/.*"),
+ (re.compile(r"\<[^\>]+\>"), "\\\\w\xf7"),
+ (re.compile(r"\{[^\}]+\}"), "\\\\w\xf7"),
+ (re.compile(r"\[[^\]]+\]"), "\\\\w\xf7"),
+
+ (re.compile(r"XX+"), "\\\\w\xf7"),
+ (re.compile(r"([^A-Z])[XYZ]([^A-Z])"), "\\1\\\\w\xf7\\2"),
+ (re.compile(r"([^A-Z])[XYZ]$"), "\\1\\\\w\xf7"),
+ (re.compile(r"_[AB]_"), "_\\\\w\xf7_"),
+
+ # Recover [0-9] type of patterns
+ (re.compile(r"\xf4"), "["),
+ (re.compile(r"\xf5"), "]"),
+
+ # Remove duplicated spaces
+ (re.compile(r"\s+"), r" "),
+
+ # Special case: drop comparison as in:
+ # What: foo = <something>
+ # (this happens on a few IIO definitions)
+ (re.compile(r"\s*\=.*$"), ""),
+
+ # Escape all other symbols
+ (re.compile(escape_symbols), r"\\\1"),
+ (re.compile(r"\\\\"), r"\\"),
+ (re.compile(r"\\([\[\]\(\)\|])"), r"\1"),
+ (re.compile(r"(\d+)\\(-\d+)"), r"\1\2"),
+
+ (re.compile(r"\xff"), r"\\d+"),
+
+ # Special case: IIO ABI entries that use a parenthesis.
+ (re.compile(r"sqrt(.*)"), r"sqrt(.*)"),
+
+ # Simplify regexes with multiple .*
+ (re.compile(r"(?:\.\*){2,}"), ""),
+
+ # Recover dot characters
+ (re.compile(r"\xf6"), "\\."),
+ # Recover plus characters
+ (re.compile(r"\xf7"), "+"),
+ ]
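+
+ # Illustrative result of the rules above (assumed, not exhaustive):
+ # a What such as
+ #   /sys/bus/iio/devices/iio:deviceX/in_voltageY_raw
+ # becomes a regex roughly like
+ #   /sys/bus/iio/devices/iio:device\w+/in_voltage\w+_raw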
+ re_has_num = re.compile(r"\\d")
+
+ # Symbol name after escape_chars that are considered a devnode basename
+ re_symbol_name = re.compile(r"(\w|\\[\.\-\:])+$")
+
+ # List of popular group names to be skipped to minimize regex group size
+ # Use AbiDebug.SUBGROUP_SIZE to detect those
+ skip_names = set(["devices", "hwmon"])
+
+ def regex_append(self, what, new):
+ """
+ Get a search group for a subset of regular expressions.
+
+ As the ABI may have thousands of symbols, using a plain for loop to
+ search all regular expressions is at least O(n^2). When there are
+ wildcards, the complexity increases substantially, eventually
+ becoming exponential.
+
+ To avoid spending too much time on them, split the regular
+ expressions into groups. The smaller the group, the better, as
+ searches will be confined to a small number of regular expressions.
+
+ The conversion to a regex subset is tricky, as we need something
+ that can be easily obtained from the sysfs symbol and from the
+ regular expression. So, we need to discard nodes that have
+ wildcards.
+
+ If a subgroup can't be obtained, place the regular expression inside
+ a special group (self.leave_others).
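+
+ For instance (illustrative): a What such as
+ /sys/block/<disk>/queue/nr_requests is grouped under its basename
+ "nr_requests"; a sysfs file with that basename only needs to be
+ checked against the regexes stored in that subgroup.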
+ """
+
+ search_group = None
+
+ for search_group in reversed(new.split("/")):
+ if not search_group or search_group in self.skip_names:
+ continue
+ if self.re_symbol_name.match(search_group):
+ break
+
+ if not search_group:
+ search_group = self.leave_others
+
+ if self.debug & AbiDebug.SUBGROUP_MAP:
+ self.log.debug("%s: mapped as %s", what, search_group)
+
+ try:
+ if search_group not in self.regex_group:
+ self.regex_group[search_group] = []
+
+ self.regex_group[search_group].append(re.compile(new))
+ if self.search_string:
+ if what.find(self.search_string) >= 0:
+ print(f"What: {what}")
+ except re.PatternError:
+ self.log.warning("Ignoring '%s' as it produced an invalid regex:\n"
+ " '%s'", what, new)
+
+ def get_regexes(self, what):
+ """
+ Given an ABI devnode, return a list of all regular expressions that
+ may match it, based on the sub-groups created by regex_append()
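+
+ Illustrative: for what == "/sys/class/leds/foo/brightness", the
+ lookup tries the subgroups "brightness", "foo", "leds", "class",
+ "sys" and, last, the self.leave_others fallback group.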
+ """
+
+ re_list = []
+
+ paths = what.split("/")
+ paths.reverse()
+ paths.append(self.leave_others)
+
+ for search_group in paths:
+ if search_group in self.regex_group:
+ re_list += self.regex_group[search_group]
+
+ return re_list
+
+ def __init__(self, *args, **kwargs):
+ """
+ Override init method to get verbose argument
+ """
+
+ self.regex_group = None
+ self.search_string = None
+ self.re_string = None
+
+ if "search_string" in kwargs:
+ self.search_string = kwargs.get("search_string")
+ del kwargs["search_string"]
+
+ if self.search_string:
+
+ try:
+ self.re_string = re.compile(self.search_string)
+ except re.PatternError as e:
+ msg = f"{self.search_string} is not a valid regular expression"
+ raise ValueError(msg) from e
+
+ super().__init__(*args, **kwargs)
+
+ def parse_abi(self, *args, **kwargs):
+
+ super().parse_abi(*args, **kwargs)
+
+ self.regex_group = {}
+
+ print("Converting ABI What fields into regexes...", file=sys.stderr)
+
+ for t in sorted(self.data.items(), key=lambda x: x[0]):
+ v = t[1]
+ if v.get("type") == "File":
+ continue
+
+ v["regex"] = []
+
+ for what in v.get("what", []):
+ if not what.startswith("/sys"):
+ continue
+
+ new = what
+ for r, s in self.re_whats:
+ try:
+ new = r.sub(s, new)
+ except re.PatternError as e:
+ # Help debugging troubles with new regexes
+ raise re.PatternError(f"{e}\nwhile re.sub('{r.pattern}', {s}, str)") from e
+
+ v["regex"].append(new)
+
+ if self.debug & AbiDebug.REGEX:
+ self.log.debug("%-90s <== %s", new, what)
+
+ # Store regex into a subgroup to speedup searches
+ self.regex_append(what, new)
+
+ if self.debug & AbiDebug.SUBGROUP_DICT:
+ self.log.debug("%s", pformat(self.regex_group))
+
+ if self.debug & AbiDebug.SUBGROUP_SIZE:
+ biggest_keys = sorted(self.regex_group.keys(),
+ key=lambda k: len(self.regex_group[k]),
+ reverse=True)
+
+ print("Top regex subgroups:", file=sys.stderr)
+ for k in biggest_keys[:10]:
+ print(f"{k} has {len(self.regex_group[k])} elements", file=sys.stderr)
diff --git a/tools/lib/python/abi/helpers.py b/tools/lib/python/abi/helpers.py
new file mode 100644
index 000000000000..639b23e4ca33
--- /dev/null
+++ b/tools/lib/python/abi/helpers.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# pylint: disable=R0903
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Helper classes for ABI parser
+"""
+
+ABI_DIR = "Documentation/ABI/"
+
+
+class AbiDebug:
+ """Debug levels"""
+
+ WHAT_PARSING = 1
+ WHAT_OPEN = 2
+ DUMP_ABI_STRUCTS = 4
+ UNDEFINED = 8
+ REGEX = 16
+ SUBGROUP_MAP = 32
+ SUBGROUP_DICT = 64
+ SUBGROUP_SIZE = 128
+ GRAPH = 256
+
+
+DEBUG_HELP = """
+1 - enable debug parsing logic
+2 - enable debug messages on file open
+4 - enable debug for ABI parse data
+8 - enable extra debug information to identify troubles
+ with ABI symbols found at the local machine that
+ weren't found on ABI documentation (used only for
+ undefined subcommand)
+16 - enable debug for what to regex conversion
+32 - enable debug for the symbol to regex subgroup mapping
+64 - enable debug dump of the regex subgroup dict
+128 - enable debug for regex subgroup size statistics
+256 - enable debug for the sysfs graph tree variable
+"""
diff --git a/tools/lib/python/abi/system_symbols.py b/tools/lib/python/abi/system_symbols.py
new file mode 100644
index 000000000000..4a2554da217b
--- /dev/null
+++ b/tools/lib/python/abi/system_symbols.py
@@ -0,0 +1,378 @@
+#!/usr/bin/env python3
+# pylint: disable=R0902,R0912,R0914,R0915,R1702
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Parse ABI documentation and produce results from it.
+"""
+
+import os
+import re
+import sys
+
+from concurrent import futures
+from datetime import datetime
+from random import shuffle
+
+from abi.helpers import AbiDebug
+
+class SystemSymbols:
+ """Stores arguments for the class and initialize class vars"""
+
+ def graph_add_file(self, path, link=None):
+ """
+ Add a file path to the sysfs graph stored at self.root.
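+
+ The graph is a nested dict with a "__name" list at each node.
+ Illustrative shape after adding "/sys/kernel/fscaps":
+
+ root[""]["sys"]["kernel"]["fscaps"]["__name"] == ["/sys/kernel/fscaps"]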
+ """
+
+ if path in self.files:
+ return
+
+ name = ""
+ ref = self.root
+ for edge in path.split("/"):
+ name += edge + "/"
+ if edge not in ref:
+ ref[edge] = {"__name": [name.rstrip("/")]}
+
+ ref = ref[edge]
+
+ if link and link not in ref["__name"]:
+ ref["__name"].append(link.rstrip("/"))
+
+ self.files.add(path)
+
+ def print_graph(self, root_prefix="", root=None, level=0):
+ """Prints a reference tree graph using UTF-8 characters"""
+
+ if not root:
+ root = self.root
+ level = 0
+
+ # Prevent endless traversal
+ if level > 5:
+ return
+
+ if level > 0:
+ prefix = "├──"
+ last_prefix = "└──"
+ else:
+ prefix = ""
+ last_prefix = ""
+
+ items = list(root.items())
+
+ names = root.get("__name", [])
+ for k, edge in items:
+ if k == "__name":
+ continue
+
+ if not k:
+ k = "/"
+
+ if len(names) > 1:
+ k += " links: " + ",".join(names[1:])
+
+ if edge == items[-1][1]:
+ print(root_prefix + last_prefix + k)
+ p = root_prefix
+ if level > 0:
+ p += " "
+ self.print_graph(p, edge, level + 1)
+ else:
+ print(root_prefix + prefix + k)
+ p = root_prefix + "│ "
+ self.print_graph(p, edge, level + 1)
+
+ def _walk(self, root):
+ """
+ Walk through sysfs to get all devnodes that aren't ignored.
+
+ By default, uses /sys as the sysfs mount point. If another
+ directory is used, it is replaced by /sys in the resulting paths.
+ """
+
+ with os.scandir(root) as obj:
+ for entry in obj:
+ path = os.path.join(root, entry.name)
+ if self.sysfs:
+ p = path.replace(self.sysfs, "/sys", 1)
+ else:
+ p = path
+
+ if self.re_ignore.search(p):
+ continue
+
+ # Handle link first to avoid directory recursion
+ if entry.is_symlink():
+ real = os.path.realpath(path)
+ if not self.sysfs:
+ self.aliases[path] = real
+ else:
+ real = real.replace(self.sysfs, "/sys", 1)
+
+ # Add absfile location to graph if it doesn't exist
+ if not self.re_ignore.search(real):
+ # Add link to the graph
+ self.graph_add_file(real, p)
+
+ elif entry.is_file():
+ self.graph_add_file(p)
+
+ elif entry.is_dir():
+ self._walk(path)
+
+ def __init__(self, abi, sysfs="/sys", hints=False):
+ """
+ Initialize internal variables and get a list of all files inside
+ sysfs that can currently be parsed.
+
+ Please notice that there are several entries on sysfs that aren't
+ documented as ABI. Ignore those.
+
+ The real paths will be stored under self.files. Aliases will be
+ stored separately, in self.aliases.
+ """
+
+ self.abi = abi
+ self.log = abi.log
+
+ if sysfs != "/sys":
+ self.sysfs = sysfs.rstrip("/")
+ else:
+ self.sysfs = None
+
+ self.hints = hints
+
+ self.root = {}
+ self.aliases = {}
+ self.files = set()
+
+ dont_walk = [
+ # Those require root access and aren't documented at ABI
+ f"^{sysfs}/kernel/debug",
+ f"^{sysfs}/kernel/tracing",
+ f"^{sysfs}/fs/pstore",
+ f"^{sysfs}/fs/bpf",
+ f"^{sysfs}/fs/fuse",
+
+ # This is not documented at ABI
+ f"^{sysfs}/module",
+
+ f"^{sysfs}/fs/cgroup", # this is big and has zero docs under ABI
+ f"^{sysfs}/firmware", # documented elsewhere: ACPI, DT bindings
+ "sections|notes", # aren't actually part of ABI
+
+ # kernel-parameters.txt - not easy to parse
+ "parameters",
+ ]
+
+ self.re_ignore = re.compile("|".join(dont_walk))
+
+ print(f"Reading {sysfs} directory contents...", file=sys.stderr)
+ self._walk(sysfs)
+
+ def check_file(self, refs, found):
+ """Check missing ABI symbols for a given sysfs file"""
+
+ res_list = []
+
+ try:
+ for names in refs:
+ fname = names[0]
+
+ res = {
+ "found": False,
+ "fname": fname,
+ "msg": "",
+ }
+ res_list.append(res)
+
+ re_what = self.abi.get_regexes(fname)
+ if not re_what:
+ self.abi.log.warning(f"missing rules for {fname}")
+ continue
+
+ for name in names:
+ for r in re_what:
+ if self.abi.debug & AbiDebug.UNDEFINED:
+ self.log.debug("check if %s matches '%s'", name, r.pattern)
+ if r.match(name):
+ res["found"] = True
+ if found:
+ res["msg"] += f" {fname}: regex:\n\t"
+ continue
+
+ if self.hints and not res["found"]:
+ res["msg"] += f" {fname} not found. Tested regexes:\n"
+ for r in re_what:
+ res["msg"] += " " + r.pattern + "\n"
+
+ except KeyboardInterrupt:
+ pass
+
+ return res_list
+
+ def _ref_interactor(self, root):
+ """Recursive function to interact over the sysfs tree"""
+
+ for k, v in root.items():
+ if isinstance(v, dict):
+ yield from self._ref_interactor(v)
+
+ if root == self.root or k == "__name":
+ continue
+
+ if self.abi.re_string:
+ fname = v["__name"][0]
+ if self.abi.re_string.search(fname):
+ yield v
+ else:
+ yield v
+
+
+ def get_fileref(self, all_refs, chunk_size):
+ """Interactor to group refs into chunks"""
+
+ n = 0
+ refs = []
+
+ for ref in all_refs:
+ refs.append(ref)
+
+ n += 1
+ if n >= chunk_size:
+ yield refs
+ n = 0
+ refs = []
+
+ yield refs
+
+ def check_undefined_symbols(self, max_workers=None, chunk_size=50,
+ found=None, dry_run=None):
+ """Seach ABI for sysfs symbols missing documentation"""
+
+ self.abi.parse_abi()
+
+ if self.abi.debug & AbiDebug.GRAPH:
+ self.print_graph()
+
+ all_refs = []
+ for ref in self._ref_interactor(self.root):
+ all_refs.append(ref["__name"])
+
+ if dry_run:
+ print("Would check", file=sys.stderr)
+ for ref in all_refs:
+ print(", ".join(ref))
+
+ return
+
+ print("Starting to search symbols (it may take several minutes):",
+ file=sys.stderr)
+ start = datetime.now()
+ old_elapsed = None
+
+ # Python doesn't support truly parallel multithreading due to its
+ # global interpreter lock (GIL). While Python 3.13 made the GIL optional,
+ # there are still issues related to it. Also, we want to have
+ # backward compatibility with older versions of Python.
+ #
+ # So, use instead multiprocess. However, Python is very slow passing
+ # data from/to multiple processes. Also, it may consume lots of memory
+ # if the data to be shared is not small. So, we need to group workload
+ # in chunks that are big enough to generate performance gains while
+ # not being so big that they would cause out-of-memory conditions.
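+
+ # e.g. (illustrative): with 8 workers and 10000 refs, chunk_size
+ # stays at the default 50, yielding ~200 jobs spread across the pool.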
+
+ num_refs = len(all_refs)
+ print(f"Number of references to parse: {num_refs}", file=sys.stderr)
+
+ if not max_workers:
+ max_workers = os.cpu_count()
+ elif max_workers > os.cpu_count():
+ max_workers = os.cpu_count()
+
+ max_workers = max(max_workers, 1)
+
+ max_chunk_size = (num_refs + max_workers - 1) // max_workers
+ chunk_size = min(chunk_size, max_chunk_size)
+ chunk_size = max(1, chunk_size)
+
+ if max_workers > 1:
+ executor = futures.ProcessPoolExecutor
+
+ # Place references in a random order. This may help improving
+ # performance, by mixing complex/simple expressions when creating
+ # chunks
+ shuffle(all_refs)
+ else:
+ # Python has a high overhead with processes. When there's just
+ # one worker, it is faster to not create a new process.
+ # Yet, the user still deserves a progress print. So, use
+ # Python threads, which run inside a single process, using
+ # an internal scheduler to switch between tasks. There are no
+ # performance gains for non-IO tasks, but the loop can still be
+ # interrupted from time to time to display progress.
+ executor = futures.ThreadPoolExecutor
+
+ not_found = []
+ f_list = []
+ with executor(max_workers=max_workers) as exe:
+ for refs in self.get_fileref(all_refs, chunk_size):
+ if refs:
+ try:
+ f_list.append(exe.submit(self.check_file, refs, found))
+
+ except KeyboardInterrupt:
+ return
+
+ total = len(f_list)
+
+ if not total:
+ if self.abi.re_string:
+ print(f"No ABI symbol matches {self.abi.search_string}")
+ else:
+ self.abi.log.warning("No ABI symbols found")
+ return
+
+ print(f"{len(f_list):6d} jobs queued on {max_workers} workers",
+ file=sys.stderr)
+
+ while f_list:
+ try:
+ t = futures.wait(f_list, timeout=1,
+ return_when=futures.FIRST_COMPLETED)
+
+ done = t[0]
+
+ for fut in done:
+ res_list = fut.result()
+
+ for res in res_list:
+ if not res["found"]:
+ not_found.append(res["fname"])
+ if res["msg"]:
+ print(res["msg"])
+
+ f_list.remove(fut)
+ except KeyboardInterrupt:
+ return
+
+ except RuntimeError as e:
+ self.abi.log.warning(f"Future: {e}")
+ break
+
+ if sys.stderr.isatty():
+ elapsed = str(datetime.now() - start).split(".", maxsplit=1)[0]
+ if len(f_list) < total:
+ elapsed += f" ({total - len(f_list)}/{total} jobs completed). "
+ if elapsed != old_elapsed:
+ print(elapsed + "\r", end="", flush=True,
+ file=sys.stderr)
+ old_elapsed = elapsed
+
+ elapsed = str(datetime.now() - start).split(".", maxsplit=1)[0]
+ print(elapsed, file=sys.stderr)
+
+ for f in sorted(not_found):
+ print(f"{f} not found.")
diff --git a/tools/lib/python/feat/parse_features.py b/tools/lib/python/feat/parse_features.py
new file mode 100755
index 000000000000..b88c04d3e2fe
--- /dev/null
+++ b/tools/lib/python/feat/parse_features.py
@@ -0,0 +1,494 @@
+#!/usr/bin/env python3
+# pylint: disable=R0902,R0911,R0912,R0914,R0915
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+
+"""
+Library to parse the Linux Feature files and produce a ReST book.
+"""
+
+import os
+import re
+import sys
+
+from glob import iglob
+
+
+class ParseFeature:
+ """
+ Parses Documentation/features, allowing the generation of ReST documentation
+ from it.
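+
+ A feature file (arch-support.txt) looks roughly like this
+ (illustrative sample, not a real feature):
+
+ # Feature name: foo
+ # Kconfig: ARCH_HAS_FOO
+ # description: arch supports foo
+ -----------------------
+ | arch | status |
+ -----------------------
+ | arm64: | ok |
+ | x86: | TODO |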
+ """
+
+ h_name = "Feature"
+ h_kconfig = "Kconfig"
+ h_description = "Description"
+ h_subsys = "Subsystem"
+ h_status = "Status"
+ h_arch = "Architecture"
+
+ # Sort order for status. Others will be mapped at the end.
+ status_map = {
+ "ok": 0,
+ "TODO": 1,
+ "N/A": 2,
+ # The only missing status is "..", which is mapped as "---",
+ # as ".." is a special ReST cell value. Let it get the
+ # default order (99).
+ }
+
+ def __init__(self, prefix, debug=0, enable_fname=False):
+ """
+ Sets internal variables
+ """
+
+ self.prefix = prefix
+ self.debug = debug
+ self.enable_fname = enable_fname
+
+ self.data = {}
+
+ # Initial maximum values use just the headers
+ self.max_size_name = len(self.h_name)
+ self.max_size_kconfig = len(self.h_kconfig)
+ self.max_size_description = len(self.h_description)
+ self.max_size_desc_word = 0
+ self.max_size_subsys = len(self.h_subsys)
+ self.max_size_status = len(self.h_status)
+ self.max_size_arch = len(self.h_arch)
+ self.max_size_arch_with_header = self.max_size_arch + len(self.h_arch)
+ self.description_size = 1
+
+ self.msg = ""
+
+ def emit(self, msg="", end="\n"):
+ self.msg += msg + end
+
+ def parse_error(self, fname, ln, msg, data=None):
+ """
+ Displays an error message, printing file name and line
+ """
+
+ if ln:
+ fname += f"#{ln}"
+
+ print(f"Warning: file {fname}: {msg}", file=sys.stderr, end="")
+
+ if data:
+ data = data.rstrip()
+ print(f":\n\t{data}", file=sys.stderr)
+ else:
+ print("", file=sys.stderr)
+
+ def parse_feat_file(self, fname):
+ """Parses a single arch-support.txt feature file"""
+
+ if os.path.isdir(fname):
+ return
+
+ base = os.path.basename(fname)
+
+ if base != "arch-support.txt":
+ if self.debug:
+ print(f"ignoring {fname}", file=sys.stderr)
+ return
+
+ subsys = os.path.dirname(fname).split("/")[-2]
+ self.max_size_subsys = max(self.max_size_subsys, len(subsys))
+
+ feature_name = ""
+ kconfig = ""
+ description = ""
+ comments = ""
+ arch_table = {}
+
+ if self.debug > 1:
+ print(f"Opening {fname}", file=sys.stderr)
+
+ if self.enable_fname:
+ full_fname = os.path.abspath(fname)
+ self.emit(f".. FILE {full_fname}")
+
+ with open(fname, encoding="utf-8") as f:
+ for ln, line in enumerate(f, start=1):
+ line = line.strip()
+
+ match = re.match(r"^\#\s+Feature\s+name:\s*(.*\S)", line)
+ if match:
+ feature_name = match.group(1)
+
+ self.max_size_name = max(self.max_size_name,
+ len(feature_name))
+ continue
+
+ match = re.match(r"^\#\s+Kconfig:\s*(.*\S)", line)
+ if match:
+ kconfig = match.group(1)
+
+ self.max_size_kconfig = max(self.max_size_kconfig,
+ len(kconfig))
+ continue
+
+ match = re.match(r"^\#\s+description:\s*(.*\S)", line)
+ if match:
+ description = match.group(1)
+
+ self.max_size_description = max(self.max_size_description,
+ len(description))
+
+ words = re.split(r"\s+", line)[1:]
+ for word in words:
+ self.max_size_desc_word = max(self.max_size_desc_word,
+ len(word))
+
+ continue
+
+ if re.search(r"^\\s*$", line):
+ continue
+
+ if re.match(r"^\s*\-+\s*$", line):
+ continue
+
+ if re.search(r"^\s*\|\s*arch\s*\|\s*status\s*\|\s*$", line):
+ continue
+
+ match = re.match(r"^\#\s*(.*)$", line)
+ if match:
+ comments += match.group(1)
+ continue
+
+ match = re.match(r"^\s*\|\s*(\S+):\s*\|\s*(\S+)\s*\|\s*$", line)
+ if match:
+ arch = match.group(1)
+ status = match.group(2)
+
+ self.max_size_status = max(self.max_size_status,
+ len(status))
+ self.max_size_arch = max(self.max_size_arch, len(arch))
+
+ if status == "..":
+ status = "---"
+
+ arch_table[arch] = status
+
+ continue
+
+ self.parse_error(fname, ln, "Line is invalid", line)
+
+ if not feature_name:
+ self.parse_error(fname, 0, "Feature name not found")
+ return
+ if not subsys:
+ self.parse_error(fname, 0, "Subsystem not found")
+ return
+ if not kconfig:
+ self.parse_error(fname, 0, "Kconfig not found")
+ return
+ if not description:
+ self.parse_error(fname, 0, "Description not found")
+ return
+ if not arch_table:
+ self.parse_error(fname, 0, "Architecture table not found")
+ return
+
+ self.data[feature_name] = {
+ "where": fname,
+ "subsys": subsys,
+ "kconfig": kconfig,
+ "description": description,
+ "comments": comments,
+ "table": arch_table,
+ }
+
+ self.max_size_arch_with_header = self.max_size_arch + len(self.h_arch)
+
+ def parse(self):
+ """Parses all arch-support.txt feature files inside self.prefix"""
+
+ path = os.path.expanduser(self.prefix)
+
+ if self.debug > 2:
+ print(f"Running parser for {path}")
+
+ example_path = os.path.join(path, "arch-support.txt")
+
+ for fname in iglob(os.path.join(path, "**"), recursive=True):
+ if fname != example_path:
+ self.parse_feat_file(fname)
+
+ return self.data
+
+ def output_arch_table(self, arch, feat=None):
+ """
+ Output feature(s) for a given architecture.
+ """
+
+ title = f"Feature status on {arch} architecture"
+
+ self.emit("=" * len(title))
+ self.emit(title)
+ self.emit("=" * len(title))
+ self.emit()
+
+ self.emit("=" * self.max_size_subsys + " ", end="")
+ self.emit("=" * self.max_size_name + " ", end="")
+ self.emit("=" * self.max_size_kconfig + " ", end="")
+ self.emit("=" * self.max_size_status + " ", end="")
+ self.emit("=" * self.max_size_description)
+
+ self.emit(f"{self.h_subsys:<{self.max_size_subsys}} ", end="")
+ self.emit(f"{self.h_name:<{self.max_size_name}} ", end="")
+ self.emit(f"{self.h_kconfig:<{self.max_size_kconfig}} ", end="")
+ self.emit(f"{self.h_status:<{self.max_size_status}} ", end="")
+ self.emit(f"{self.h_description:<{self.max_size_description}}")
+
+ self.emit("=" * self.max_size_subsys + " ", end="")
+ self.emit("=" * self.max_size_name + " ", end="")
+ self.emit("=" * self.max_size_kconfig + " ", end="")
+ self.emit("=" * self.max_size_status + " ", end="")
+ self.emit("=" * self.max_size_description)
+
+ sorted_features = sorted(self.data.keys(),
+ key=lambda x: (self.data[x]["subsys"],
+ x.lower()))
+
+ for name in sorted_features:
+ if feat and name != feat:
+ continue
+
+ arch_table = self.data[name]["table"]
+
+ if arch not in arch_table:
+ continue
+
+ self.emit(f"{self.data[name]['subsys']:<{self.max_size_subsys}} ",
+ end="")
+ self.emit(f"{name:<{self.max_size_name}} ", end="")
+ self.emit(f"{self.data[name]['kconfig']:<{self.max_size_kconfig}} ",
+ end="")
+ self.emit(f"{arch_table[arch]:<{self.max_size_status}} ",
+ end="")
+ self.emit(f"{self.data[name]['description']}")
+
+ self.emit("=" * self.max_size_subsys + " ", end="")
+ self.emit("=" * self.max_size_name + " ", end="")
+ self.emit("=" * self.max_size_kconfig + " ", end="")
+ self.emit("=" * self.max_size_status + " ", end="")
+ self.emit("=" * self.max_size_description)
+
+ return self.msg
+
+ def output_feature(self, feat):
+ """
+ Output a feature on all architectures
+ """
+
+ title = f"Feature {feat}"
+
+ self.emit("=" * len(title))
+ self.emit(title)
+ self.emit("=" * len(title))
+ self.emit()
+
+ if feat not in self.data:
+ return
+
+ if self.data[feat]["subsys"]:
+ self.emit(f":Subsystem: {self.data[feat]['subsys']}")
+ if self.data[feat]["kconfig"]:
+ self.emit(f":Kconfig: {self.data[feat]['kconfig']}")
+
+ desc = self.data[feat]["description"]
+ desc = desc[0].upper() + desc[1:]
+ desc = desc.rstrip(". \t")
+ self.emit(f"\n{desc}.\n")
+
+ com = self.data[feat]["comments"].strip()
+ if com:
+ self.emit("Comments")
+ self.emit("--------")
+ self.emit(f"\n{com}\n")
+
+ self.emit("=" * self.max_size_arch + " ", end="")
+ self.emit("=" * self.max_size_status)
+
+ self.emit(f"{self.h_arch:<{self.max_size_arch}} ", end="")
+ self.emit(f"{self.h_status:<{self.max_size_status}}")
+
+ self.emit("=" * self.max_size_arch + " ", end="")
+ self.emit("=" * self.max_size_status)
+
+ arch_table = self.data[feat]["table"]
+ for arch in sorted(arch_table.keys()):
+ self.emit(f"{arch:<{self.max_size_arch}} ", end="")
+ self.emit(f"{arch_table[arch]:<{self.max_size_status}}")
+
+ self.emit("=" * self.max_size_arch + " ", end="")
+ self.emit("=" * self.max_size_status)
+
+ return self.msg
+
+ def matrix_lines(self, desc_size, max_size_status, header):
+ """
+ Helper function to emit the row separator lines of the output matrix
+ """
+
+ if header:
+ ln_marker = "="
+ else:
+ ln_marker = "-"
+
+ self.emit("+" + ln_marker * self.max_size_name + "+", end="")
+ self.emit(ln_marker * desc_size, end="")
+ self.emit("+" + ln_marker * max_size_status + "+")
+
+ def output_matrix(self):
+ """
+ Generates a set of tables, grouped by subsystem, showing
+ the feature state on each architecture.
+ """
+
+ title = "Feature status on all architectures"
+
+ self.emit("=" * len(title))
+ self.emit(title)
+ self.emit("=" * len(title))
+ self.emit()
+
+ desc_title = f"{self.h_kconfig} / {self.h_description}"
+
+ desc_size = self.max_size_kconfig + 4
+ if not self.description_size:
+ desc_size = max(self.max_size_description, desc_size)
+ else:
+ desc_size = max(self.description_size, desc_size)
+
+ desc_size = max(self.max_size_desc_word, desc_size, len(desc_title))
+
+ notcompat = "Not compatible"
+ self.max_size_status = max(self.max_size_status, len(notcompat))
+
+ min_status_size = self.max_size_status + self.max_size_arch + 4
+ max_size_status = max(min_status_size, self.max_size_status)
+
+ h_status_per_arch = "Status per architecture"
+ max_size_status = max(max_size_status, len(h_status_per_arch))
+
+ cur_subsys = None
+ for name in sorted(self.data.keys(),
+ key=lambda x: (self.data[x]["subsys"], x.lower())):
+ if not cur_subsys or cur_subsys != self.data[name]["subsys"]:
+ if cur_subsys:
+ self.emit()
+
+ cur_subsys = self.data[name]["subsys"]
+
+ title = f"Subsystem: {cur_subsys}"
+ self.emit(title)
+ self.emit("=" * len(title))
+ self.emit()
+
+ self.matrix_lines(desc_size, max_size_status, 0)
+
+ self.emit(f"|{self.h_name:<{self.max_size_name}}", end="")
+ self.emit(f"|{desc_title:<{desc_size}}", end="")
+ self.emit(f"|{h_status_per_arch:<{max_size_status}}|")
+
+ self.matrix_lines(desc_size, max_size_status, 1)
+
+ lines = []
+ descs = []
+ cur_status = ""
+ line = ""
+
+ arch_table = sorted(self.data[name]["table"].items(),
+ key=lambda x: (self.status_map.get(x[1], 99),
+ x[0].lower()))
+
+ for arch, status in arch_table:
+ if status == "---":
+ status = notcompat
+
+ if status != cur_status:
+ if line != "":
+ lines.append(line)
+ line = ""
+ line = f"- **{status}**: {arch}"
+ elif len(line) + len(arch) + 2 < max_size_status:
+ line += f", {arch}"
+ else:
+ lines.append(line)
+ line = f" {arch}"
+ cur_status = status
+
+ if line != "":
+ lines.append(line)
+
+ description = self.data[name]["description"]
+ while len(description) > desc_size:
+ desc_line = description[:desc_size]
+
+ last_space = desc_line.rfind(" ")
+ if last_space != -1:
+ desc_line = desc_line[:last_space]
+ descs.append(desc_line)
+ description = description[last_space + 1:]
+ else:
+ desc_line = desc_line[:-1]
+ descs.append(desc_line + "\\")
+ description = description[len(desc_line):]
+
+ if description:
+ descs.append(description)
+
+ while len(lines) < 2 + len(descs):
+ lines.append("")
+
+ for ln, line in enumerate(lines):
+ col = ["", ""]
+
+ if not ln:
+ col[0] = name
+ col[1] = f"``{self.data[name]['kconfig']}``"
+ else:
+ if ln >= 2 and descs:
+ col[1] = descs.pop(0)
+
+ self.emit(f"|{col[0]:<{self.max_size_name}}", end="")
+ self.emit(f"|{col[1]:<{desc_size}}", end="")
+ self.emit(f"|{line:<{max_size_status}}|")
+
+ self.matrix_lines(desc_size, max_size_status, 0)
+
+ return self.msg
+
+ def list_arch_features(self, arch, feat):
+ """
+ Print a matrix of kernel feature support for the chosen architecture.
+ """
+ self.emit("#")
+ self.emit(f"# Kernel feature support matrix of the '{arch}' architecture:")
+ self.emit("#")
+
+ # Sort by subsystem, then by feature name (case-insensitive)
+ for name in sorted(self.data.keys(),
+ key=lambda n: (self.data[n]["subsys"].lower(),
+ n.lower())):
+ if feat and name != feat:
+ continue
+
+ feature = self.data[name]
+ arch_table = feature["table"]
+ status = arch_table.get(arch, "")
+ status = " " * ((4 - len(status)) // 2) + status
+
+ self.emit(f"{feature['subsys']:>{self.max_size_subsys + 1}}/ ",
+ end="")
+ self.emit(f"{name:<{self.max_size_name}}: ", end="")
+ self.emit(f"{status:<5}| ", end="")
+ self.emit(f"{feature['kconfig']:>{self.max_size_kconfig}} ",
+ end="")
+ self.emit(f"# {feature['description']}")
+
+ return self.msg
diff --git a/tools/lib/python/jobserver.py b/tools/lib/python/jobserver.py
new file mode 100755
index 000000000000..a24f30ef4fa8
--- /dev/null
+++ b/tools/lib/python/jobserver.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0+
+#
+# pylint: disable=C0103,C0209
+#
+#
+
+"""
+Interacts with the POSIX jobserver during the Kernel build time.
+
+A "normal" jobserver task, like the one initiated by a make subrocess would do:
+
+ - open read/write file descriptors to communicate with the job server;
+ - ask for one slot by calling:
+ claim = os.read(reader, 1)
+ - when the job finishes, call:
+ os.write(writer, b"+") # os.write(writer, claim)
+
+Here, the goal is different: this script aims to get the remaining number
+of available slots, using all of them to run a command which handles tasks
+in parallel. To do that, it has a loop that ends only after there are no
+slots left. It then increments the number by one, in order to allow a
+call equivalent to make -j$((claim+1)), e.g. having a parent make create
+$claim children to do the actual work.
+
+The end goal here is to keep the total number of build tasks under the
+limit established by the initial make -j$n_proc call.
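+
+For example (illustrative): under "make -j8", if 5 slots are still free
+in the jobserver pipe, this script claims all 5 and runs its child with
+PARALLELISM=6, i.e. the 5 claimed slots plus the caller's own slot.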
+
+See:
+ https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html#POSIX-Jobserver
+"""
+
+import errno
+import os
+import subprocess
+import sys
+
+class JobserverExec:
+ """
+ Claim all slots from make using POSIX Jobserver.
+
+ The main methods here are:
+ - open(): reserves all slots;
+ - close(): returns all used slots back to make;
+ - run(): executes a command setting PARALLELISM=<available slots + 1>
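+
+ Illustrative usage (a minimal sketch):
+
+ with JobserverExec() as js:
+     ret = js.run(["make", "-C", "Documentation"])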
+ """
+
+ def __init__(self):
+ """Initialize internal vars"""
+ self.claim = 0
+ self.jobs = b""
+ self.reader = None
+ self.writer = None
+ self.is_open = False
+
+ def open(self):
+ """Reserve all available slots to be claimed later on"""
+
+ if self.is_open:
+ return
+
+ try:
+ # Fetch the make environment options.
+ flags = os.environ["MAKEFLAGS"]
+ # Look for "--jobserver=R,W"
+ # Note that GNU Make has used --jobserver-fds and --jobserver-auth
+ # so this handles all of them.
+ opts = [x for x in flags.split(" ") if x.startswith("--jobserver")]
+
+ # Parse out R,W file descriptor numbers and set them nonblocking.
+ # If the MAKEFLAGS variable contains multiple instances of the
+ # --jobserver-auth= option, the last one is relevant.
+ fds = opts[-1].split("=", 1)[1]
+
+ # Starting with GNU Make 4.4, named pipes are used for reader
+ # and writer.
+ # Example argument: --jobserver-auth=fifo:/tmp/GMfifo8134
+ _, _, path = fds.partition("fifo:")
+
+ if path:
+ self.reader = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
+ self.writer = os.open(path, os.O_WRONLY)
+ else:
+ self.reader, self.writer = [int(x) for x in fds.split(",", 1)]
+ # Open a private copy of reader to avoid setting nonblocking
+ # on an unexpecting process with the same reader fd.
+ self.reader = os.open("/proc/self/fd/%d" % (self.reader),
+ os.O_RDONLY | os.O_NONBLOCK)
+
+ # Read out as many jobserver slots as possible
+ while True:
+ try:
+ slot = os.read(self.reader, 8)
+ self.jobs += slot
+ except (OSError, IOError) as e:
+ if e.errno == errno.EWOULDBLOCK:
+ # Stop at the end of the jobserver queue.
+ break
+ # If something went wrong, give back the jobs.
+ if self.jobs:
+ os.write(self.writer, self.jobs)
+ raise e
+
+ # Add a bump for our caller's reservation, since we're just going
+ # to sit here blocked on our child.
+ self.claim = len(self.jobs) + 1
+
+ except (KeyError, IndexError, ValueError, OSError, IOError):
+ # Any missing environment strings or bad fds should result in just
+ # not being parallel.
+ self.claim = None
+
+ self.is_open = True
+
+ def close(self):
+ """Return all reserved slots to Jobserver"""
+
+ if not self.is_open:
+ return
+
+ # Return all the reserved slots.
+ if self.jobs:
+ os.write(self.writer, self.jobs)
+
+ self.is_open = False
+
+ def __enter__(self):
+ self.open()
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ self.close()
+
+ def run(self, cmd, *args, **kwargs):
+ """
+ Run a command setting PARALLELISM env variable to the number of
+ available job slots (claim) + 1, e.g. it will reserve claim slots
+ to do the actual build work, plus one to monitor its children.
+ """
+ self.open() # Ensure that self.claim is set
+
+ # We can only claim parallelism if there was a jobserver (i.e. a
+ # top-level "-jN" argument) and there were no other failures. Otherwise
+ # leave out the environment variable and let the child figure out what
+ # is best.
+ if self.claim:
+ os.environ["PARALLELISM"] = str(self.claim)
+
+ return subprocess.call(cmd, *args, **kwargs)
diff --git a/tools/lib/python/kdoc/__init__.py b/tools/lib/python/kdoc/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/lib/python/kdoc/__init__.py
diff --git a/tools/docs/lib/enrich_formatter.py b/tools/lib/python/kdoc/enrich_formatter.py
index bb171567a4ca..bb171567a4ca 100644
--- a/tools/docs/lib/enrich_formatter.py
+++ b/tools/lib/python/kdoc/enrich_formatter.py
diff --git a/tools/lib/python/kdoc/kdoc_files.py b/tools/lib/python/kdoc/kdoc_files.py
new file mode 100644
index 000000000000..bfe02baf1606
--- /dev/null
+++ b/tools/lib/python/kdoc/kdoc_files.py
@@ -0,0 +1,294 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+#
+# pylint: disable=R0903,R0913,R0914,R0917
+
+"""
+Parse kernel-doc tags on multiple kernel source files.
+"""
+
+import argparse
+import logging
+import os
+import re
+
+from kdoc.kdoc_parser import KernelDoc
+from kdoc.kdoc_output import OutputFormat
+
+
+class GlobSourceFiles:
+ """
+ Parse C source code file names and directories via an iterator.
+ """
+
+ def __init__(self, srctree=None, valid_extensions=None):
+ """
+ Initialize valid extensions with a tuple.
+
+ If not defined, assume default C extensions (.c and .h)
+
+ It would be possible to use python's glob function, but it is
+ very slow, and it is not an iterator. So, it would wait to read all
+ directories before actually doing something.
+
+ So, let's use our own implementation.
+ """
+
+ if not valid_extensions:
+ self.extensions = (".c", ".h")
+ else:
+ self.extensions = valid_extensions
+
+ self.srctree = srctree
+
+ def _parse_dir(self, dirname):
+ """Internal function to parse files recursively"""
+
+ with os.scandir(dirname) as obj:
+ for entry in obj:
+ name = os.path.join(dirname, entry.name)
+
+ if entry.is_dir(follow_symlinks=False):
+ yield from self._parse_dir(name)
+
+ if not entry.is_file():
+ continue
+
+ basename = os.path.basename(name)
+
+ if not basename.endswith(self.extensions):
+ continue
+
+ yield name
+
+ def parse_files(self, file_list, file_not_found_cb):
+ """
+ Define an iterator to parse all source files from file_list,
+ handling directories if any
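+
+ Illustrative use (the source tree path is hypothetical):
+
+ glob = GlobSourceFiles(srctree="/path/to/linux")
+ for fname in glob.parse_files(["include/linux"], None):
+     print(fname)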
+ """
+
+ if not file_list:
+ return
+
+ for fname in file_list:
+ if self.srctree:
+ f = os.path.join(self.srctree, fname)
+ else:
+ f = fname
+
+ if os.path.isdir(f):
+ yield from self._parse_dir(f)
+ elif os.path.isfile(f):
+ yield f
+ elif file_not_found_cb:
+ file_not_found_cb(fname)
+
+
+class KernelFiles():
+ """
+ Parse kernel-doc tags on multiple kernel source files.
+
+ There are two types of parsers defined here:
+ - self.parse_file(): parses both kernel-doc markups and
+ EXPORT_SYMBOL* macros;
+ - self.process_export_file(): parses only EXPORT_SYMBOL* macros.
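+
+ Illustrative usage, a minimal sketch (the file name is hypothetical):
+
+ kfiles = KernelFiles()
+ kfiles.parse(["drivers/foo/bar.c"])
+ for fname, msg in kfiles.msg(export=True):
+     print(msg)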
+ """
+
+ def warning(self, msg):
+ """Ancillary routine to output a warning and increment error count"""
+
+ self.config.log.warning(msg)
+ self.errors += 1
+
+ def error(self, msg):
+ """Ancillary routine to output an error and increment error count"""
+
+ self.config.log.error(msg)
+ self.errors += 1
+
+ def parse_file(self, fname):
+ """
+ Parse a single Kernel source.
+ """
+
+ # Prevent parsing the same file twice if results are cached
+ if fname in self.files:
+ return
+
+ doc = KernelDoc(self.config, fname)
+ export_table, entries = doc.parse_kdoc()
+
+ self.export_table[fname] = export_table
+
+ self.files.add(fname)
+ self.export_files.add(fname) # parse_kdoc() already checks exports
+
+ self.results[fname] = entries
+
+ def process_export_file(self, fname):
+ """
+ Parses EXPORT_SYMBOL* macros from a single Kernel source file.
+ """
+
+ # Prevent parsing the same file twice if results are cached
+ if fname in self.export_files:
+ return
+
+ doc = KernelDoc(self.config, fname)
+ export_table = doc.parse_export()
+
+ if not export_table:
+ self.error(f"Error: Cannot check EXPORT_SYMBOL* on {fname}")
+ export_table = set()
+
+ self.export_table[fname] = export_table
+ self.export_files.add(fname)
+
+ def file_not_found_cb(self, fname):
+ """
+ Callback to warn if a file was not found.
+ """
+
+ self.error(f"Cannot find file {fname}")
+
+ def __init__(self, verbose=False, out_style=None,
+ werror=False, wreturn=False, wshort_desc=False,
+ wcontents_before_sections=False,
+ logger=None):
+ """
+ Initialize startup variables and parse all files
+ """
+
+ if not verbose:
+ verbose = bool(os.environ.get("KBUILD_VERBOSE", 0))
+
+ if out_style is None:
+ out_style = OutputFormat()
+
+ if not werror:
+ kcflags = os.environ.get("KCFLAGS", None)
+ if kcflags:
+ match = re.search(r"(\s|^)-Werror(\s|$)", kcflags)
+ if match:
+ werror = True
+
+ # reading this variable is for backwards compat just in case
+ # someone was calling it with the variable from outside the
+ # kernel's build system
+ kdoc_werror = os.environ.get("KDOC_WERROR", None)
+ if kdoc_werror:
+ werror = kdoc_werror
+
+ # Some variables are global to the parser logic as a whole as they are
+ # used to send control configuration to KernelDoc class. As such,
+ # those variables are read-only inside the KernelDoc.
+ self.config = argparse.Namespace
+
+ self.config.verbose = verbose
+ self.config.werror = werror
+ self.config.wreturn = wreturn
+ self.config.wshort_desc = wshort_desc
+ self.config.wcontents_before_sections = wcontents_before_sections
+
+ if not logger:
+ self.config.log = logging.getLogger("kernel-doc")
+ else:
+ self.config.log = logger
+
+ self.config.warning = self.warning
+
+ self.config.src_tree = os.environ.get("SRCTREE", None)
+
+ # Initialize variables that are internal to KernelFiles
+
+ self.out_style = out_style
+
+ self.errors = 0
+ self.results = {}
+
+ self.files = set()
+ self.export_files = set()
+ self.export_table = {}
+
+ def parse(self, file_list, export_file=None):
+ """
+ Parse all files
+ """
+
+ glob = GlobSourceFiles(srctree=self.config.src_tree)
+
+ for fname in glob.parse_files(file_list, self.file_not_found_cb):
+ self.parse_file(fname)
+
+ for fname in glob.parse_files(export_file, self.file_not_found_cb):
+ self.process_export_file(fname)
+
+ def out_msg(self, fname, name, arg):
+ """
+ Return output messages from a file name using the output style
+ filtering.
+
+ If output type was not handled by the styler, return None.
+ """
+
+ # NOTE: we can add rules here to filter out unwanted parts,
+ # although OutputFormat.msg already does that.
+
+ return self.out_style.msg(fname, name, arg)
+
+ def msg(self, enable_lineno=False, export=False, internal=False,
+ symbol=None, nosymbol=None, no_doc_sections=False,
+ filenames=None, export_file=None):
+ """
+ Iterates over the kernel-doc results and outputs messages,
+ yielding kernel-doc markups on each iteration
+ """
+
+ self.out_style.set_config(self.config)
+
+ if not filenames:
+ filenames = sorted(self.results.keys())
+
+ glob = GlobSourceFiles(srctree=self.config.src_tree)
+
+ for fname in filenames:
+ function_table = set()
+
+ if internal or export:
+ if not export_file:
+ export_file = [fname]
+
+ for f in glob.parse_files(export_file, self.file_not_found_cb):
+ function_table |= self.export_table[f]
+
+ if symbol:
+ for s in symbol:
+ function_table.add(s)
+
+ self.out_style.set_filter(export, internal, symbol, nosymbol,
+ function_table, enable_lineno,
+ no_doc_sections)
+
+ msg = ""
+ if fname not in self.results:
+ self.config.log.warning("No kernel-doc for file %s", fname)
+ continue
+
+ symbols = self.results[fname]
+ self.out_style.set_symbols(symbols)
+
+ for arg in symbols:
+ m = self.out_msg(fname, arg.name, arg)
+
+ if m is None:
+ ln = arg.get("ln", 0)
+ dtype = arg.get('type', "")
+
+ self.config.log.warning("%s:%d Can't handle %s",
+ fname, ln, dtype)
+ else:
+ msg += m
+
+ if msg:
+ yield fname, msg
diff --git a/tools/lib/python/kdoc/kdoc_item.py b/tools/lib/python/kdoc/kdoc_item.py
new file mode 100644
index 000000000000..19805301cb2c
--- /dev/null
+++ b/tools/lib/python/kdoc/kdoc_item.py
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# A class that will, eventually, encapsulate all of the parsed data that we
+# then pass into the output modules.
+#
+
+class KdocItem:
+ def __init__(self, name, fname, type, start_line, **other_stuff):
+ self.name = name
+ self.fname = fname
+ self.type = type
+ self.declaration_start_line = start_line
+ self.sections = {}
+ self.section_start_lines = {}
+ self.parameterlist = []
+ self.parameterdesc_start_lines = []
+ self.parameterdescs = {}
+ self.parametertypes = {}
+ #
+ # Just save everything else into our own dict so that the output
+ # side can grab it directly as before. As we move things into more
+ # structured data, this will, hopefully, fade away.
+ #
+ self.other_stuff = other_stuff
+
+ def get(self, key, default=None):
+ return self.other_stuff.get(key, default)
+
+ def __getitem__(self, key):
+ return self.get(key)
+
+ #
+ # Tracking of section and parameter information.
+ #
+ def set_sections(self, sections, start_lines):
+ self.sections = sections
+ self.section_start_lines = start_lines
+
+ def set_params(self, names, descs, types, starts):
+ self.parameterlist = names
+ self.parameterdescs = descs
+ self.parametertypes = types
+ self.parameterdesc_start_lines = starts
diff --git a/tools/lib/python/kdoc/kdoc_output.py b/tools/lib/python/kdoc/kdoc_output.py
new file mode 100644
index 000000000000..b1aaa7fc3604
--- /dev/null
+++ b/tools/lib/python/kdoc/kdoc_output.py
@@ -0,0 +1,824 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+#
+# pylint: disable=C0301,R0902,R0911,R0912,R0913,R0914,R0915,R0917
+
+"""
+Implement output filters to print kernel-doc documentation.
+
+The implementation uses a virtual base class (OutputFormat) which
+contains dispatches to virtual methods, and some code to filter
+out output messages.
+
+The actual implementation is done on one separate class per each type
+of output. Currently, there are output classes for ReST and man/troff.
+"""
+
+import os
+import re
+from datetime import datetime
+
+from kdoc.kdoc_parser import KernelDoc, type_param
+from kdoc.kdoc_re import KernRe
+
+
+function_pointer = KernRe(r"([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)", cache=False)
+
+# match expressions used to find embedded type information
+type_constant = KernRe(r"\b``([^\`]+)``\b", cache=False)
+type_constant2 = KernRe(r"\%([-_*\w]+)", cache=False)
+type_func = KernRe(r"(\w+)\(\)", cache=False)
+type_param_ref = KernRe(r"([\!~\*]?)\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)", cache=False)
+
+# Special RST handling for func ptr params
+type_fp_param = KernRe(r"\@(\w+)\(\)", cache=False)
+
+# Special RST handling for structs with func ptr params
+type_fp_param2 = KernRe(r"\@(\w+->\S+)\(\)", cache=False)
+
+type_env = KernRe(r"(\$\w+)", cache=False)
+type_enum = KernRe(r"\&(enum\s*([_\w]+))", cache=False)
+type_struct = KernRe(r"\&(struct\s*([_\w]+))", cache=False)
+type_typedef = KernRe(r"\&(typedef\s*([_\w]+))", cache=False)
+type_union = KernRe(r"\&(union\s*([_\w]+))", cache=False)
+type_member = KernRe(r"\&([_\w]+)(\.|->)([_\w]+)", cache=False)
+type_fallback = KernRe(r"\&([_\w]+)", cache=False)
+type_member_func = type_member + KernRe(r"\(\)", cache=False)
+
+
+class OutputFormat:
+ """
+ Base class for OutputFormat. If used as-is, it means that only
+ warnings will be displayed.
+ """
+
+ # output mode.
+ OUTPUT_ALL = 0 # output all symbols and doc sections
+ OUTPUT_INCLUDE = 1 # output only specified symbols
+ OUTPUT_EXPORTED = 2 # output exported symbols
+ OUTPUT_INTERNAL = 3 # output non-exported symbols
+
+ # Virtual member to be overridden at the inherited classes
+ highlights = []
+
+ def __init__(self):
+ """Declare internal vars and set mode to OUTPUT_ALL"""
+
+ self.out_mode = self.OUTPUT_ALL
+ self.enable_lineno = None
+ self.nosymbol = {}
+ self.symbol = None
+ self.function_table = None
+ self.config = None
+ self.no_doc_sections = False
+
+ self.data = ""
+
+ def set_config(self, config):
+ """
+ Setup global config variables used by both parser and output.
+ """
+
+ self.config = config
+
+ def set_filter(self, export, internal, symbol, nosymbol, function_table,
+ enable_lineno, no_doc_sections):
+ """
+ Initialize filter variables according to the requested mode.
+
+ Only one choice is valid between export, internal and symbol.
+
+ The nosymbol filter can be used on all modes.
+ """
+
+ self.enable_lineno = enable_lineno
+ self.no_doc_sections = no_doc_sections
+ self.function_table = function_table
+
+ if symbol:
+ self.out_mode = self.OUTPUT_INCLUDE
+ elif export:
+ self.out_mode = self.OUTPUT_EXPORTED
+ elif internal:
+ self.out_mode = self.OUTPUT_INTERNAL
+ else:
+ self.out_mode = self.OUTPUT_ALL
+
+ if nosymbol:
+ self.nosymbol = set(nosymbol)
+
+
+ def highlight_block(self, block):
+ """
+ Apply the RST highlights to a sub-block of text.
+ """
+
+ for r, sub in self.highlights:
+ block = r.sub(sub, block)
+
+ return block
+
+ def out_warnings(self, args):
+ """
+ Output warnings for identifiers that will be displayed.
+ """
+
+ for log_msg in args.warnings:
+ self.config.warning(log_msg)
+
+ def check_doc(self, name, args):
+ """Check if DOC should be output"""
+
+ if self.no_doc_sections:
+ return False
+
+ if name in self.nosymbol:
+ return False
+
+ if self.out_mode == self.OUTPUT_ALL:
+ self.out_warnings(args)
+ return True
+
+ if self.out_mode == self.OUTPUT_INCLUDE:
+ if name in self.function_table:
+ self.out_warnings(args)
+ return True
+
+ return False
+
+ def check_declaration(self, dtype, name, args):
+ """
+ Checks if a declaration should be output or not based on the
+ filtering criteria.
+ """
+
+ if name in self.nosymbol:
+ return False
+
+ if self.out_mode == self.OUTPUT_ALL:
+ self.out_warnings(args)
+ return True
+
+ if self.out_mode in [self.OUTPUT_INCLUDE, self.OUTPUT_EXPORTED]:
+ if name in self.function_table:
+ return True
+
+ if self.out_mode == self.OUTPUT_INTERNAL:
+ if dtype != "function":
+ self.out_warnings(args)
+ return True
+
+ if name not in self.function_table:
+ self.out_warnings(args)
+ return True
+
+ return False
+
+ def msg(self, fname, name, args):
+ """
+ Handles a single entry from kernel-doc parser
+ """
+
+ self.data = ""
+
+ dtype = args.type
+
+ if dtype == "doc":
+ self.out_doc(fname, name, args)
+ return self.data
+
+ if not self.check_declaration(dtype, name, args):
+ return self.data
+
+ if dtype == "function":
+ self.out_function(fname, name, args)
+ return self.data
+
+ if dtype == "enum":
+ self.out_enum(fname, name, args)
+ return self.data
+
+ if dtype == "typedef":
+ self.out_typedef(fname, name, args)
+ return self.data
+
+ if dtype in ["struct", "union"]:
+ self.out_struct(fname, name, args)
+ return self.data
+
+ # Warn if some type requires an output logic
+ self.config.log.warning("doesn't know how to output '%s' block",
+ dtype)
+
+ return None
+
+ # Virtual methods to be overridden by inherited classes
+ # At the base class, those do nothing.
+ def set_symbols(self, symbols):
+ """Get a list of all symbols from kernel_doc"""
+
+ def out_doc(self, fname, name, args):
+ """Outputs a DOC block"""
+
+ def out_function(self, fname, name, args):
+ """Outputs a function"""
+
+ def out_enum(self, fname, name, args):
+ """Outputs an enum"""
+
+ def out_typedef(self, fname, name, args):
+ """Outputs a typedef"""
+
+ def out_struct(self, fname, name, args):
+ """Outputs a struct"""
+
+
+class RestFormat(OutputFormat):
+ """Consts and functions used by ReST output"""
+
+ highlights = [
+ (type_constant, r"``\1``"),
+ (type_constant2, r"``\1``"),
+
+ # Note: need to escape () to avoid func matching later
+ (type_member_func, r":c:type:`\1\2\3\\(\\) <\1>`"),
+ (type_member, r":c:type:`\1\2\3 <\1>`"),
+ (type_fp_param, r"**\1\\(\\)**"),
+ (type_fp_param2, r"**\1\\(\\)**"),
+ (type_func, r"\1()"),
+ (type_enum, r":c:type:`\1 <\2>`"),
+ (type_struct, r":c:type:`\1 <\2>`"),
+ (type_typedef, r":c:type:`\1 <\2>`"),
+ (type_union, r":c:type:`\1 <\2>`"),
+
+ # in rst this can refer to any type
+ (type_fallback, r":c:type:`\1`"),
+ (type_param_ref, r"**\1\2**")
+ ]
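+
+ # Illustrative highlight transforms (assumed inputs/outputs):
+ #   "&struct foo"  -> ":c:type:`struct foo <foo>`"
+ #   "%IRQF_SHARED" -> "``IRQF_SHARED``"
+ #   "@arg"         -> "**arg**"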
+ blankline = "\n"
+
+ sphinx_literal = KernRe(r'^[^.].*::$', cache=False)
+ sphinx_cblock = KernRe(r'^\.\.\ +code-block::', cache=False)
+
+ def __init__(self):
+ """
+ Creates class variables.
+
+ Not really mandatory, but it is a good coding style and makes
+ pylint happy.
+ """
+
+ super().__init__()
+ self.lineprefix = ""
+
+ def print_lineno(self, ln):
+ """Outputs a line number"""
+
+ if self.enable_lineno and ln is not None:
+ ln += 1
+ self.data += f".. LINENO {ln}\n"
+
+ def output_highlight(self, args):
+ """
+ Outputs a C symbol that may require being converted to ReST using
+ the self.highlights variable
+ """
+
+ input_text = args
+ output = ""
+ in_literal = False
+ litprefix = ""
+ block = ""
+
+ for line in input_text.strip("\n").split("\n"):
+
+ # If we're in a literal block, see if we should drop out of it.
+ # Otherwise, pass the line straight through unmunged.
+ if in_literal:
+ if line.strip(): # If the line is not blank
+ # If this is the first non-blank line in a literal block,
+ # figure out the proper indent.
+ if not litprefix:
+ r = KernRe(r'^(\s*)')
+ if r.match(line):
+ litprefix = '^' + r.group(1)
+ else:
+ litprefix = ""
+
+ output += line + "\n"
+ elif not KernRe(litprefix).match(line):
+ in_literal = False
+ else:
+ output += line + "\n"
+ else:
+ output += line + "\n"
+
+ # Not in a literal block (or just dropped out)
+ if not in_literal:
+ block += line + "\n"
+ if self.sphinx_literal.match(line) or self.sphinx_cblock.match(line):
+ in_literal = True
+ litprefix = ""
+ output += self.highlight_block(block)
+ block = ""
+
+ # Handle any remaining block
+ if block:
+ output += self.highlight_block(block)
+
+ # Print the output with the line prefix
+ for line in output.strip("\n").split("\n"):
+ self.data += self.lineprefix + line + "\n"
+
+ def out_section(self, args, out_docblock=False):
+ """
+ Outputs a block section.
+
+ This could use some work; it's used to output the DOC: sections, and
+ starts by putting out the name of the doc section itself, but that
+ tends to duplicate a header already in the template file.
+ """
+ for section, text in args.sections.items():
+ # Skip sections that are in the nosymbol_table
+ if section in self.nosymbol:
+ continue
+
+ if out_docblock:
+ if self.out_mode != self.OUTPUT_INCLUDE:
+ self.data += f".. _{section}:\n\n"
+ self.data += f'{self.lineprefix}**{section}**\n\n'
+ else:
+ self.data += f'{self.lineprefix}**{section}**\n\n'
+
+ self.print_lineno(args.section_start_lines.get(section, 0))
+ self.output_highlight(text)
+ self.data += "\n"
+ self.data += "\n"
+
+ def out_doc(self, fname, name, args):
+ if not self.check_doc(name, args):
+ return
+ self.out_section(args, out_docblock=True)
+
+ def out_function(self, fname, name, args):
+
+ oldprefix = self.lineprefix
+ signature = ""
+
+ func_macro = args.get('func_macro', False)
+ if func_macro:
+ signature = name
+ else:
+ if args.get('functiontype'):
+ signature = args['functiontype'] + " "
+ signature += name + " ("
+
+ ln = args.declaration_start_line
+ count = 0
+ for parameter in args.parameterlist:
+ if count != 0:
+ signature += ", "
+ count += 1
+ dtype = args.parametertypes.get(parameter, "")
+
+ if function_pointer.search(dtype):
+ signature += function_pointer.group(1) + parameter + ") (" + function_pointer.group(2) + ")"
+ else:
+ signature += dtype
+
+ if not func_macro:
+ signature += ")"
+
+ self.print_lineno(ln)
+ if args.get('typedef') or not args.get('functiontype'):
+ self.data += f".. c:macro:: {name}\n\n"
+
+ if args.get('typedef'):
+ self.data += " **Typedef**: "
+ self.lineprefix = ""
+ self.output_highlight(args.get('purpose', ""))
+ self.data += "\n\n**Syntax**\n\n"
+ self.data += f" ``{signature}``\n\n"
+ else:
+ self.data += f"``{signature}``\n\n"
+ else:
+ self.data += f".. c:function:: {signature}\n\n"
+
+ if not args.get('typedef'):
+ self.print_lineno(ln)
+ self.lineprefix = " "
+ self.output_highlight(args.get('purpose', ""))
+ self.data += "\n"
+
+ # Put descriptive text into a container (HTML <div>) to help set
+ # function prototypes apart
+ self.lineprefix = " "
+
+ if args.parameterlist:
+ self.data += ".. container:: kernelindent\n\n"
+ self.data += f"{self.lineprefix}**Parameters**\n\n"
+
+ for parameter in args.parameterlist:
+ parameter_name = KernRe(r'\[.*').sub('', parameter)
+ dtype = args.parametertypes.get(parameter, "")
+
+ if dtype:
+ self.data += f"{self.lineprefix}``{dtype}``\n"
+ else:
+ self.data += f"{self.lineprefix}``{parameter}``\n"
+
+ self.print_lineno(args.parameterdesc_start_lines.get(parameter_name, 0))
+
+ self.lineprefix = " "
+ if parameter_name in args.parameterdescs and \
+ args.parameterdescs[parameter_name] != KernelDoc.undescribed:
+
+ self.output_highlight(args.parameterdescs[parameter_name])
+ self.data += "\n"
+ else:
+ self.data += f"{self.lineprefix}*undescribed*\n\n"
+ self.lineprefix = " "
+
+ self.out_section(args)
+ self.lineprefix = oldprefix
+
+ def out_enum(self, fname, name, args):
+
+ oldprefix = self.lineprefix
+ ln = args.declaration_start_line
+
+ self.data += f"\n\n.. c:enum:: {name}\n\n"
+
+ self.print_lineno(ln)
+ self.lineprefix = " "
+ self.output_highlight(args.get('purpose', ''))
+ self.data += "\n"
+
+ self.data += ".. container:: kernelindent\n\n"
+ outer = self.lineprefix + " "
+ self.lineprefix = outer + " "
+ self.data += f"{outer}**Constants**\n\n"
+
+ for parameter in args.parameterlist:
+ self.data += f"{outer}``{parameter}``\n"
+
+ if args.parameterdescs.get(parameter, '') != KernelDoc.undescribed:
+ self.output_highlight(args.parameterdescs[parameter])
+ else:
+ self.data += f"{self.lineprefix}*undescribed*\n\n"
+ self.data += "\n"
+
+ self.lineprefix = oldprefix
+ self.out_section(args)
+
+ def out_typedef(self, fname, name, args):
+
+ oldprefix = self.lineprefix
+ ln = args.declaration_start_line
+
+ self.data += f"\n\n.. c:type:: {name}\n\n"
+
+ self.print_lineno(ln)
+ self.lineprefix = " "
+
+ self.output_highlight(args.get('purpose', ''))
+
+ self.data += "\n"
+
+ self.lineprefix = oldprefix
+ self.out_section(args)
+
+ def out_struct(self, fname, name, args):
+
+ purpose = args.get('purpose', "")
+ declaration = args.get('definition', "")
+ dtype = args.type
+ ln = args.declaration_start_line
+
+ self.data += f"\n\n.. c:{dtype}:: {name}\n\n"
+
+ self.print_lineno(ln)
+
+ oldprefix = self.lineprefix
+ self.lineprefix += " "
+
+ self.output_highlight(purpose)
+ self.data += "\n"
+
+ self.data += ".. container:: kernelindent\n\n"
+ self.data += f"{self.lineprefix}**Definition**::\n\n"
+
+ self.lineprefix = self.lineprefix + " "
+
+ declaration = declaration.replace("\t", self.lineprefix)
+
+ self.data += f"{self.lineprefix}{dtype} {name}" + ' {' + "\n"
+ self.data += f"{declaration}{self.lineprefix}" + "};\n\n"
+
+ self.lineprefix = " "
+ self.data += f"{self.lineprefix}**Members**\n\n"
+ for parameter in args.parameterlist:
+ if not parameter or parameter.startswith("#"):
+ continue
+
+ parameter_name = parameter.split("[", maxsplit=1)[0]
+
+ if args.parameterdescs.get(parameter_name) == KernelDoc.undescribed:
+ continue
+
+ self.print_lineno(args.parameterdesc_start_lines.get(parameter_name, 0))
+
+ self.data += f"{self.lineprefix}``{parameter}``\n"
+
+ self.lineprefix = " "
+ self.output_highlight(args.parameterdescs[parameter_name])
+ self.lineprefix = " "
+
+ self.data += "\n"
+
+ self.data += "\n"
+
+ self.lineprefix = oldprefix
+ self.out_section(args)
+
+
+class ManFormat(OutputFormat):
+ """Consts and functions used by man pages output"""
+
+ highlights = (
+ (type_constant, r"\1"),
+ (type_constant2, r"\1"),
+ (type_func, r"\\fB\1\\fP"),
+ (type_enum, r"\\fI\1\\fP"),
+ (type_struct, r"\\fI\1\\fP"),
+ (type_typedef, r"\\fI\1\\fP"),
+ (type_union, r"\\fI\1\\fP"),
+ (type_param, r"\\fI\1\\fP"),
+ (type_param_ref, r"\\fI\1\2\\fP"),
+ (type_member, r"\\fI\1\2\3\\fP"),
+ (type_fallback, r"\\fI\1\\fP")
+ )
+ blankline = ""
+
+ date_formats = [
+ "%a %b %d %H:%M:%S %Z %Y",
+ "%a %b %d %H:%M:%S %Y",
+ "%Y-%m-%d",
+ "%b %d %Y",
+ "%B %d %Y",
+ "%m %d %Y",
+ ]
+
+ def __init__(self, modulename):
+ """
+ Creates class variables.
+
+ Not really mandatory, but it is a good coding style and makes
+ pylint happy.
+ """
+
+ super().__init__()
+ self.modulename = modulename
+ self.symbols = []
+
+ dt = None
+ tstamp = os.environ.get("KBUILD_BUILD_TIMESTAMP")
+ if tstamp:
+ for fmt in self.date_formats:
+ try:
+ dt = datetime.strptime(tstamp, fmt)
+ break
+ except ValueError:
+ pass
+
+ if not dt:
+ dt = datetime.now()
+
+ self.man_date = dt.strftime("%B %Y")
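+
+        # For example (a sketch): with KBUILD_BUILD_TIMESTAMP set to
+        # "Sat Feb 1 10:00:00 UTC 2025", the first format above matches
+        # and self.man_date becomes "February 2025"; when the variable
+        # is unset or unparsable, the current date is used instead.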
+
+ def arg_name(self, args, name):
+ """
+ Return the name that will be used for the man page.
+
+        As the same name may exist in different namespaces, prepend
+        the data type for all types except functions and typedefs.
+
+ The doc section is special: it uses the modulename.
+ """
+
+ dtype = args.type
+
+ if dtype == "doc":
+ return self.modulename
+
+ if dtype in ["function", "typedef"]:
+ return name
+
+ return f"{dtype} {name}"
+
+ def set_symbols(self, symbols):
+ """
+ Get a list of all symbols from kernel_doc.
+
+        Man pages will use it to add a SEE ALSO section listing the
+        other symbols documented in the same file.
+ """
+ self.symbols = symbols
+
+ def out_tail(self, fname, name, args):
+ """Adds a tail for all man pages"""
+
+ # SEE ALSO section
+ self.data += f'.SH "SEE ALSO"' + "\n.PP\n"
+ self.data += (f"Kernel file \\fB{args.fname}\\fR\n")
+ if len(self.symbols) >= 2:
+ cur_name = self.arg_name(args, name)
+
+ related = []
+ for arg in self.symbols:
+ out_name = self.arg_name(arg, arg.name)
+
+ if cur_name == out_name:
+ continue
+
+ related.append(f"\\fB{out_name}\\fR(9)")
+
+ self.data += ",\n".join(related) + "\n"
+
+ # TODO: does it make sense to add other sections? Maybe
+ # REPORTING ISSUES? LICENSE?
+
+ def msg(self, fname, name, args):
+ """
+ Handles a single entry from kernel-doc parser.
+
+ Add a tail at the end of man pages output.
+ """
+ super().msg(fname, name, args)
+ self.out_tail(fname, name, args)
+
+ return self.data
+
+ def output_highlight(self, block):
+ """
+        Output a block of text, highlighting any C symbols that match
+        the self.highlights patterns with troff markup.
+ """
+
+ contents = self.highlight_block(block)
+
+ if isinstance(contents, list):
+ contents = "\n".join(contents)
+
+ for line in contents.strip("\n").split("\n"):
+ line = KernRe(r"^\s*").sub("", line)
+ if not line:
+ continue
+
+ if line[0] == ".":
+ self.data += "\\&" + line + "\n"
+ else:
+ self.data += line + "\n"
+
+ def out_doc(self, fname, name, args):
+ if not self.check_doc(name, args):
+ return
+
+ out_name = self.arg_name(args, name)
+
+ self.data += f'.TH "{self.modulename}" 9 "{out_name}" "{self.man_date}" "API Manual" LINUX' + "\n"
+
+ for section, text in args.sections.items():
+ self.data += f'.SH "{section}"' + "\n"
+ self.output_highlight(text)
+
+ def out_function(self, fname, name, args):
+ """output function in man"""
+
+ out_name = self.arg_name(args, name)
+
+ self.data += f'.TH "{name}" 9 "{out_name}" "{self.man_date}" "Kernel Hacker\'s Manual" LINUX' + "\n"
+
+ self.data += ".SH NAME\n"
+ self.data += f"{name} \\- {args['purpose']}\n"
+
+ self.data += ".SH SYNOPSIS\n"
+ if args.get('functiontype', ''):
+ self.data += f'.B "{args["functiontype"]}" {name}' + "\n"
+ else:
+ self.data += f'.B "{name}' + "\n"
+
+ count = 0
+ parenth = "("
+ post = ","
+
+ for parameter in args.parameterlist:
+ if count == len(args.parameterlist) - 1:
+ post = ");"
+
+ dtype = args.parametertypes.get(parameter, "")
+            if function_pointer.match(dtype):
+                # Pointer-to-function
+                self.data += f'.BI "{parenth}{function_pointer.group(1)}" "{parameter}" ") ({function_pointer.group(2)}){post}"' + "\n"
+            else:
+                # Add a trailing space to the type unless it ends with '*'
+                dtype = KernRe(r'([^\*])$').sub(r'\1 ', dtype)
+
+                self.data += f'.BI "{parenth}{dtype}" "{parameter}{post}"' + "\n"
+ count += 1
+ parenth = ""
+
+ if args.parameterlist:
+ self.data += ".SH ARGUMENTS\n"
+
+ for parameter in args.parameterlist:
+ parameter_name = re.sub(r'\[.*', '', parameter)
+
+ self.data += f'.IP "{parameter}" 12' + "\n"
+ self.output_highlight(args.parameterdescs.get(parameter_name, ""))
+
+ for section, text in args.sections.items():
+ self.data += f'.SH "{section.upper()}"' + "\n"
+ self.output_highlight(text)
+
+ def out_enum(self, fname, name, args):
+ out_name = self.arg_name(args, name)
+
+ self.data += f'.TH "{self.modulename}" 9 "{out_name}" "{self.man_date}" "API Manual" LINUX' + "\n"
+
+ self.data += ".SH NAME\n"
+ self.data += f"enum {name} \\- {args['purpose']}\n"
+
+ self.data += ".SH SYNOPSIS\n"
+ self.data += f"enum {name}" + " {\n"
+
+ count = 0
+ for parameter in args.parameterlist:
+ self.data += f'.br\n.BI " {parameter}"' + "\n"
+ if count == len(args.parameterlist) - 1:
+ self.data += "\n};\n"
+ else:
+ self.data += ", \n.br\n"
+
+ count += 1
+
+ self.data += ".SH Constants\n"
+
+ for parameter in args.parameterlist:
+ parameter_name = KernRe(r'\[.*').sub('', parameter)
+ self.data += f'.IP "{parameter}" 12' + "\n"
+ self.output_highlight(args.parameterdescs.get(parameter_name, ""))
+
+ for section, text in args.sections.items():
+ self.data += f'.SH "{section}"' + "\n"
+ self.output_highlight(text)
+
+ def out_typedef(self, fname, name, args):
+ module = self.modulename
+ purpose = args.get('purpose')
+ out_name = self.arg_name(args, name)
+
+ self.data += f'.TH "{module}" 9 "{out_name}" "{self.man_date}" "API Manual" LINUX' + "\n"
+
+ self.data += ".SH NAME\n"
+ self.data += f"typedef {name} \\- {purpose}\n"
+
+ for section, text in args.sections.items():
+ self.data += f'.SH "{section}"' + "\n"
+ self.output_highlight(text)
+
+ def out_struct(self, fname, name, args):
+ module = self.modulename
+ purpose = args.get('purpose')
+ definition = args.get('definition')
+ out_name = self.arg_name(args, name)
+
+ self.data += f'.TH "{module}" 9 "{out_name}" "{self.man_date}" "API Manual" LINUX' + "\n"
+
+ self.data += ".SH NAME\n"
+ self.data += f"{args.type} {name} \\- {purpose}\n"
+
+ # Replace tabs with two spaces and handle newlines
+ declaration = definition.replace("\t", " ")
+ declaration = KernRe(r"\n").sub('"\n.br\n.BI "', declaration)
+
+ self.data += ".SH SYNOPSIS\n"
+ self.data += f"{args.type} {name} " + "{" + "\n.br\n"
+ self.data += f'.BI "{declaration}\n' + "};\n.br\n\n"
+
+ self.data += ".SH Members\n"
+ for parameter in args.parameterlist:
+ if parameter.startswith("#"):
+ continue
+
+ parameter_name = re.sub(r"\[.*", "", parameter)
+
+ if args.parameterdescs.get(parameter_name) == KernelDoc.undescribed:
+ continue
+
+ self.data += f'.IP "{parameter}" 12' + "\n"
+ self.output_highlight(args.parameterdescs.get(parameter_name))
+
+ for section, text in args.sections.items():
+ self.data += f'.SH "{section}"' + "\n"
+ self.output_highlight(text)
diff --git a/tools/lib/python/kdoc/kdoc_parser.py b/tools/lib/python/kdoc/kdoc_parser.py
new file mode 100644
index 000000000000..500aafc50032
--- /dev/null
+++ b/tools/lib/python/kdoc/kdoc_parser.py
@@ -0,0 +1,1670 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+#
+# pylint: disable=C0301,C0302,R0904,R0912,R0913,R0914,R0915,R0917,R1702
+
+"""
+kdoc_parser
+===========
+
+Read a C language source or header FILE and extract embedded
+documentation comments
+"""
+
+import sys
+import re
+from pprint import pformat
+
+from kdoc.kdoc_re import NestedMatch, KernRe
+from kdoc.kdoc_item import KdocItem
+
+#
+# Regular expressions used to parse kernel-doc markups at KernelDoc class.
+#
+# Let's declare them in lowercase outside any class to make it easier to
+# convert from the Perl script.
+#
+# As those are evaluated at the beginning, no need to cache them
+#
+
+# Allow whitespace at end of comment start.
+doc_start = KernRe(r'^/\*\*\s*$', cache=False)
+
+doc_end = KernRe(r'\*/', cache=False)
+doc_com = KernRe(r'\s*\*\s*', cache=False)
+doc_com_body = KernRe(r'\s*\* ?', cache=False)
+doc_decl = doc_com + KernRe(r'(\w+)', cache=False)
+
+# @params and a strictly limited set of supported section names
+# Specifically:
+# Match @word:
+# @...:
+# @{section-name}:
+# while trying to not match literal block starts like "example::"
+#
+known_section_names = 'description|context|returns?|notes?|examples?'
+known_sections = KernRe(known_section_names, flags = re.I)
+doc_sect = doc_com + \
+ KernRe(r'\s*(@[.\w]+|@\.\.\.|' + known_section_names + r')\s*:([^:].*)?$',
+ flags=re.I, cache=False)
+
+doc_content = doc_com_body + KernRe(r'(.*)', cache=False)
+doc_inline_start = KernRe(r'^\s*/\*\*\s*$', cache=False)
+doc_inline_sect = KernRe(r'\s*\*\s*(@\s*[\w][\w\.]*\s*):(.*)', cache=False)
+doc_inline_end = KernRe(r'^\s*\*/\s*$', cache=False)
+doc_inline_oneline = KernRe(r'^\s*/\*\*\s*(@[\w\s]+):\s*(.*)\s*\*/\s*$', cache=False)
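+
+# By way of example (hypothetical comment lines), doc_sect matches lines
+# such as:
+#
+#      * @offset: the byte offset to start from
+#      * Return: 0 on success, a negative errno otherwise
+#
+# capturing the section name in group 1 and any same-line contents in
+# group 2, while doc_inline_oneline matches a complete one-line comment
+# such as "/** @lock: protects the counters */".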
+
+export_symbol = KernRe(r'^\s*EXPORT_SYMBOL(_GPL)?\s*\(\s*(\w+)\s*\)\s*', cache=False)
+export_symbol_ns = KernRe(r'^\s*EXPORT_SYMBOL_NS(_GPL)?\s*\(\s*(\w+)\s*,\s*"\S+"\)\s*', cache=False)
+
+type_param = KernRe(r"@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)", cache=False)
+
+#
+# Tests for the beginning of a kerneldoc block in its various forms.
+#
+doc_block = doc_com + KernRe(r'DOC:\s*(.*)?', cache=False)
+doc_begin_data = KernRe(r"^\s*\*?\s*(struct|union|enum|typedef)\b\s*(\w*)", cache = False)
+doc_begin_func = KernRe(str(doc_com) +             # initial " * "
+ r"(?:\w+\s*\*\s*)?" + # type (not captured)
+ r'(?:define\s+)?' + # possible "define" (not captured)
+ r'(\w+)\s*(?:\(\w*\))?\s*' + # name and optional "(...)"
+ r'(?:[-:].*)?$', # description (not captured)
+ cache = False)
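+
+# e.g. doc_begin_func matches lines like " * foo_bar() - does something"
+# or " * define FOO - a useful macro", capturing "foo_bar" / "FOO" in
+# group 1.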
+
+#
+# Here begins a long set of transformations to turn structure member prefixes
+# and macro invocations into something we can parse and generate kdoc for.
+#
+struct_args_pattern = r'([^,)]+)'
+
+struct_xforms = [
+ # Strip attributes
+ (KernRe(r"__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)", flags=re.I | re.S, cache=False), ' '),
+ (KernRe(r'\s*__aligned\s*\([^;]*\)', re.S), ' '),
+ (KernRe(r'\s*__counted_by\s*\([^;]*\)', re.S), ' '),
+ (KernRe(r'\s*__counted_by_(le|be)\s*\([^;]*\)', re.S), ' '),
+ (KernRe(r'\s*__packed\s*', re.S), ' '),
+ (KernRe(r'\s*CRYPTO_MINALIGN_ATTR', re.S), ' '),
+ (KernRe(r'\s*__private', re.S), ' '),
+ (KernRe(r'\s*__rcu', re.S), ' '),
+ (KernRe(r'\s*____cacheline_aligned_in_smp', re.S), ' '),
+ (KernRe(r'\s*____cacheline_aligned', re.S), ' '),
+ (KernRe(r'\s*__cacheline_group_(begin|end)\([^\)]+\);'), ''),
+ #
+ # Unwrap struct_group macros based on this definition:
+ # __struct_group(TAG, NAME, ATTRS, MEMBERS...)
+ # which has variants like: struct_group(NAME, MEMBERS...)
+ # Only MEMBERS arguments require documentation.
+ #
+    # Parsing them happens in two steps:
+    #
+    # 1. drop the struct_group arguments that aren't MEMBERS,
+    #    storing them as STRUCT_GROUP(MEMBERS)
+    #
+    # 2. remove the STRUCT_GROUP() ancillary macro.
+ #
+ # The original logic used to remove STRUCT_GROUP() using an
+ # advanced regex:
+ #
+ # \bSTRUCT_GROUP(\(((?:(?>[^)(]+)|(?1))*)\))[^;]*;
+ #
+    # with two constructs that are incompatible with the
+    # Python re module, as it has:
+ #
+ # - a recursive pattern: (?1)
+ # - an atomic grouping: (?>...)
+ #
+    # I tried a simpler version, but it didn't work either:
+    #         \bSTRUCT_GROUP\(([^\)]+)\)[^;]*;
+    #
+    # as it doesn't properly match the ending parenthesis in some cases.
+    #
+    # So, a better solution was crafted: there's now a NestedMatch
+    # class that ensures that delimiters after a search are properly
+    # matched. So, the implementation to drop STRUCT_GROUP() is
+    # handled separately.
+ #
+ (KernRe(r'\bstruct_group\s*\(([^,]*,)', re.S), r'STRUCT_GROUP('),
+ (KernRe(r'\bstruct_group_attr\s*\(([^,]*,){2}', re.S), r'STRUCT_GROUP('),
+ (KernRe(r'\bstruct_group_tagged\s*\(([^,]*),([^,]*),', re.S), r'struct \1 \2; STRUCT_GROUP('),
+ (KernRe(r'\b__struct_group\s*\(([^,]*,){3}', re.S), r'STRUCT_GROUP('),
+ #
+ # Replace macros
+ #
+ # TODO: use NestedMatch for FOO($1, $2, ...) matches
+ #
+ # it is better to also move those to the NestedMatch logic,
+ # to ensure that parentheses will be properly matched.
+ #
+ (KernRe(r'__ETHTOOL_DECLARE_LINK_MODE_MASK\s*\(([^\)]+)\)', re.S),
+ r'DECLARE_BITMAP(\1, __ETHTOOL_LINK_MODE_MASK_NBITS)'),
+ (KernRe(r'DECLARE_PHY_INTERFACE_MASK\s*\(([^\)]+)\)', re.S),
+ r'DECLARE_BITMAP(\1, PHY_INTERFACE_MODE_MAX)'),
+ (KernRe(r'DECLARE_BITMAP\s*\(' + struct_args_pattern + r',\s*' + struct_args_pattern + r'\)',
+ re.S), r'unsigned long \1[BITS_TO_LONGS(\2)]'),
+ (KernRe(r'DECLARE_HASHTABLE\s*\(' + struct_args_pattern + r',\s*' + struct_args_pattern + r'\)',
+ re.S), r'unsigned long \1[1 << ((\2) - 1)]'),
+ (KernRe(r'DECLARE_KFIFO\s*\(' + struct_args_pattern + r',\s*' + struct_args_pattern +
+ r',\s*' + struct_args_pattern + r'\)', re.S), r'\2 *\1'),
+ (KernRe(r'DECLARE_KFIFO_PTR\s*\(' + struct_args_pattern + r',\s*' +
+ struct_args_pattern + r'\)', re.S), r'\2 *\1'),
+ (KernRe(r'(?:__)?DECLARE_FLEX_ARRAY\s*\(' + struct_args_pattern + r',\s*' +
+ struct_args_pattern + r'\)', re.S), r'\1 \2[]'),
+ (KernRe(r'DEFINE_DMA_UNMAP_ADDR\s*\(' + struct_args_pattern + r'\)', re.S), r'dma_addr_t \1'),
+ (KernRe(r'DEFINE_DMA_UNMAP_LEN\s*\(' + struct_args_pattern + r'\)', re.S), r'__u32 \1'),
+]
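+#
+# As a rough sketch of the two-step struct_group handling above (the
+# member text is invented for illustration):
+#
+#     struct_group(stats, u64 tx; u64 rx;);
+#
+# becomes, after apply_transforms(struct_xforms, ...):
+#
+#     STRUCT_GROUP( u64 tx; u64 rx;);
+#
+# and the STRUCT_GROUP() wrapper is then unwrapped to its MEMBERS
+# contents by NestedMatch, using the table below.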
+#
+# Regexes here are guaranteed to have the end delimiter matching
+# the start delimiter. Yet, right now, only one replace group
+# is allowed.
+#
+struct_nested_prefixes = [
+ (re.compile(r'\bSTRUCT_GROUP\('), r'\1'),
+]
+
+#
+# Transforms for function prototypes
+#
+function_xforms = [
+ (KernRe(r"^static +"), ""),
+ (KernRe(r"^extern +"), ""),
+ (KernRe(r"^asmlinkage +"), ""),
+ (KernRe(r"^inline +"), ""),
+ (KernRe(r"^__inline__ +"), ""),
+ (KernRe(r"^__inline +"), ""),
+ (KernRe(r"^__always_inline +"), ""),
+ (KernRe(r"^noinline +"), ""),
+ (KernRe(r"^__FORTIFY_INLINE +"), ""),
+ (KernRe(r"__init +"), ""),
+ (KernRe(r"__init_or_module +"), ""),
+ (KernRe(r"__deprecated +"), ""),
+ (KernRe(r"__flatten +"), ""),
+ (KernRe(r"__meminit +"), ""),
+ (KernRe(r"__must_check +"), ""),
+ (KernRe(r"__weak +"), ""),
+ (KernRe(r"__sched +"), ""),
+ (KernRe(r"_noprof"), ""),
+ (KernRe(r"__always_unused *"), ""),
+ (KernRe(r"__printf\s*\(\s*\d*\s*,\s*\d*\s*\) +"), ""),
+ (KernRe(r"__(?:re)?alloc_size\s*\(\s*\d+\s*(?:,\s*\d+\s*)?\) +"), ""),
+ (KernRe(r"__diagnose_as\s*\(\s*\S+\s*(?:,\s*\d+\s*)*\) +"), ""),
+ (KernRe(r"DECL_BUCKET_PARAMS\s*\(\s*(\S+)\s*,\s*(\S+)\s*\)"), r"\1, \2"),
+ (KernRe(r"__attribute_const__ +"), ""),
+ (KernRe(r"__attribute__\s*\(\((?:[\w\s]+(?:\([^)]*\))?\s*,?)+\)\)\s+"), ""),
+]
+
+#
+# Apply a set of transforms to a block of text.
+#
+def apply_transforms(xforms, text):
+ for search, subst in xforms:
+ text = search.sub(subst, text)
+ return text
+
+#
+# A little helper to get rid of excess white space
+#
+multi_space = KernRe(r'\s\s+')
+def trim_whitespace(s):
+ return multi_space.sub(' ', s.strip())
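+
+# e.g. trim_whitespace("  int   foo ") returns "int foo"; note that
+# single interior spaces are left alone, as the pattern needs two or more.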
+
+#
+# Remove struct/enum members that have been marked "private".
+#
+def trim_private_members(text):
+ #
+ # First look for a "public:" block that ends a private region, then
+ # handle the "private until the end" case.
+ #
+ text = KernRe(r'/\*\s*private:.*?/\*\s*public:.*?\*/', flags=re.S).sub('', text)
+ text = KernRe(r'/\*\s*private:.*', flags=re.S).sub('', text)
+ #
+ # We needed the comments to do the above, but now we can take them out.
+ #
+ return KernRe(r'\s*/\*.*?\*/\s*', flags=re.S).sub('', text).strip()
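+
+# A quick illustration (made-up member text):
+#
+#     int a; /* private: */ int b; /* public: */ int c;
+#
+# keeps "int a;" and "int c;", while a trailing "/* private: */" with no
+# matching "/* public: */" drops everything from that comment onward.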
+
+class state:
+ """
+ State machine enums
+ """
+
+ # Parser states
+ NORMAL = 0 # normal code
+ NAME = 1 # looking for function name
+ DECLARATION = 2 # We have seen a declaration which might not be done
+ BODY = 3 # the body of the comment
+ SPECIAL_SECTION = 4 # doc section ending with a blank line
+ PROTO = 5 # scanning prototype
+ DOCBLOCK = 6 # documentation block
+ INLINE_NAME = 7 # gathering doc outside main block
+ INLINE_TEXT = 8 # reading the body of inline docs
+
+ name = [
+ "NORMAL",
+ "NAME",
+ "DECLARATION",
+ "BODY",
+ "SPECIAL_SECTION",
+ "PROTO",
+ "DOCBLOCK",
+ "INLINE_NAME",
+ "INLINE_TEXT",
+ ]
+
+
+SECTION_DEFAULT = "Description" # default section
+
+class KernelEntry:
+
+ def __init__(self, config, fname, ln):
+ self.config = config
+ self.fname = fname
+
+ self._contents = []
+ self.prototype = ""
+
+ self.warnings = []
+
+ self.parameterlist = []
+ self.parameterdescs = {}
+ self.parametertypes = {}
+ self.parameterdesc_start_lines = {}
+
+ self.section_start_lines = {}
+ self.sections = {}
+
+ self.anon_struct_union = False
+
+ self.leading_space = None
+
+ # State flags
+ self.brcount = 0
+ self.declaration_start_line = ln + 1
+
+ #
+ # Management of section contents
+ #
+ def add_text(self, text):
+ self._contents.append(text)
+
+ def contents(self):
+ return '\n'.join(self._contents) + '\n'
+
+ # TODO: rename to emit_message after removal of kernel-doc.pl
+ def emit_msg(self, ln, msg, *, warning=True):
+ """Emit a message"""
+
+ log_msg = f"{self.fname}:{ln} {msg}"
+
+ if not warning:
+ self.config.log.info(log_msg)
+ return
+
+ # Delegate warning output to output logic, as this way it
+ # will report warnings/info only for symbols that are output
+
+ self.warnings.append(log_msg)
+ return
+
+ #
+ # Begin a new section.
+ #
+ def begin_section(self, line_no, title = SECTION_DEFAULT, dump = False):
+ if dump:
+ self.dump_section(start_new = True)
+ self.section = title
+ self.new_start_line = line_no
+
+ def dump_section(self, start_new=True):
+ """
+        Dump the accumulated section contents into the lists and
+        dicts kept for that purpose.
+ """
+ #
+ # If we have accumulated no contents in the default ("description")
+ # section, don't bother.
+ #
+ if self.section == SECTION_DEFAULT and not self._contents:
+ return
+ name = self.section
+ contents = self.contents()
+
+ if type_param.match(name):
+ name = type_param.group(1)
+
+ self.parameterdescs[name] = contents
+ self.parameterdesc_start_lines[name] = self.new_start_line
+
+ self.new_start_line = 0
+
+ else:
+ if name in self.sections and self.sections[name] != "":
+ # Only warn on user-specified duplicate section names
+ if name != SECTION_DEFAULT:
+ self.emit_msg(self.new_start_line,
+ f"duplicate section name '{name}'")
+ # Treat as a new paragraph - add a blank line
+ self.sections[name] += '\n' + contents
+ else:
+ self.sections[name] = contents
+ self.section_start_lines[name] = self.new_start_line
+ self.new_start_line = 0
+
+# self.config.log.debug("Section: %s : %s", name, pformat(vars(self)))
+
+ if start_new:
+ self.section = SECTION_DEFAULT
+ self._contents = []
+
+python_warning = False
+
+class KernelDoc:
+ """
+ Read a C language source or header FILE and extract embedded
+ documentation comments.
+ """
+
+ # Section names
+
+ section_context = "Context"
+ section_return = "Return"
+
+ undescribed = "-- undescribed --"
+
+ def __init__(self, config, fname):
+ """Initialize internal variables"""
+
+ self.fname = fname
+ self.config = config
+
+ # Initial state for the state machines
+ self.state = state.NORMAL
+
+ # Store entry currently being processed
+ self.entry = None
+
+ # Place all potential outputs into an array
+ self.entries = []
+
+ #
+ # We need Python 3.7 for its "dicts remember the insertion
+ # order" guarantee
+ #
+ global python_warning
+ if (not python_warning and
+ sys.version_info.major == 3 and sys.version_info.minor < 7):
+
+ self.emit_msg(0,
+ 'Python 3.7 or later is required for correct results')
+ python_warning = True
+
+ def emit_msg(self, ln, msg, *, warning=True):
+ """Emit a message"""
+
+ if self.entry:
+ self.entry.emit_msg(ln, msg, warning=warning)
+ return
+
+ log_msg = f"{self.fname}:{ln} {msg}"
+
+ if warning:
+ self.config.log.warning(log_msg)
+ else:
+ self.config.log.info(log_msg)
+
+ def dump_section(self, start_new=True):
+ """
+        Dump the current entry's section contents into the lists and
+        dicts kept for that purpose.
+ """
+
+ if self.entry:
+ self.entry.dump_section(start_new)
+
+ # TODO: rename it to store_declaration after removal of kernel-doc.pl
+ def output_declaration(self, dtype, name, **args):
+ """
+ Stores the entry into an entry array.
+
+ The actual output and output filters will be handled elsewhere
+ """
+
+ item = KdocItem(name, self.fname, dtype,
+ self.entry.declaration_start_line, **args)
+ item.warnings = self.entry.warnings
+
+ # Drop empty sections
+ # TODO: improve empty sections logic to emit warnings
+ sections = self.entry.sections
+ for section in ["Description", "Return"]:
+ if section in sections and not sections[section].rstrip():
+ del sections[section]
+ item.set_sections(sections, self.entry.section_start_lines)
+ item.set_params(self.entry.parameterlist, self.entry.parameterdescs,
+ self.entry.parametertypes,
+ self.entry.parameterdesc_start_lines)
+ self.entries.append(item)
+
+ self.config.log.debug("Output: %s:%s = %s", dtype, name, pformat(args))
+
+ def reset_state(self, ln):
+ """
+ Ancillary routine to create a new entry. It initializes all
+ variables used by the state machine.
+ """
+
+ #
+ # Flush the warnings out before we proceed further
+ #
+ if self.entry and self.entry not in self.entries:
+ for log_msg in self.entry.warnings:
+ self.config.log.warning(log_msg)
+
+ self.entry = KernelEntry(self.config, self.fname, ln)
+
+ # State flags
+ self.state = state.NORMAL
+
+ def push_parameter(self, ln, decl_type, param, dtype,
+ org_arg, declaration_name):
+ """
+        Store parameters and their descriptions in self.entry.
+ """
+
+ if self.entry.anon_struct_union and dtype == "" and param == "}":
+ return # Ignore the ending }; from anonymous struct/union
+
+ self.entry.anon_struct_union = False
+
+ param = KernRe(r'[\[\)].*').sub('', param, count=1)
+
+ #
+ # Look at various "anonymous type" cases.
+ #
+ if dtype == '':
+ if param.endswith("..."):
+ if len(param) > 3: # there is a name provided, use that
+ param = param[:-3]
+ if not self.entry.parameterdescs.get(param):
+ self.entry.parameterdescs[param] = "variable arguments"
+
+ elif (not param) or param == "void":
+ param = "void"
+ self.entry.parameterdescs[param] = "no arguments"
+
+ elif param in ["struct", "union"]:
+ # Handle unnamed (anonymous) union or struct
+ dtype = param
+ param = "{unnamed_" + param + "}"
+ self.entry.parameterdescs[param] = "anonymous\n"
+ self.entry.anon_struct_union = True
+
+ # Warn if parameter has no description
+ # (but ignore ones starting with # as these are not parameters
+ # but inline preprocessor statements)
+ if param not in self.entry.parameterdescs and not param.startswith("#"):
+ self.entry.parameterdescs[param] = self.undescribed
+
+ if "." not in param:
+ if decl_type == 'function':
+ dname = f"{decl_type} parameter"
+ else:
+ dname = f"{decl_type} member"
+
+ self.emit_msg(ln,
+ f"{dname} '{param}' not described in '{declaration_name}'")
+
+ # Strip spaces from param so that it is one continuous string on
+ # parameterlist. This fixes a problem where check_sections()
+ # cannot find a parameter like "addr[6 + 2]" because it actually
+ # appears as "addr[6", "+", "2]" on the parameter list.
+ # However, it's better to maintain the param string unchanged for
+ # output, so just weaken the string compare in check_sections()
+ # to ignore "[blah" in a parameter string.
+
+ self.entry.parameterlist.append(param)
+ org_arg = KernRe(r'\s\s+').sub(' ', org_arg)
+ self.entry.parametertypes[param] = org_arg
+
+
+ def create_parameter_list(self, ln, decl_type, args,
+ splitter, declaration_name):
+ """
+        Create the list of parameters, storing them in self.entry.
+ """
+
+ # temporarily replace all commas inside function pointer definition
+ arg_expr = KernRe(r'(\([^\),]+),')
+ while arg_expr.search(args):
+ args = arg_expr.sub(r"\1#", args)
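+        # e.g. (hypothetical args string) "void (*cb)(int a, int b), u32 flags"
+        # becomes "void (*cb)(int a# int b), u32 flags", so the split on
+        # the real separator below no longer breaks the function-pointer
+        # argument apart; the '#' is turned back into ',' when the
+        # pointer-to-function case is handled.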
+
+ for arg in args.split(splitter):
+ # Ignore argument attributes
+ arg = KernRe(r'\sPOS0?\s').sub(' ', arg)
+
+ # Strip leading/trailing spaces
+ arg = arg.strip()
+ arg = KernRe(r'\s+').sub(' ', arg, count=1)
+
+ if arg.startswith('#'):
+ # Treat preprocessor directive as a typeless variable just to fill
+ # corresponding data structures "correctly". Catch it later in
+ # output_* subs.
+
+ # Treat preprocessor directive as a typeless variable
+ self.push_parameter(ln, decl_type, arg, "",
+ "", declaration_name)
+ #
+ # The pointer-to-function case.
+ #
+ elif KernRe(r'\(.+\)\s*\(').search(arg):
+ arg = arg.replace('#', ',')
+ r = KernRe(r'[^\(]+\(\*?\s*' # Everything up to "(*"
+ r'([\w\[\].]*)' # Capture the name and possible [array]
+ r'\s*\)') # Make sure the trailing ")" is there
+ if r.match(arg):
+ param = r.group(1)
+ else:
+ self.emit_msg(ln, f"Invalid param: {arg}")
+ param = arg
+ dtype = arg.replace(param, '')
+ self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name)
+ #
+ # The array-of-pointers case. Dig the parameter name out from the middle
+ # of the declaration.
+ #
+ elif KernRe(r'\(.+\)\s*\[').search(arg):
+ r = KernRe(r'[^\(]+\(\s*\*\s*' # Up to "(" and maybe "*"
+ r'([\w.]*?)' # The actual pointer name
+ r'\s*(\[\s*\w+\s*\]\s*)*\)') # The [array portion]
+ if r.match(arg):
+ param = r.group(1)
+ else:
+ self.emit_msg(ln, f"Invalid param: {arg}")
+ param = arg
+ dtype = arg.replace(param, '')
+ self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name)
+ elif arg:
+ #
+ # Clean up extraneous spaces and split the string at commas; the first
+ # element of the resulting list will also include the type information.
+ #
+ arg = KernRe(r'\s*:\s*').sub(":", arg)
+ arg = KernRe(r'\s*\[').sub('[', arg)
+ args = KernRe(r'\s*,\s*').split(arg)
+ args[0] = re.sub(r'(\*+)\s*', r' \1', args[0])
+ #
+ # args[0] has a string of "type a". If "a" includes an [array]
+ # declaration, we want to not be fooled by any white space inside
+ # the brackets, so detect and handle that case specially.
+ #
+ r = KernRe(r'^([^[\]]*\s+)(.*)$')
+ if r.match(args[0]):
+ args[0] = r.group(2)
+ dtype = r.group(1)
+ else:
+ # No space in args[0]; this seems wrong but preserves previous behavior
+ dtype = ''
+
+ bitfield_re = KernRe(r'(.*?):(\w+)')
+ for param in args:
+ #
+ # For pointers, shift the star(s) from the variable name to the
+ # type declaration.
+ #
+ r = KernRe(r'^(\*+)\s*(.*)')
+ if r.match(param):
+ self.push_parameter(ln, decl_type, r.group(2),
+ f"{dtype} {r.group(1)}",
+ arg, declaration_name)
+ #
+ # Perform a similar shift for bitfields.
+ #
+ elif bitfield_re.search(param):
+ if dtype != "": # Skip unnamed bit-fields
+ self.push_parameter(ln, decl_type, bitfield_re.group(1),
+ f"{dtype}:{bitfield_re.group(2)}",
+ arg, declaration_name)
+ else:
+ self.push_parameter(ln, decl_type, param, dtype,
+ arg, declaration_name)
+
+ def check_sections(self, ln, decl_name, decl_type):
+ """
+        Check the documented sections, warning about any that describe
+        a parameter or member not present in the declaration.
+ """
+ for section in self.entry.sections:
+ if section not in self.entry.parameterlist and \
+ not known_sections.search(section):
+ if decl_type == 'function':
+ dname = f"{decl_type} parameter"
+ else:
+ dname = f"{decl_type} member"
+ self.emit_msg(ln,
+ f"Excess {dname} '{section}' description in '{decl_name}'")
+
+ def check_return_section(self, ln, declaration_name, return_type):
+ """
+        If the function doesn't return void, warn about a missing
+        description of the return value.
+ """
+
+ if not self.config.wreturn:
+ return
+
+ # Ignore an empty return type (It's a macro)
+ # Ignore functions with a "void" return type (but not "void *")
+ if not return_type or KernRe(r'void\s*\w*\s*$').search(return_type):
+ return
+
+ if not self.entry.sections.get("Return", None):
+ self.emit_msg(ln,
+ f"No description found for return value of '{declaration_name}'")
+
+ #
+ # Split apart a structure prototype; returns (struct|union, name, members) or None
+ #
+ def split_struct_proto(self, proto):
+ type_pattern = r'(struct|union)'
+ qualifiers = [
+ "__attribute__",
+ "__packed",
+ "__aligned",
+ "____cacheline_aligned_in_smp",
+ "____cacheline_aligned",
+ ]
+ definition_body = r'\{(.*)\}\s*' + "(?:" + '|'.join(qualifiers) + ")?"
+
+ r = KernRe(type_pattern + r'\s+(\w+)\s*' + definition_body)
+ if r.search(proto):
+ return (r.group(1), r.group(2), r.group(3))
+ else:
+ r = KernRe(r'typedef\s+' + type_pattern + r'\s*' + definition_body + r'\s*(\w+)\s*;')
+ if r.search(proto):
+ return (r.group(1), r.group(3), r.group(2))
+ return None
+ #
+ # Rewrite the members of a structure or union for easier formatting later on.
+ # Among other things, this function will turn a member like:
+ #
+ # struct { inner_members; } foo;
+ #
+ # into:
+ #
+ # struct foo; inner_members;
+ #
+ def rewrite_struct_members(self, members):
+ #
+ # Process struct/union members from the most deeply nested outward. The
+ # trick is in the ^{ below - it prevents a match of an outer struct/union
+ # until the inner one has been munged (removing the "{" in the process).
+ #
+ struct_members = KernRe(r'(struct|union)' # 0: declaration type
+ r'([^\{\};]+)' # 1: possible name
+ r'(\{)'
+ r'([^\{\}]*)' # 3: Contents of declaration
+ r'(\})'
+ r'([^\{\};]*)(;)') # 5: Remaining stuff after declaration
+ tuples = struct_members.findall(members)
+ while tuples:
+ for t in tuples:
+ newmember = ""
+ oldmember = "".join(t) # Reconstruct the original formatting
+ dtype, name, lbr, content, rbr, rest, semi = t
+ #
+ # Pass through each field name, normalizing the form and formatting.
+ #
+ for s_id in rest.split(','):
+ s_id = s_id.strip()
+ newmember += f"{dtype} {s_id}; "
+ #
+ # Remove bitfield/array/pointer info, getting the bare name.
+ #
+ s_id = KernRe(r'[:\[].*').sub('', s_id)
+ s_id = KernRe(r'^\s*\**(\S+)\s*').sub(r'\1', s_id)
+ #
+ # Pass through the members of this inner structure/union.
+ #
+ for arg in content.split(';'):
+ arg = arg.strip()
+ #
+ # Look for (type)(*name)(args) - pointer to function
+ #
+ r = KernRe(r'^([^\(]+\(\*?\s*)([\w.]*)(\s*\).*)')
+ if r.match(arg):
+ dtype, name, extra = r.group(1), r.group(2), r.group(3)
+ # Pointer-to-function
+ if not s_id:
+ # Anonymous struct/union
+ newmember += f"{dtype}{name}{extra}; "
+ else:
+ newmember += f"{dtype}{s_id}.{name}{extra}; "
+ #
+ # Otherwise a non-function member.
+ #
+ else:
+ #
+ # Remove bitmap and array portions and spaces around commas
+ #
+ arg = KernRe(r':\s*\d+\s*').sub('', arg)
+ arg = KernRe(r'\[.*\]').sub('', arg)
+ arg = KernRe(r'\s*,\s*').sub(',', arg)
+ #
+ # Look for a normal decl - "type name[,name...]"
+ #
+ r = KernRe(r'(.*)\s+([\S+,]+)')
+ if r.search(arg):
+ for name in r.group(2).split(','):
+ name = KernRe(r'^\s*\**(\S+)\s*').sub(r'\1', name)
+ if not s_id:
+ # Anonymous struct/union
+ newmember += f"{r.group(1)} {name}; "
+ else:
+ newmember += f"{r.group(1)} {s_id}.{name}; "
+ else:
+ newmember += f"{arg}; "
+ #
+ # At the end of the s_id loop, replace the original declaration with
+ # the munged version.
+ #
+ members = members.replace(oldmember, newmember)
+ #
+ # End of the tuple loop - search again and see if there are outer members
+ # that now turn up.
+ #
+ tuples = struct_members.findall(members)
+ return members
+
+ #
+ # Format the struct declaration into a standard form for inclusion in the
+ # resulting docs.
+ #
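+    # For member text like "int a;struct {int b;} n;" (an invented
+    # example), the result is roughly:
+    #
+    #     int a;
+    #     struct {
+    #             int b;
+    #     } n;
+    #
+    # with one further tab of indentation per brace-nesting level.
+    #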
+ def format_struct_decl(self, declaration):
+ #
+ # Insert newlines, get rid of extra spaces.
+ #
+ declaration = KernRe(r'([\{;])').sub(r'\1\n', declaration)
+ declaration = KernRe(r'\}\s+;').sub('};', declaration)
+ #
+ # Format inline enums with each member on its own line.
+ #
+ r = KernRe(r'(enum\s+\{[^\}]+),([^\n])')
+ while r.search(declaration):
+ declaration = r.sub(r'\1,\n\2', declaration)
+ #
+ # Now go through and supply the right number of tabs
+ # for each line.
+ #
+ def_args = declaration.split('\n')
+ level = 1
+ declaration = ""
+ for clause in def_args:
+ clause = KernRe(r'\s+').sub(' ', clause.strip(), count=1)
+ if clause:
+ if '}' in clause and level > 1:
+ level -= 1
+ if not clause.startswith('#'):
+ declaration += "\t" * level
+ declaration += "\t" + clause + "\n"
+ if "{" in clause and "}" not in clause:
+ level += 1
+ return declaration
+
+
+ def dump_struct(self, ln, proto):
+ """
+ Store an entry for a struct or union
+ """
+ #
+ # Do the basic parse to get the pieces of the declaration.
+ #
+ struct_parts = self.split_struct_proto(proto)
+ if not struct_parts:
+ self.emit_msg(ln, f"{proto} error: Cannot parse struct or union!")
+ return
+ decl_type, declaration_name, members = struct_parts
+
+ if self.entry.identifier != declaration_name:
+ self.emit_msg(ln, f"expecting prototype for {decl_type} {self.entry.identifier}. "
+ f"Prototype was for {decl_type} {declaration_name} instead\n")
+ return
+ #
+ # Go through the list of members applying all of our transformations.
+ #
+ members = trim_private_members(members)
+ members = apply_transforms(struct_xforms, members)
+
+ nested = NestedMatch()
+ for search, sub in struct_nested_prefixes:
+ members = nested.sub(search, sub, members)
+ #
+ # Deal with embedded struct and union members, and drop enums entirely.
+ #
+ declaration = members
+ members = self.rewrite_struct_members(members)
+ members = re.sub(r'(\{[^\{\}]*\})', '', members)
+ #
+ # Output the result and we are done.
+ #
+ self.create_parameter_list(ln, decl_type, members, ';',
+ declaration_name)
+ self.check_sections(ln, declaration_name, decl_type)
+ self.output_declaration(decl_type, declaration_name,
+ definition=self.format_struct_decl(declaration),
+ purpose=self.entry.declaration_purpose)
+
+ def dump_enum(self, ln, proto):
+ """
+ Stores an enum inside self.entries array.
+ """
+ #
+ # Strip preprocessor directives. Note that this depends on the
+ # trailing semicolon we added in process_proto_type().
+ #
+ proto = KernRe(r'#\s*((define|ifdef|if)\s+|endif)[^;]*;', flags=re.S).sub('', proto)
+ #
+ # Parse out the name and members of the enum. Typedef form first.
+ #
+ r = KernRe(r'typedef\s+enum\s*\{(.*)\}\s*(\w*)\s*;')
+ if r.search(proto):
+ declaration_name = r.group(2)
+ members = trim_private_members(r.group(1))
+ #
+ # Failing that, look for a straight enum
+ #
+ else:
+ r = KernRe(r'enum\s+(\w*)\s*\{(.*)\}')
+ if r.match(proto):
+ declaration_name = r.group(1)
+ members = trim_private_members(r.group(2))
+ #
+ # OK, this isn't going to work.
+ #
+ else:
+ self.emit_msg(ln, f"{proto}: error: Cannot parse enum!")
+ return
+ #
+ # Make sure we found what we were expecting.
+ #
+ if self.entry.identifier != declaration_name:
+ if self.entry.identifier == "":
+ self.emit_msg(ln,
+ f"{proto}: wrong kernel-doc identifier on prototype")
+ else:
+ self.emit_msg(ln,
+ f"expecting prototype for enum {self.entry.identifier}. "
+ f"Prototype was for enum {declaration_name} instead")
+ return
+
+ if not declaration_name:
+ declaration_name = "(anonymous)"
+ #
+ # Parse out the name of each enum member, and verify that we
+ # have a description for it.
+ #
+ member_set = set()
+ members = KernRe(r'\([^;)]*\)').sub('', members)
+ for arg in members.split(','):
+ if not arg:
+ continue
+ arg = KernRe(r'^\s*(\w+).*').sub(r'\1', arg)
+ self.entry.parameterlist.append(arg)
+ if arg not in self.entry.parameterdescs:
+ self.entry.parameterdescs[arg] = self.undescribed
+ self.emit_msg(ln,
+ f"Enum value '{arg}' not described in enum '{declaration_name}'")
+ member_set.add(arg)
+ #
+ # Ensure that every described member actually exists in the enum.
+ #
+ for k in self.entry.parameterdescs:
+ if k not in member_set:
+ self.emit_msg(ln,
+ f"Excess enum value '@{k}' description in '{declaration_name}'")
+
+ self.output_declaration('enum', declaration_name,
+ purpose=self.entry.declaration_purpose)
+
+ def dump_declaration(self, ln, prototype):
+ """
+ Stores a data declaration inside self.entries array.
+ """
+
+ if self.entry.decl_type == "enum":
+ self.dump_enum(ln, prototype)
+ elif self.entry.decl_type == "typedef":
+ self.dump_typedef(ln, prototype)
+ elif self.entry.decl_type in ["union", "struct"]:
+ self.dump_struct(ln, prototype)
+ else:
+ # This would be a bug
+            self.emit_msg(ln, f'Unknown declaration type: {self.entry.decl_type}')
+
+ def dump_function(self, ln, prototype):
+ """
+ Stores a function or function macro inside self.entries array.
+ """
+
+ found = func_macro = False
+ return_type = ''
+ decl_type = 'function'
+ #
+ # Apply the initial transformations.
+ #
+ prototype = apply_transforms(function_xforms, prototype)
+ #
+ # If we have a macro, remove the "#define" at the front.
+ #
+ new_proto = KernRe(r"^#\s*define\s+").sub("", prototype)
+ if new_proto != prototype:
+ prototype = new_proto
+ #
+ # Dispense with the simple "#define A B" case here; the key
+ # is the space after the name of the symbol being defined.
+ # NOTE that the seemingly misnamed "func_macro" indicates a
+ # macro *without* arguments.
+ #
+ r = KernRe(r'^(\w+)\s+')
+ if r.search(prototype):
+ return_type = ''
+ declaration_name = r.group(1)
+ func_macro = True
+ found = True
+
+ # Yes, this truly is vile. We are looking for:
+ # 1. Return type (may be nothing if we're looking at a macro)
+ # 2. Function name
+ # 3. Function parameters.
+ #
+ # All the while we have to watch out for function pointer parameters
+ # (which IIRC is what the two sections are for), C types (these
+ # regexps don't even start to express all the possibilities), and
+ # so on.
+ #
+ # If you mess with these regexps, it's a good idea to check that
+ # the following functions' documentation still comes out right:
+ # - parport_register_device (function pointer parameters)
+ # - atomic_set (macro)
+ # - pci_match_device, __copy_to_user (long return type)
+
+ name = r'\w+'
+ type1 = r'(?:[\w\s]+)?'
+ type2 = r'(?:[\w\s]+\*+)+'
+ #
+ # Attempt to match first on (args) with no internal parentheses; this
+ # lets us easily filter out __acquires() and other post-args stuff. If
+ # that fails, just grab the rest of the line to the last closing
+ # parenthesis.
+ #
+ proto_args = r'\(([^\(]*|.*)\)'
+ #
+ # (Except for the simple macro case) attempt to split up the prototype
+ # in the various ways we understand.
+ #
+ if not found:
+ patterns = [
+ rf'^()({name})\s*{proto_args}',
+ rf'^({type1})\s+({name})\s*{proto_args}',
+ rf'^({type2})\s*({name})\s*{proto_args}',
+ ]
+
+ for p in patterns:
+ r = KernRe(p)
+ if r.match(prototype):
+ return_type = r.group(1)
+ declaration_name = r.group(2)
+ args = r.group(3)
+ self.create_parameter_list(ln, decl_type, args, ',',
+ declaration_name)
+ found = True
+ break
+ #
+ # Parsing done; make sure that things are as we expect.
+ #
+ if not found:
+ self.emit_msg(ln,
+ f"cannot understand function prototype: '{prototype}'")
+ return
+ if self.entry.identifier != declaration_name:
+ self.emit_msg(ln, f"expecting prototype for {self.entry.identifier}(). "
+ f"Prototype was for {declaration_name}() instead")
+ return
+ self.check_sections(ln, declaration_name, "function")
+ self.check_return_section(ln, declaration_name, return_type)
+ #
+ # Store the result.
+ #
+ self.output_declaration(decl_type, declaration_name,
+ typedef=('typedef' in return_type),
+ functiontype=return_type,
+ purpose=self.entry.declaration_purpose,
+ func_macro=func_macro)
+
+
+ def dump_typedef(self, ln, proto):
+ """
+ Stores a typedef inside self.entries array.
+ """
+ #
+ # We start by looking for function typedefs.
+ #
+ typedef_type = r'typedef((?:\s+[\w*]+\b){0,7}\s+(?:\w+\b|\*+))\s*'
+ typedef_ident = r'\*?\s*(\w\S+)\s*'
+ typedef_args = r'\s*\((.*)\);'
+
+ typedef1 = KernRe(typedef_type + r'\(' + typedef_ident + r'\)' + typedef_args)
+ typedef2 = KernRe(typedef_type + typedef_ident + typedef_args)
+
+ # Parse function typedef prototypes
+ for r in [typedef1, typedef2]:
+ if not r.match(proto):
+ continue
+
+ return_type = r.group(1).strip()
+ declaration_name = r.group(2)
+ args = r.group(3)
+
+ if self.entry.identifier != declaration_name:
+ self.emit_msg(ln,
+ f"expecting prototype for typedef {self.entry.identifier}. Prototype was for typedef {declaration_name} instead\n")
+ return
+
+ self.create_parameter_list(ln, 'function', args, ',', declaration_name)
+
+ self.output_declaration('function', declaration_name,
+ typedef=True,
+ functiontype=return_type,
+ purpose=self.entry.declaration_purpose)
+ return
+ #
+ # Not a function, try to parse a simple typedef.
+ #
+ r = KernRe(r'typedef.*\s+(\w+)\s*;')
+ if r.match(proto):
+ declaration_name = r.group(1)
+
+ if self.entry.identifier != declaration_name:
+ self.emit_msg(ln,
+ f"expecting prototype for typedef {self.entry.identifier}. Prototype was for typedef {declaration_name} instead\n")
+ return
+
+ self.output_declaration('typedef', declaration_name,
+ purpose=self.entry.declaration_purpose)
+ return
+
+ self.emit_msg(ln, "error: Cannot parse typedef!")
+
+ @staticmethod
+ def process_export(function_set, line):
+ """
+ process EXPORT_SYMBOL* tags
+
+ This method doesn't use any variable from the class, so declare it
+ with a staticmethod decorator.
+ """
+
+ # We support documenting some exported symbols with different
+ # names. A horrible hack.
+ suffixes = [ '_noprof' ]
+
+ # Note: it accepts only one EXPORT_SYMBOL* per line, as having
+ # multiple export lines would violate Kernel coding style.
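+        #
+        # e.g. a line "EXPORT_SYMBOL_GPL(kmalloc_noprof);" adds "kmalloc"
+        # to function_set, since the "_noprof" suffix is trimmed below.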
+
+ if export_symbol.search(line):
+ symbol = export_symbol.group(2)
+ elif export_symbol_ns.search(line):
+ symbol = export_symbol_ns.group(2)
+ else:
+ return False
+ #
+ # Found an export, trim out any special suffixes
+ #
+ for suffix in suffixes:
+ # Be backward compatible with Python < 3.9
+ if symbol.endswith(suffix):
+ symbol = symbol[:-len(suffix)]
+ function_set.add(symbol)
+ return True
+
+ def process_normal(self, ln, line):
+ """
+ STATE_NORMAL: looking for the /** to begin everything.
+ """
+
+ if not doc_start.match(line):
+ return
+
+ # start a new entry
+ self.reset_state(ln)
+
+ # next line is always the function name
+ self.state = state.NAME
+
+ def process_name(self, ln, line):
+ """
+ STATE_NAME: Looking for the "name - description" line
+ """
+ #
+ # Check for a DOC: block and handle them specially.
+ #
+ if doc_block.search(line):
+
+ if not doc_block.group(1):
+ self.entry.begin_section(ln, "Introduction")
+ else:
+ self.entry.begin_section(ln, doc_block.group(1))
+
+ self.entry.identifier = self.entry.section
+ self.state = state.DOCBLOCK
+ #
+ # Otherwise we're looking for a normal kerneldoc declaration line.
+ #
+ elif doc_decl.search(line):
+ self.entry.identifier = doc_decl.group(1)
+
+ # Test for data declaration
+ if doc_begin_data.search(line):
+ self.entry.decl_type = doc_begin_data.group(1)
+ self.entry.identifier = doc_begin_data.group(2)
+ #
+ # Look for a function description
+ #
+ elif doc_begin_func.search(line):
+ self.entry.identifier = doc_begin_func.group(1)
+ self.entry.decl_type = "function"
+ #
+ # We struck out.
+ #
+ else:
+ self.emit_msg(ln,
+ f"This comment starts with '/**', but isn't a kernel-doc comment. Refer to Documentation/doc-guide/kernel-doc.rst\n{line}")
+ self.state = state.NORMAL
+ return
+ #
+ # OK, set up for a new kerneldoc entry.
+ #
+ self.state = state.BODY
+ self.entry.identifier = self.entry.identifier.strip(" ")
+ # if there's no @param blocks need to set up default section here
+ self.entry.begin_section(ln + 1)
+ #
+ # Find the description portion, which *should* be there but
+ # isn't always.
+ # (We should be able to capture this from the previous parsing - someday)
+ #
+ r = KernRe("[-:](.*)")
+ if r.search(line):
+ self.entry.declaration_purpose = trim_whitespace(r.group(1))
+ self.state = state.DECLARATION
+ else:
+ self.entry.declaration_purpose = ""
+
+ if not self.entry.declaration_purpose and self.config.wshort_desc:
+ self.emit_msg(ln,
+ f"missing initial short description on line:\n{line}")
+
+ if not self.entry.identifier and self.entry.decl_type != "enum":
+ self.emit_msg(ln,
+ f"wrong kernel-doc identifier on line:\n{line}")
+ self.state = state.NORMAL
+
+ if self.config.verbose:
+ self.emit_msg(ln,
+ f"Scanning doc for {self.entry.decl_type} {self.entry.identifier}",
+ warning=False)
+ #
+ # Failed to find an identifier. Emit a warning
+ #
+ else:
+ self.emit_msg(ln, f"Cannot find identifier on line:\n{line}")
+
+ #
+ # Helper function to determine if a new section is being started.
+ #
+ def is_new_section(self, ln, line):
+ if doc_sect.search(line):
+ self.state = state.BODY
+ #
+ # Pick out the name of our new section, tweaking it if need be.
+ #
+ newsection = doc_sect.group(1)
+ if newsection.lower() == 'description':
+ newsection = 'Description'
+ elif newsection.lower() == 'context':
+ newsection = 'Context'
+ self.state = state.SPECIAL_SECTION
+ elif newsection.lower() in ["@return", "@returns",
+ "return", "returns"]:
+ newsection = "Return"
+ self.state = state.SPECIAL_SECTION
+ elif newsection[0] == '@':
+ self.state = state.SPECIAL_SECTION
+ #
+ # Initialize the contents, and get the new section going.
+ #
+ newcontents = doc_sect.group(2)
+ if not newcontents:
+ newcontents = ""
+ self.dump_section()
+ self.entry.begin_section(ln, newsection)
+ self.entry.leading_space = None
+
+ self.entry.add_text(newcontents.lstrip())
+ return True
+ return False
+
+ #
+ # Helper function to detect (and effect) the end of a kerneldoc comment.
+ #
+ def is_comment_end(self, ln, line):
+ if doc_end.search(line):
+ self.dump_section()
+
+ # Look for doc_com + <text> + doc_end:
+ r = KernRe(r'\s*\*\s*[a-zA-Z_0-9:.]+\*/')
+ if r.match(line):
+ self.emit_msg(ln, f"suspicious ending line: {line}")
+
+ self.entry.prototype = ""
+ self.entry.new_start_line = ln + 1
+
+ self.state = state.PROTO
+ return True
+ return False
+
+
+ def process_decl(self, ln, line):
+ """
+ STATE_DECLARATION: We've seen the beginning of a declaration
+ """
+ if self.is_new_section(ln, line) or self.is_comment_end(ln, line):
+ return
+ #
+ # Look for anything with the " * " line beginning.
+ #
+ if doc_content.search(line):
+ cont = doc_content.group(1)
+ #
+ # A blank line means that we have moved out of the declaration
+ # part of the comment (without any "special section" parameter
+ # descriptions).
+ #
+ if cont == "":
+ self.state = state.BODY
+ #
+ # Otherwise we have more of the declaration section to soak up.
+ #
+ else:
+ self.entry.declaration_purpose = \
+ trim_whitespace(self.entry.declaration_purpose + ' ' + cont)
+ else:
+ # Unknown line, ignore
+ self.emit_msg(ln, f"bad line: {line}")
+
+
+ def process_special(self, ln, line):
+ """
+ STATE_SPECIAL_SECTION: a section ending with a blank line
+ """
+ #
+ # If we have hit a blank line (only the " * " marker), then this
+ # section is done.
+ #
+ if KernRe(r"\s*\*\s*$").match(line):
+ self.entry.begin_section(ln, dump = True)
+ self.state = state.BODY
+ return
+ #
+ # Not a blank line, look for the other ways to end the section.
+ #
+ if self.is_new_section(ln, line) or self.is_comment_end(ln, line):
+ return
+ #
+ # OK, we should have a continuation of the text for this section.
+ #
+ if doc_content.search(line):
+ cont = doc_content.group(1)
+ #
+ # If the lines of text after the first in a special section have
+ # leading white space, we need to trim it out or Sphinx will get
+ # confused. For the second line (the None case), see what we
+ # find there and remember it.
+ #
+ if self.entry.leading_space is None:
+ r = KernRe(r'^(\s+)')
+ if r.match(cont):
+ self.entry.leading_space = len(r.group(1))
+ else:
+ self.entry.leading_space = 0
+ #
+ # Otherwise, before trimming any leading chars, be *sure*
+ # that they are white space. We should maybe warn if this
+ # isn't the case.
+ #
+ for i in range(0, self.entry.leading_space):
+ if cont[i] != " ":
+ self.entry.leading_space = i
+ break
+ #
+ # Add the trimmed result to the section and we're done.
+ #
+ self.entry.add_text(cont[self.entry.leading_space:])
+ else:
+ # Unknown line, ignore
+ self.emit_msg(ln, f"bad line: {line}")
+
+ def process_body(self, ln, line):
+ """
+ STATE_BODY: the bulk of a kerneldoc comment.
+ """
+ if self.is_new_section(ln, line) or self.is_comment_end(ln, line):
+ return
+
+ if doc_content.search(line):
+ cont = doc_content.group(1)
+ self.entry.add_text(cont)
+ else:
+ # Unknown line, ignore
+ self.emit_msg(ln, f"bad line: {line}")
+
+ def process_inline_name(self, ln, line):
+ """STATE_INLINE_NAME: beginning of docbook comments within a prototype."""
+
+ if doc_inline_sect.search(line):
+ self.entry.begin_section(ln, doc_inline_sect.group(1))
+ self.entry.add_text(doc_inline_sect.group(2).lstrip())
+ self.state = state.INLINE_TEXT
+ elif doc_inline_end.search(line):
+ self.dump_section()
+ self.state = state.PROTO
+ elif doc_content.search(line):
+ self.emit_msg(ln, f"Incorrect use of kernel-doc format: {line}")
+ self.state = state.PROTO
+ # else ... ??
+
+ def process_inline_text(self, ln, line):
+ """STATE_INLINE_TEXT: docbook comments within a prototype."""
+
+ if doc_inline_end.search(line):
+ self.dump_section()
+ self.state = state.PROTO
+ elif doc_content.search(line):
+ self.entry.add_text(doc_content.group(1))
+ # else ... ??
+
+ def syscall_munge(self, ln, proto): # pylint: disable=W0613
+ """
+ Handle syscall definitions
+ """
+
+ is_void = False
+
+ # Strip newlines/CR's
+ proto = re.sub(r'[\r\n]+', ' ', proto)
+
+ # Check if it's a SYSCALL_DEFINE0
+ if 'SYSCALL_DEFINE0' in proto:
+ is_void = True
+
+ # Replace SYSCALL_DEFINE with correct return type & function name
+ proto = KernRe(r'SYSCALL_DEFINE.*\(').sub('long sys_', proto)
+
+ r = KernRe(r'long\s+(sys_.*?),')
+ if r.search(proto):
+ proto = KernRe(',').sub('(', proto, count=1)
+ elif is_void:
+ proto = KernRe(r'\)').sub('(void)', proto, count=1)
+
+ # Now delete all of the odd-numbered commas in the proto
+ # so that argument types & names don't have a comma between them
+ count = 0
+ length = len(proto)
+
+ if is_void:
+ length = 0 # skip the loop if is_void
+
+ for ix in range(length):
+ if proto[ix] == ',':
+ count += 1
+ if count % 2 == 1:
+ proto = proto[:ix] + ' ' + proto[ix + 1:]
+
+ return proto
+
+ def tracepoint_munge(self, ln, proto):
+ """
+ Handle tracepoint definitions
+ """
+
+ tracepointname = None
+ tracepointargs = None
+
+ # Match tracepoint name based on different patterns
+ r = KernRe(r'TRACE_EVENT\((.*?),')
+ if r.search(proto):
+ tracepointname = r.group(1)
+
+ r = KernRe(r'DEFINE_SINGLE_EVENT\((.*?),')
+ if r.search(proto):
+ tracepointname = r.group(1)
+
+ r = KernRe(r'DEFINE_EVENT\((.*?),(.*?),')
+ if r.search(proto):
+ tracepointname = r.group(2)
+
+ if tracepointname:
+ tracepointname = tracepointname.lstrip()
+
+ r = KernRe(r'TP_PROTO\((.*?)\)')
+ if r.search(proto):
+ tracepointargs = r.group(1)
+
+ if not tracepointname or not tracepointargs:
+ self.emit_msg(ln,
+ f"Unrecognized tracepoint format:\n{proto}\n")
+ else:
+ proto = f"static inline void trace_{tracepointname}({tracepointargs})"
+ self.entry.identifier = f"trace_{self.entry.identifier}"
+
+ return proto
+
+ def process_proto_function(self, ln, line):
+ """Ancillary routine to process a function prototype"""
+
+ # strip C99-style comments to end of line
+ line = KernRe(r"//.*$", re.S).sub('', line)
+ #
+ # Soak up the line's worth of prototype text, stopping at { or ; if present.
+ #
+ if KernRe(r'\s*#\s*define').match(line):
+ self.entry.prototype = line
+ elif not line.startswith('#'): # skip other preprocessor stuff
+ r = KernRe(r'([^\{]*)')
+ if r.match(line):
+ self.entry.prototype += r.group(1) + " "
+ #
+ # If we now have the whole prototype, clean it up and declare victory.
+ #
+ if '{' in line or ';' in line or KernRe(r'\s*#\s*define').match(line):
+ # strip comments and surrounding spaces
+ self.entry.prototype = KernRe(r'/\*.*\*/').sub('', self.entry.prototype).strip()
+ #
+ # Handle self.entry.prototypes for function pointers like:
+ # int (*pcs_config)(struct foo)
+ # by turning it into
+ # int pcs_config(struct foo)
+ #
+ r = KernRe(r'^(\S+\s+)\(\s*\*(\S+)\)')
+ self.entry.prototype = r.sub(r'\1\2', self.entry.prototype)
+ #
+ # Handle special declaration syntaxes
+ #
+ if 'SYSCALL_DEFINE' in self.entry.prototype:
+ self.entry.prototype = self.syscall_munge(ln,
+ self.entry.prototype)
+ else:
+ r = KernRe(r'TRACE_EVENT|DEFINE_EVENT|DEFINE_SINGLE_EVENT')
+ if r.search(self.entry.prototype):
+ self.entry.prototype = self.tracepoint_munge(ln,
+ self.entry.prototype)
+ #
+ # ... and we're done
+ #
+ self.dump_function(ln, self.entry.prototype)
+ self.reset_state(ln)
+
+ def process_proto_type(self, ln, line):
+ """Ancillary routine to process a type"""
+
+ # Strip C99-style comments and surrounding whitespace
+ line = KernRe(r"//.*$", re.S).sub('', line).strip()
+ if not line:
+ return # nothing to see here
+
+ # To distinguish preprocessor directive from regular declaration later.
+ if line.startswith('#'):
+ line += ";"
+ #
+ # Split the declaration on any of { } or ;, and accumulate pieces
+ # until we hit a semicolon while not inside {brackets}
+ #
+ r = KernRe(r'(.*?)([{};])')
+ for chunk in r.split(line):
+ if chunk: # Ignore empty matches
+ self.entry.prototype += chunk
+ #
+ # This cries out for a match statement ... someday after we can
+ # drop Python 3.9 ...
+ #
+ if chunk == '{':
+ self.entry.brcount += 1
+ elif chunk == '}':
+ self.entry.brcount -= 1
+ elif chunk == ';' and self.entry.brcount <= 0:
+ self.dump_declaration(ln, self.entry.prototype)
+ self.reset_state(ln)
+ return
+ #
+ # We hit the end of the line while still in the declaration; put
+ # in a space to represent the newline.
+ #
+ self.entry.prototype += ' '
+
+ def process_proto(self, ln, line):
+ """STATE_PROTO: reading a function/whatever prototype."""
+
+ if doc_inline_oneline.search(line):
+ self.entry.begin_section(ln, doc_inline_oneline.group(1))
+ self.entry.add_text(doc_inline_oneline.group(2))
+ self.dump_section()
+
+ elif doc_inline_start.search(line):
+ self.state = state.INLINE_NAME
+
+ elif self.entry.decl_type == 'function':
+ self.process_proto_function(ln, line)
+
+ else:
+ self.process_proto_type(ln, line)
+
+ def process_docblock(self, ln, line):
+ """STATE_DOCBLOCK: within a DOC: block."""
+
+ if doc_end.search(line):
+ self.dump_section()
+ self.output_declaration("doc", self.entry.identifier)
+ self.reset_state(ln)
+
+ elif doc_content.search(line):
+ self.entry.add_text(doc_content.group(1))
+
+ def parse_export(self):
+ """
+ Parses EXPORT_SYMBOL* macros from a single Kernel source file.
+ """
+
+ export_table = set()
+
+ try:
+ with open(self.fname, "r", encoding="utf8",
+ errors="backslashreplace") as fp:
+
+ for line in fp:
+ self.process_export(export_table, line)
+
+ except IOError:
+ return None
+
+ return export_table
+
+ #
+ # The state/action table telling us which function to invoke in
+ # each state.
+ #
+ state_actions = {
+ state.NORMAL: process_normal,
+ state.NAME: process_name,
+ state.BODY: process_body,
+ state.DECLARATION: process_decl,
+ state.SPECIAL_SECTION: process_special,
+ state.INLINE_NAME: process_inline_name,
+ state.INLINE_TEXT: process_inline_text,
+ state.PROTO: process_proto,
+ state.DOCBLOCK: process_docblock,
+ }
+
+ def parse_kdoc(self):
+ """
+ Open and process each line of a C source file.
+ The parsing is controlled via a state machine, and the line is passed
+ to a different process function depending on the state. The process
+ function may update the state as needed.
+
+ Besides parsing kernel-doc tags, it also parses export symbols.
+ """
+
+ prev = ""
+ prev_ln = None
+ export_table = set()
+
+ try:
+ with open(self.fname, "r", encoding="utf8",
+ errors="backslashreplace") as fp:
+ for ln, line in enumerate(fp):
+
+ line = line.expandtabs().strip("\n")
+
+ # Group continuation lines on prototypes
+ if self.state == state.PROTO:
+ if line.endswith("\\"):
+ prev += line.rstrip("\\")
+ if prev_ln is None:
+ prev_ln = ln
+ continue
+
+ if prev:
+ ln = prev_ln
+ line = prev + line
+ prev = ""
+ prev_ln = None
+
+ self.config.log.debug("%d %s: %s",
+ ln, state.name[self.state],
+ line)
+
+ # This is an optimization over the original script.
+ # There, when export_file was used for the same file,
+ # it was read twice. Here, we use the already-existing
+ # loop to parse exported symbols as well.
+ #
+ if (self.state != state.NORMAL) or \
+ not self.process_export(export_table, line):
+ # Hand this line to the appropriate state handler
+ self.state_actions[self.state](self, ln, line)
+
+ except OSError:
+ self.config.log.error(f"Error: Cannot open file {self.fname}")
+
+ return export_table, self.entries
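
The state_actions table above stores plain (unbound) functions, which is
why parse_kdoc() invokes each handler as
self.state_actions[self.state](self, ln, line). A minimal sketch of the
same dispatch idiom, with made-up states and handlers rather than the
parser's real ones:

    # Sketch of the state/action dispatch used by parse_kdoc().
    # States and handlers are illustrative only.

    class ToyParser:
        NORMAL, PROTO = range(2)

        def __init__(self):
            self.state = self.NORMAL

        def process_normal(self, ln, line):
            if line.startswith("int "):    # pretend a prototype starts
                self.state = self.PROTO

        def process_proto(self, ln, line):
            if line.endswith(";"):         # prototype is complete
                self.state = self.NORMAL

        # Values are plain functions, so callers pass self explicitly
        state_actions = {
            NORMAL: process_normal,
            PROTO: process_proto,
        }

        def parse(self, lines):
            for ln, line in enumerate(lines):
                self.state_actions[self.state](self, ln, line)

    ToyParser().parse(["int foo(void);", "some text"])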
diff --git a/tools/lib/python/kdoc/kdoc_re.py b/tools/lib/python/kdoc/kdoc_re.py
new file mode 100644
index 000000000000..2dfa1bf83d64
--- /dev/null
+++ b/tools/lib/python/kdoc/kdoc_re.py
@@ -0,0 +1,270 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+
+"""
+Regular expression ancillary classes.
+
+They help cache compiled regular expressions and perform matching for kernel-doc.
+"""
+
+import re
+
+# Local cache for regular expressions
+re_cache = {}
+
+
+class KernRe:
+ """
+ Helper class to simplify regex declaration and usage.
+
+ It calls re.compile() for a given pattern. It also allows combining
+ regular expressions via the '+' operator.
+
+ Compiled regular expressions can be cached via an argument, helping
+ to speed up searches.
+ """
+
+ def _add_regex(self, string, flags):
+ """
+ Adds a new regex or reuses it from the cache.
+ """
+ self.regex = re_cache.get(string, None)
+ if not self.regex:
+ self.regex = re.compile(string, flags=flags)
+ if self.cache:
+ re_cache[string] = self.regex
+
+ def __init__(self, string, cache=True, flags=0):
+ """
+ Compile a regular expression and initialize internal vars.
+ """
+
+ self.cache = cache
+ self.last_match = None
+
+ self._add_regex(string, flags)
+
+ def __str__(self):
+ """
+ Return the regular expression pattern.
+ """
+ return self.regex.pattern
+
+ def __add__(self, other):
+ """
+ Allows adding two regular expressions into one.
+ """
+
+ return KernRe(str(self) + str(other), cache=self.cache or other.cache,
+ flags=self.regex.flags | other.regex.flags)
+
+ def match(self, string):
+ """
+ Handles a re.match, storing its result for later group() calls
+ """
+
+ self.last_match = self.regex.match(string)
+ return self.last_match
+
+ def search(self, string):
+ """
+ Handles a re.search, storing its result for later group() calls
+ """
+
+ self.last_match = self.regex.search(string)
+ return self.last_match
+
+ def findall(self, string):
+ """
+ Alias to re.findall
+ """
+
+ return self.regex.findall(string)
+
+ def split(self, string):
+ """
+ Alias to re.split
+ """
+
+ return self.regex.split(string)
+
+ def sub(self, sub, string, count=0):
+ """
+ Alias to re.sub
+ """
+
+ return self.regex.sub(sub, string, count=count)
+
+ def group(self, num):
+ """
+ Returns the group results of the last match
+ """
+
+ return self.last_match.group(num)
+
+
+class NestedMatch:
+ """
+ Finding nested delimiters is hard with regular expressions. It is
+ even harder in Python with its standard re module, as several advanced
+ regular expression features are missing.
+
+ This is the case for this pattern:
+
+ '\\bSTRUCT_GROUP(\\(((?:(?>[^)(]+)|(?1))*)\\))[^;]*;'
+
+ which is used to properly match the open/close parentheses of a
+ STRUCT_GROUP() string search.
+
+ This class counts pairs of delimiters, using that to match and
+ replace nested expressions.
+
+ The original approach was suggested by:
+ https://stackoverflow.com/questions/5454322/python-how-to-match-nested-parentheses-with-regex
+
+ although it was re-implemented to be more generic and to match three
+ types of delimiters. The logic checks that delimiters are paired; if
+ not, the search string is ignored.
+ """
+
+ # TODO: make NestedMatch handle multiple match groups
+ #
+ # Right now, regular expressions to match it are defined only up to
+ # the start delimiter, e.g.:
+ #
+ # \bSTRUCT_GROUP\(
+ #
+ # is similar to: STRUCT_GROUP\((.*)\)
+ # except that the content inside the match group is delimiter-aligned.
+ #
+ # The content inside parentheses is converted into a single replace
+ # group (e.g. r'\1').
+ #
+ # It would be nice to change such definition to support multiple
+ # match groups, allowing a regex equivalent to:
+ #
+ # FOO\((.*), (.*), (.*)\)
+ #
+ # It is probably easier to define it not as a regular expression, but
+ # with some lexical definition like:
+ #
+ # FOO(arg1, arg2, arg3)
+
+ DELIMITER_PAIRS = {
+ '{': '}',
+ '(': ')',
+ '[': ']',
+ }
+
+ RE_DELIM = re.compile(r'[\{\}\[\]\(\)]')
+
+ def _search(self, regex, line):
+ """
+ Finds paired blocks for a regex that ends with a delimiter.
+
+ The suggestion of using finditer to match pairs came from:
+ https://stackoverflow.com/questions/5454322/python-how-to-match-nested-parentheses-with-regex
+ but I ended up using a different implementation that handles all three
+ types of delimiters and seeks an initial regular expression.
+
+ The algorithm seeks for open/close paired delimiters and places them
+ into a stack, yielding a start/stop position of each match when the
+ stack is zeroed.
+
+ The algorithm should work fine for properly paired lines, but will
+ silently ignore end delimiters that precede a start delimiter.
+ This should be OK for the kernel-doc parser, as unpaired delimiters
+ would cause compilation errors, so there is no need to raise exceptions
+ to cover such issues.
+ """
+
+ stack = []
+
+ for match_re in regex.finditer(line):
+ start = match_re.start()
+ offset = match_re.end()
+
+ d = line[offset - 1]
+ if d not in self.DELIMITER_PAIRS:
+ continue
+
+ end = self.DELIMITER_PAIRS[d]
+ stack.append(end)
+
+ for match in self.RE_DELIM.finditer(line[offset:]):
+ pos = match.start() + offset
+
+ d = line[pos]
+
+ if d in self.DELIMITER_PAIRS:
+ end = self.DELIMITER_PAIRS[d]
+
+ stack.append(end)
+ continue
+
+ # Does the end delimiter match what is expected?
+ if stack and d == stack[-1]:
+ stack.pop()
+
+ if not stack:
+ yield start, offset, pos + 1
+ break
+
+ def search(self, regex, line):
+ """
+ This is similar to re.search:
+
+ It matches a regex that is followed by a delimiter,
+ yielding occurrences only if all delimiters are paired.
+ """
+
+ for t in self._search(regex, line):
+
+ yield line[t[0]:t[2]]
+
+ def sub(self, regex, sub, line, count=0):
+ """
+ This is similar to re.sub:
+
+ It matches a regex that is followed by a delimiter,
+ replacing occurrences only if all delimiters are paired.
+
+ If r'\1' is used in the substitution, it works just like re: the
+ matched data is placed there, with the delimiters stripped.
+
+ If count is nonzero, at most count occurrences are replaced.
+ """
+ out = ""
+
+ cur_pos = 0
+ n = 0
+
+ for start, end, pos in self._search(regex, line):
+ out += line[cur_pos:start]
+
+ # Value, ignoring start/end delimiters
+ value = line[end:pos - 1]
+
+ # replaces \1 at the sub string, if \1 is used there
+ new_sub = sub
+ new_sub = new_sub.replace(r'\1', value)
+
+ out += new_sub
+
+ # Drop the trailing ';', if any, guarding against a match that
+ # ends exactly at the end of the line
+ if pos < len(line) and line[pos] == ';':
+ pos += 1
+
+ cur_pos = pos
+ n += 1
+
+ if count and n >= count:
+ break
+
+ # Append the remaining string
+ out += line[cur_pos:]
+
+ return out
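
As a usage sketch (not part of the patch; the inputs and the import path
are assumptions), KernRe behaves like a compiled pattern that remembers
its last match, while NestedMatch pairs a start regex ending in an open
delimiter with its matching close delimiter:

    import re
    from kdoc_re import KernRe, NestedMatch  # assumes tools/lib/python/kdoc on sys.path

    r = KernRe(r"struct\s+(\w+)")            # compiled once, kept in re_cache
    if r.search("struct foo { int a; };"):
        print(r.group(1))                    # -> foo

    combined = KernRe(r"enum\s+") + KernRe(r"(\w+)")  # '+' concatenates patterns

    nested = NestedMatch()
    line = "STRUCT_GROUP(struct x { int (a); });"
    start = re.compile(r"\bSTRUCT_GROUP\(")  # must end at an open delimiter
    for span in nested.search(start, line):
        print(span)                          # the balanced STRUCT_GROUP(...) span
    print(nested.sub(start, r"\1", line))    # -> struct x { int (a); }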
diff --git a/tools/lib/python/kdoc/latex_fonts.py b/tools/lib/python/kdoc/latex_fonts.py
new file mode 100755
index 000000000000..29317f8006ea
--- /dev/null
+++ b/tools/lib/python/kdoc/latex_fonts.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) Akira Yokosawa, 2024
+#
+# Ported to Python by (c) Mauro Carvalho Chehab, 2025
+
+"""
+Detect problematic Noto CJK variable fonts.
+
+For "make pdfdocs", reports of build errors of translations.pdf started
+arriving early 2024 [1, 2]. It turned out that Fedora and openSUSE
+tumbleweed have started deploying variable-font [3] format of "Noto CJK"
+fonts [4, 5]. For PDF, a LaTeX package named xeCJK is used for CJK
+(Chinese, Japanese, Korean) pages. xeCJK requires XeLaTeX/XeTeX, which
+does not (and likely never will) understand variable fonts for historical
+reasons.
+
+The build error happens even when both variable- and non-variable-format
+fonts are found on the build system. To make matters worse, Fedora lists
+variable "Noto CJK" fonts in the requirements of langpacks-ja, -ko, -zh_CN,
+-zh_TW, etc. Hence developers who are interested in CJK pages are more
+likely to encounter the build errors.
+
+This script is invoked from the error path of "make pdfdocs" and emits
+suggestions if variable-font files of "Noto CJK" fonts are in the list of
+fonts accessible from XeTeX.
+
+References:
+[1]: https://lore.kernel.org/r/8734tqsrt7.fsf@meer.lwn.net/
+[2]: https://lore.kernel.org/r/1708585803.600323099@f111.i.mail.ru/
+[3]: https://en.wikipedia.org/wiki/Variable_font
+[4]: https://fedoraproject.org/wiki/Changes/Noto_CJK_Variable_Fonts
+[5]: https://build.opensuse.org/request/show/1157217
+
+#===========================================================================
+Workarounds for building translations.pdf
+#===========================================================================
+
+* Denylist "variable font" Noto CJK fonts.
+ - Create $HOME/deny-vf/fontconfig/fonts.conf from the template below,
+ with tweaks if necessary.
+ - The path of fontconfig/fonts.conf can be overridden by setting the
+ FONTS_CONF_DENY_VF environment variable.
+
+ * Template:
+-----------------------------------------------------------------
+<?xml version="1.0"?>
+<!DOCTYPE fontconfig SYSTEM "urn:fontconfig:fonts.dtd">
+<fontconfig>
+<!--
+ Ignore variable-font glob (not to break xetex)
+-->
+ <selectfont>
+ <rejectfont>
+ <!--
+ for Fedora
+ -->
+ <glob>/usr/share/fonts/google-noto-*-cjk-vf-fonts</glob>
+ <!--
+ for openSUSE tumbleweed
+ -->
+ <glob>/usr/share/fonts/truetype/Noto*CJK*-VF.otf</glob>
+ </rejectfont>
+ </selectfont>
+</fontconfig>
+-----------------------------------------------------------------
+
+ The denylisting is activated for "make pdfdocs".
+
+* For skipping CJK pages in PDF
+ - Uninstall texlive-xecjk.
+ Denylisting is not needed in this case.
+
+* For printing CJK pages in PDF
+ - Need non-variable "Noto CJK" fonts.
+ * Fedora
+ - google-noto-sans-cjk-fonts
+ - google-noto-serif-cjk-fonts
+ * openSUSE tumbleweed
+ - Non-variable "Noto CJK" fonts are not available as distro packages
+ as of April, 2024. Fetch a set of font files from upstream Noto
+ CJK Font released at:
+ https://github.com/notofonts/noto-cjk/tree/main/Sans#super-otc
+ and at:
+ https://github.com/notofonts/noto-cjk/tree/main/Serif#super-otc,
+ then uncompress and deploy them.
+ - Remember to update fontconfig cache by running fc-cache.
+
+!!! Caution !!!
+ Uninstalling "variable font" packages can be dangerous.
+ They might be depended upon by other packages important for your work.
+ Denylisting should be less invasive, as it is effective only while
+ XeLaTeX runs in "make pdfdocs".
+"""
+
+import os
+import re
+import subprocess
+import textwrap
+import sys
+
+class LatexFontChecker:
+ """
+ Detect problems with CJK variable fonts that affect PDF builds for
+ translations.
+ """
+
+ def __init__(self, deny_vf=None):
+ if not deny_vf:
+ deny_vf = os.environ.get('FONTS_CONF_DENY_VF', "~/deny-vf")
+
+ self.environ = os.environ.copy()
+ self.environ['XDG_CONFIG_HOME'] = os.path.expanduser(deny_vf)
+
+ self.re_cjk = re.compile(r"([^:]+):\s*Noto\s+(Sans|Sans Mono|Serif) CJK")
+
+ def description(self):
+ return __doc__
+
+ def get_noto_cjk_vf_fonts(self):
+ """Get Noto CJK fonts"""
+
+ cjk_fonts = set()
+ cmd = ["fc-list", ":", "file", "family", "variable"]
+ try:
+ result = subprocess.run(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
+ env=self.environ,
+ check=True)
+
+ except subprocess.CalledProcessError as exc:
+ sys.exit(f"Error running fc-list: {repr(exc)}")
+
+ for line in result.stdout.splitlines():
+ if 'variable=True' not in line:
+ continue
+
+ match = self.re_cjk.search(line)
+ if match:
+ cjk_fonts.add(match.group(1))
+
+ return sorted(cjk_fonts)
+
+ def check(self):
+ """Check for problems with CJK fonts"""
+
+ fonts = textwrap.indent("\n".join(self.get_noto_cjk_vf_fonts()), " ")
+ if not fonts:
+ return None
+
+ rel_file = os.path.relpath(__file__, os.getcwd())
+
+ msg = "=" * 77 + "\n"
+ msg += 'XeTeX is confused by "variable font" files listed below:\n'
+ msg += fonts + "\n"
+ msg += textwrap.dedent(f"""
+ For CJK pages in PDF, they need to be hidden from XeTeX by denylisting.
+ Or, CJK pages can be skipped by uninstalling texlive-xecjk.
+
+ For more info on denylisting, other options, and variable font, run:
+
+ tools/docs/check-variable-fonts.py -h
+ """)
+ msg += "=" * 77
+
+ return msg
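
As a usage sketch (the call site is hypothetical; a build wrapper's PDF
error path would do something along these lines):

    from latex_fonts import LatexFontChecker  # assumes tools/lib/python/kdoc on sys.path

    checker = LatexFontChecker()   # honors FONTS_CONF_DENY_VF if set
    msg = checker.check()          # runs fc-list with the adjusted environment
    if msg:
        print(msg)                 # variable Noto CJK fonts found: print the advice
    else:
        print("no problematic Noto CJK variable fonts visible to XeTeX")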
diff --git a/tools/docs/lib/parse_data_structs.py b/tools/lib/python/kdoc/parse_data_structs.py
index a5aa2e182052..25361996cd20 100755
--- a/tools/docs/lib/parse_data_structs.py
+++ b/tools/lib/python/kdoc/parse_data_structs.py
@@ -53,11 +53,19 @@ class ParseDataStructs:
replace <type> <old_symbol> <new_reference>
- Replaces how old_symbol with a new reference. The new_reference can be:
+ Replaces how old_symbol is referenced. The new_reference can be:
+
- A simple symbol name;
- A full Sphinx reference.
- On both cases, <type> can be:
+ 3. Namespace rules
+
+ namespace <namespace>
+
+ Sets the C namespace to be used during cross-reference generation.
+ It can be overridden by replace rules.
+
+ For ignore and replace rules, <type> can be:
- ioctl: for defines that end with _IO*, e.g. ioctl definitions
- define: for other defines
- symbol: for symbols defined within enums;
@@ -71,6 +79,8 @@ class ParseDataStructs:
ignore ioctl VIDIOC_ENUM_FMT
replace ioctl VIDIOC_DQBUF vidioc_qbuf
replace define V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ :c:type:`v4l2_event_motion_det`
+
+ namespace MC
"""
# Parser regexes with multiple ways to capture enums and structs
@@ -140,10 +150,96 @@ class ParseDataStructs:
self.symbols = {}
+ self.namespace = None
+ self.ignore = []
+ self.replace = []
+ self.exceptions_name = None
+
for symbol_type in self.DEF_SYMBOL_TYPES:
self.symbols[symbol_type] = {}
- def store_type(self, symbol_type: str, symbol: str,
+ def read_exceptions(self, fname: str):
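+ """Read ignore/replace/namespace rules from an exceptions file."""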
+ if not fname:
+ return
+
+ name = self.exceptions_name = os.path.basename(fname)
+
+ with open(fname, "r", encoding="utf-8", errors="backslashreplace") as f:
+ for ln, line in enumerate(f):
+ ln += 1
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+
+ # ignore rules
+ match = re.match(r"^ignore\s+(\w+)\s+(\S+)", line)
+
+ if match:
+ self.ignore.append((ln, match.group(1), match.group(2)))
+ continue
+
+ # replace rules
+ match = re.match(r"^replace\s+(\S+)\s+(\S+)\s+(\S+)", line)
+ if match:
+ self.replace.append((ln, match.group(1), match.group(2),
+ match.group(3)))
+ continue
+
+ match = re.match(r"^namespace\s+(\S+)", line)
+ if match:
+ self.namespace = match.group(1)
+ continue
+
+ sys.exit(f"{name}:{ln}: invalid line: {line}")
+
+ def apply_exceptions(self):
+ """
+ Apply the ignore/replace rules read by read_exceptions().
+ """
+ name = self.exceptions_name
+
+ # Handle ignore rules
+ for ln, c_type, symbol in self.ignore:
+ if c_type not in self.DEF_SYMBOL_TYPES:
+ sys.exit(f"{name}:{ln}: {c_type} is invalid")
+
+ d = self.symbols[c_type]
+ if symbol in d:
+ del d[symbol]
+
+ # Handle replace rules
+ for ln, c_type, old, new in self.replace:
+ if c_type not in self.DEF_SYMBOL_TYPES:
+ sys.exit(f"{name}:{ln}: {c_type} is invalid")
+
+ reftype = None
+
+ # Parse reference type when the type is specified
+
+ match = re.match(r"^\:c\:(\w+)\:\`(.+)\`", new)
+ if match:
+ reftype = f":c:{match.group(1)}"
+ new = match.group(2)
+ else:
+ match = re.search(r"(\:ref)\:\`(.+)\`", new)
+ if match:
+ reftype = match.group(1)
+ new = match.group(2)
+
+ # If the replacement rule doesn't have a type, get default
+ if not reftype:
+ reftype = self.DEF_SYMBOL_TYPES[c_type].get("ref_type")
+ if not reftype:
+ reftype = self.DEF_SYMBOL_TYPES[c_type].get("real_type")
+
+ new_ref = f"{reftype}:`{old} <{new}>`"
+
+ # Change self.symbols to use the replacement rule
+ if old in self.symbols[c_type]:
+ (_, ln) = self.symbols[c_type][old]
+ self.symbols[c_type][old] = (new_ref, ln)
+ else:
+ print(f"{name}:{ln}: Warning: can't find {old} {c_type}")
+
+ def store_type(self, ln, symbol_type: str, symbol: str,
ref_name: str = None, replace_underscores: bool = True):
"""
Stores a new symbol at self.symbols under symbol_type.
@@ -157,35 +253,42 @@ class ParseDataStructs:
ref_type = defs.get("ref_type")
# Determine ref_link based on symbol type
- if ref_type:
- if symbol_type == "enum":
- ref_link = f"{ref_type}:`{symbol}`"
- else:
- if not ref_name:
- ref_name = symbol.lower()
+ if ref_type or self.namespace:
+ if not ref_name:
+ ref_name = symbol.lower()
+
+ # c-type references don't support hash
+ if ref_type == ":ref" and replace_underscores:
+ ref_name = ref_name.replace("_", "-")
- # c-type references don't support hash
- if ref_type == ":ref" and replace_underscores:
- ref_name = ref_name.replace("_", "-")
+ # C domain references may have namespaces
+ if ref_type and ref_type.startswith(":c:"):
+ if self.namespace:
+ ref_name = f"{self.namespace}.{ref_name}"
+ if ref_type:
ref_link = f"{ref_type}:`{symbol} <{ref_name}>`"
+ else:
+ ref_link = f"`{symbol} <{ref_name}>`"
else:
ref_link = symbol
- self.symbols[symbol_type][symbol] = f"{prefix}{ref_link}{suffix}"
+ self.symbols[symbol_type][symbol] = (f"{prefix}{ref_link}{suffix}", ln)
def store_line(self, line):
"""Stores a line at self.data, properly indented"""
line = " " + line.expandtabs()
self.data += line.rstrip(" ")
- def parse_file(self, file_in: str):
+ def parse_file(self, file_in: str, exceptions: str = None):
"""Reads a C source file and get identifiers"""
self.data = ""
is_enum = False
is_comment = False
multiline = ""
+ self.read_exceptions(exceptions)
+
with open(file_in, "r",
encoding="utf-8", errors="backslashreplace") as f:
for line_no, line in enumerate(f):
@@ -240,20 +343,20 @@ class ParseDataStructs:
if is_enum:
match = re.match(r"^\s*([_\w][\w\d_]+)\s*[\,=]?", line)
if match:
- self.store_type("symbol", match.group(1))
+ self.store_type(line_no, "symbol", match.group(1))
if "}" in line:
is_enum = False
continue
match = re.match(r"^\s*#\s*define\s+([\w_]+)\s+_IO", line)
if match:
- self.store_type("ioctl", match.group(1),
+ self.store_type(line_no, "ioctl", match.group(1),
replace_underscores=False)
continue
match = re.match(r"^\s*#\s*define\s+([\w_]+)(\s+|$)", line)
if match:
- self.store_type("define", match.group(1))
+ self.store_type(line_no, "define", match.group(1))
continue
match = re.match(r"^\s*typedef\s+([_\w][\w\d_]+)\s+(.*)\s+([_\w][\w\d_]+);",
@@ -261,90 +364,23 @@ class ParseDataStructs:
if match:
name = match.group(2).strip()
symbol = match.group(3)
- self.store_type("typedef", symbol, ref_name=name)
+ self.store_type(line_no, "typedef", symbol, ref_name=name)
continue
for re_enum in self.RE_ENUMS:
match = re_enum.match(line)
if match:
- self.store_type("enum", match.group(1))
+ self.store_type(line_no, "enum", match.group(1))
is_enum = True
break
for re_struct in self.RE_STRUCTS:
match = re_struct.match(line)
if match:
- self.store_type("struct", match.group(1))
+ self.store_type(line_no, "struct", match.group(1))
break
- def process_exceptions(self, fname: str):
- """
- Process exceptions file with rules to ignore or replace references.
- """
- if not fname:
- return
-
- name = os.path.basename(fname)
-
- with open(fname, "r", encoding="utf-8", errors="backslashreplace") as f:
- for ln, line in enumerate(f):
- ln += 1
- line = line.strip()
- if not line or line.startswith("#"):
- continue
-
- # Handle ignore rules
- match = re.match(r"^ignore\s+(\w+)\s+(\S+)", line)
- if match:
- c_type = match.group(1)
- symbol = match.group(2)
-
- if c_type not in self.DEF_SYMBOL_TYPES:
- sys.exit(f"{name}:{ln}: {c_type} is invalid")
-
- d = self.symbols[c_type]
- if symbol in d:
- del d[symbol]
-
- continue
-
- # Handle replace rules
- match = re.match(r"^replace\s+(\S+)\s+(\S+)\s+(\S+)", line)
- if not match:
- sys.exit(f"{name}:{ln}: invalid line: {line}")
-
- c_type, old, new = match.groups()
-
- if c_type not in self.DEF_SYMBOL_TYPES:
- sys.exit(f"{name}:{ln}: {c_type} is invalid")
-
- reftype = None
-
- # Parse reference type when the type is specified
-
- match = re.match(r"^\:c\:(data|func|macro|type)\:\`(.+)\`", new)
- if match:
- reftype = f":c:{match.group(1)}"
- new = match.group(2)
- else:
- match = re.search(r"(\:ref)\:\`(.+)\`", new)
- if match:
- reftype = match.group(1)
- new = match.group(2)
-
- # If the replacement rule doesn't have a type, get default
- if not reftype:
- reftype = self.DEF_SYMBOL_TYPES[c_type].get("ref_type")
- if not reftype:
- reftype = self.DEF_SYMBOL_TYPES[c_type].get("real_type")
-
- new_ref = f"{reftype}:`{old} <{new}>`"
-
- # Change self.symbols to use the replacement rule
- if old in self.symbols[c_type]:
- self.symbols[c_type][old] = new_ref
- else:
- print(f"{name}:{ln}: Warning: can't find {old} {c_type}")
+ self.apply_exceptions()
def debug_print(self):
"""
@@ -360,8 +396,8 @@ class ParseDataStructs:
print(f"{c_type}:")
- for symbol, ref in sorted(refs.items()):
- print(f" {symbol} -> {ref}")
+ for symbol, (ref, ln) in sorted(refs.items()):
+ print(f" #{ln:<5d} {symbol} -> {ref}")
print()
@@ -384,7 +420,7 @@ class ParseDataStructs:
# Process all reference types
for ref_dict in self.symbols.values():
- for symbol, replacement in ref_dict.items():
+ for symbol, (replacement, _) in ref_dict.items():
symbol = re.escape(re.sub(r"([\_\`\*\<\>\&\\\\:\/])", r"\\\1", symbol))
text = re.sub(fr'{start_delim}{symbol}{end_delim}',
fr'\1{replacement}\2', text)
@@ -397,16 +433,10 @@ class ParseDataStructs:
def gen_toc(self):
"""
- Create a TOC table pointing to each symbol from the header
+ Create a list of symbols to be included in a table of contents
"""
text = []
- # Add header
- text.append(".. contents:: Table of Contents")
- text.append(" :depth: 2")
- text.append(" :local:")
- text.append("")
-
# Sort symbol types per description
symbol_descriptions = []
for k, v in self.DEF_SYMBOL_TYPES.items():
@@ -426,8 +456,8 @@ class ParseDataStructs:
text.append("")
# Sort symbols alphabetically
- for symbol, ref in sorted(refs.items()):
- text.append(f"* :{ref}:")
+ for symbol, (ref, ln) in sorted(refs.items()):
+ text.append(f"- LINENO_{ln}: {ref}")
text.append("") # Add empty line between categories
diff --git a/tools/lib/python/kdoc/python_version.py b/tools/lib/python/kdoc/python_version.py
new file mode 100644
index 000000000000..e83088013db2
--- /dev/null
+++ b/tools/lib/python/kdoc/python_version.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (c) 2017-2025 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+
+"""
+Handle Python version check logic.
+
+Not all Python versions are supported by scripts. Yet, in some cases,
+like during documentation builds, a newer version of Python may be
+available.
+
+This class allows checking whether the minimal requirements are met.
+
+Better than that, PythonVersion.check_python() not only checks the minimal
+requirements, but also automatically switches to the newest available
+Python version if one is present.
+
+"""
+
+import os
+import re
+import subprocess
+import shlex
+import sys
+
+from glob import glob
+from textwrap import indent
+
+class PythonVersion:
+ """
+ Ancillary methods to check the Python version requirements and, if
+ needed, re-run the script with a newer interpreter.
+ """
+
+ def __init__(self, version):
+ """Ïnitialize self.version tuple from a version string"""
+ self.version = self.parse_version(version)
+
+ @staticmethod
+ def parse_version(version):
+ """Convert a major.minor.patch version into a tuple"""
+ return tuple(int(x) for x in version.split("."))
+
+ @staticmethod
+ def ver_str(version):
+ """Returns a version tuple as major.minor.patch"""
+ return ".".join([str(x) for x in version])
+
+ @staticmethod
+ def cmd_print(cmd, max_len=80):
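+ """Return cmd as a shell-quoted command line, wrapped at max_len columns."""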
+ cmd_line = []
+
+ for w in cmd:
+ w = shlex.quote(w)
+
+ if cmd_line:
+ if not max_len or len(cmd_line[-1]) + len(w) < max_len:
+ cmd_line[-1] += " " + w
+ continue
+ else:
+ cmd_line[-1] += " \\"
+ cmd_line.append(w)
+ else:
+ cmd_line.append(w)
+
+ return "\n ".join(cmd_line)
+
+ def __str__(self):
+ """Returns a version tuple as major.minor.patch from self.version"""
+ return self.ver_str(self.version)
+
+ @staticmethod
+ def get_python_version(cmd):
+ """
+ Get the version of a Python binary. As we need to detect whether
+ newer python binaries are out there, we can't rely on the running
+ interpreter's own version here.
+ """
+
+ kwargs = {}
+ if sys.version_info < (3, 7):
+ kwargs['universal_newlines'] = True
+ else:
+ kwargs['text'] = True
+
+ result = subprocess.run([cmd, "--version"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ **kwargs, check=False)
+
+ version = result.stdout.strip()
+
+ match = re.search(r"(\d+\.\d+\.\d+)", version)
+ if match:
+ return PythonVersion.parse_version(match.group(1))
+
+ print(f"Can't parse version {version}")
+ return (0, 0, 0)
+
+ @staticmethod
+ def find_python(min_version):
+ """
+ Detect whether any python 3.xy version newer than the current one
+ is available.
+
+ Note: this routine is limited to two minor-version digits for python3.
+ We may need to update it one day, hopefully in a distant future.
+ """
+ patterns = [
+ "python3.[0-9][0-9]",
+ "python3.[0-9]",
+ ]
+
+ python_cmd = []
+
+ # Seek for a python binary newer than min_version
+ for path in os.getenv("PATH", "").split(":"):
+ for pattern in patterns:
+ for cmd in glob(os.path.join(path, pattern)):
+ if os.path.isfile(cmd) and os.access(cmd, os.X_OK):
+ version = PythonVersion.get_python_version(cmd)
+ if version >= min_version:
+ python_cmd.append((version, cmd))
+
+ return sorted(python_cmd, reverse=True)
+
+ @staticmethod
+ def check_python(min_version, show_alternatives=False, bail_out=False,
+ success_on_error=False):
+ """
+ Check if the current python binary satisfies our minimal requirement
+ for Sphinx build. If not, re-run with a newer version if found.
+ """
+ cur_ver = sys.version_info[:3]
+ if cur_ver >= min_version:
+ return
+
+ python_ver = PythonVersion.ver_str(cur_ver)
+
+ available_versions = PythonVersion.find_python(min_version)
+ if not available_versions:
+ print(f"ERROR: Python version {python_ver} is not supported anymore\n")
+ print(" Can't find a new version. This script may fail")
+ return
+
+ script_path = os.path.abspath(sys.argv[0])
+
+ # find_python() returned a sorted, non-empty list; pick the newest
+ new_python_cmd = available_versions[0][1]
+
+ if show_alternatives:
+ print("You could run, instead:")
+ for _, cmd in available_versions:
+ args = [cmd, script_path] + sys.argv[1:]
+
+ cmd_str = indent(PythonVersion.cmd_print(args), " ")
+ print(f"{cmd_str}\n")
+
+ if bail_out:
+ msg = f"Python {python_ver} not supported. Bailing out"
+ if success_on_error:
+ print(msg, file=sys.stderr)
+ sys.exit(0)
+ else:
+ sys.exit(msg)
+
+ print(f"Python {python_ver} not supported. Changing to {new_python_cmd}")
+
+ # Restart script using the newer version
+ args = [new_python_cmd, script_path] + sys.argv[1:]
+
+ try:
+ os.execv(new_python_cmd, args)
+ except OSError as e:
+ sys.exit(f"Failed to restart with {new_python_cmd}: {e}")
diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile
index d68780e2e03d..e4bda2474060 100644
--- a/tools/sched_ext/Makefile
+++ b/tools/sched_ext/Makefile
@@ -133,6 +133,7 @@ $(MAKE_DIRS):
$(call msg,MKDIR,,$@)
$(Q)mkdir -p $@
+ifneq ($(CROSS_COMPILE),)
$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(APIDIR)/linux/bpf.h \
| $(OBJ_DIR)/libbpf
@@ -141,6 +142,7 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
EXTRA_CFLAGS='-g -O0 -fPIC' \
LDFLAGS="$(LDFLAGS)" \
DESTDIR=$(OUTPUT_DIR) prefix= all install_headers
+endif
$(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(APIDIR)/linux/bpf.h \
@@ -187,7 +189,7 @@ $(INCLUDE_DIR)/%.bpf.skel.h: $(SCXOBJ_DIR)/%.bpf.o $(INCLUDE_DIR)/vmlinux.h $(BP
SCX_COMMON_DEPS := include/scx/common.h include/scx/user_exit_info.h | $(BINDIR)
-c-sched-targets = scx_simple scx_qmap scx_central scx_flatcg
+c-sched-targets = scx_simple scx_cpu0 scx_qmap scx_central scx_flatcg
$(addprefix $(BINDIR)/,$(c-sched-targets)): \
$(BINDIR)/%: \
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index 06e2551033cb..821d5791bd42 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -60,21 +60,15 @@ static inline void ___vmlinux_h_sanity_check___(void)
s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
-s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
- const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
-void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
-void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
+s32 __scx_bpf_select_cpu_and(struct task_struct *p, const struct cpumask *cpus_allowed,
+ struct scx_bpf_select_cpu_and_args *args) __ksym __weak;
+bool __scx_bpf_dsq_insert_vtime(struct task_struct *p, struct scx_bpf_dsq_insert_vtime_args *args) __ksym __weak;
u32 scx_bpf_dispatch_nr_slots(void) __ksym;
void scx_bpf_dispatch_cancel(void) __ksym;
-bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym __weak;
-void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
-void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
-bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
-bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
-u32 scx_bpf_reenqueue_local(void) __ksym;
void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
+struct task_struct *scx_bpf_dsq_peek(u64 dsq_id) __ksym __weak;
int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, u64 flags) __ksym __weak;
struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak;
void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak;
@@ -105,7 +99,6 @@ s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
struct rq *scx_bpf_locked_rq(void) __ksym;
struct task_struct *scx_bpf_cpu_curr(s32 cpu) __ksym __weak;
-struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
u64 scx_bpf_now(void) __ksym __weak;
void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;
diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h
index dd9144624dc9..f2969c3061a7 100644
--- a/tools/sched_ext/include/scx/compat.bpf.h
+++ b/tools/sched_ext/include/scx/compat.bpf.h
@@ -16,119 +16,92 @@
})
/* v6.12: 819513666966 ("sched_ext: Add cgroup support") */
-#define __COMPAT_scx_bpf_task_cgroup(p) \
- (bpf_ksym_exists(scx_bpf_task_cgroup) ? \
- scx_bpf_task_cgroup((p)) : NULL)
+struct cgroup *scx_bpf_task_cgroup___new(struct task_struct *p) __ksym __weak;
+
+#define scx_bpf_task_cgroup(p) \
+ (bpf_ksym_exists(scx_bpf_task_cgroup___new) ? \
+ scx_bpf_task_cgroup___new((p)) : NULL)
/*
* v6.13: The verb `dispatch` was too overloaded and confusing. kfuncs are
* renamed to unload the verb.
*
- * Build error is triggered if old names are used. New binaries work with both
- * new and old names. The compat macros will be removed on v6.15 release.
- *
* scx_bpf_dispatch_from_dsq() and friends were added during v6.12 by
* 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()").
- * Preserve __COMPAT macros until v6.15.
*/
-void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
-void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
-bool scx_bpf_consume___compat(u64 dsq_id) __ksym __weak;
-void scx_bpf_dispatch_from_dsq_set_slice___compat(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
-void scx_bpf_dispatch_from_dsq_set_vtime___compat(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
-bool scx_bpf_dispatch_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
-bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
-int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;
-
-#define scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags) \
- (bpf_ksym_exists(scx_bpf_dsq_insert) ? \
- scx_bpf_dsq_insert((p), (dsq_id), (slice), (enq_flags)) : \
- scx_bpf_dispatch___compat((p), (dsq_id), (slice), (enq_flags)))
-
-#define scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags) \
- (bpf_ksym_exists(scx_bpf_dsq_insert_vtime) ? \
- scx_bpf_dsq_insert_vtime((p), (dsq_id), (slice), (vtime), (enq_flags)) : \
- scx_bpf_dispatch_vtime___compat((p), (dsq_id), (slice), (vtime), (enq_flags)))
+bool scx_bpf_dsq_move_to_local___new(u64 dsq_id) __ksym __weak;
+void scx_bpf_dsq_move_set_slice___new(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
+void scx_bpf_dsq_move_set_vtime___new(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
+bool scx_bpf_dsq_move___new(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
+bool scx_bpf_dsq_move_vtime___new(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
+
+bool scx_bpf_consume___old(u64 dsq_id) __ksym __weak;
+void scx_bpf_dispatch_from_dsq_set_slice___old(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
+void scx_bpf_dispatch_from_dsq_set_vtime___old(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
+bool scx_bpf_dispatch_from_dsq___old(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
+bool scx_bpf_dispatch_vtime_from_dsq___old(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
#define scx_bpf_dsq_move_to_local(dsq_id) \
- (bpf_ksym_exists(scx_bpf_dsq_move_to_local) ? \
- scx_bpf_dsq_move_to_local((dsq_id)) : \
- scx_bpf_consume___compat((dsq_id)))
-
-#define __COMPAT_scx_bpf_dsq_move_set_slice(it__iter, slice) \
- (bpf_ksym_exists(scx_bpf_dsq_move_set_slice) ? \
- scx_bpf_dsq_move_set_slice((it__iter), (slice)) : \
- (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___compat) ? \
- scx_bpf_dispatch_from_dsq_set_slice___compat((it__iter), (slice)) : \
+ (bpf_ksym_exists(scx_bpf_dsq_move_to_local___new) ? \
+ scx_bpf_dsq_move_to_local___new((dsq_id)) : \
+ scx_bpf_consume___old((dsq_id)))
+
+#define scx_bpf_dsq_move_set_slice(it__iter, slice) \
+ (bpf_ksym_exists(scx_bpf_dsq_move_set_slice___new) ? \
+ scx_bpf_dsq_move_set_slice___new((it__iter), (slice)) : \
+ (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___old) ? \
+ scx_bpf_dispatch_from_dsq_set_slice___old((it__iter), (slice)) : \
+ (void)0))
+
+#define scx_bpf_dsq_move_set_vtime(it__iter, vtime) \
+ (bpf_ksym_exists(scx_bpf_dsq_move_set_vtime___new) ? \
+ scx_bpf_dsq_move_set_vtime___new((it__iter), (vtime)) : \
+ (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___old) ? \
+ scx_bpf_dispatch_from_dsq_set_vtime___old((it__iter), (vtime)) : \
(void)0))
-#define __COMPAT_scx_bpf_dsq_move_set_vtime(it__iter, vtime) \
- (bpf_ksym_exists(scx_bpf_dsq_move_set_vtime) ? \
- scx_bpf_dsq_move_set_vtime((it__iter), (vtime)) : \
- (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___compat) ? \
- scx_bpf_dispatch_from_dsq_set_vtime___compat((it__iter), (vtime)) : \
- (void) 0))
-
-#define __COMPAT_scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags) \
- (bpf_ksym_exists(scx_bpf_dsq_move) ? \
- scx_bpf_dsq_move((it__iter), (p), (dsq_id), (enq_flags)) : \
- (bpf_ksym_exists(scx_bpf_dispatch_from_dsq___compat) ? \
- scx_bpf_dispatch_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
+#define scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags) \
+ (bpf_ksym_exists(scx_bpf_dsq_move___new) ? \
+ scx_bpf_dsq_move___new((it__iter), (p), (dsq_id), (enq_flags)) : \
+ (bpf_ksym_exists(scx_bpf_dispatch_from_dsq___old) ? \
+ scx_bpf_dispatch_from_dsq___old((it__iter), (p), (dsq_id), (enq_flags)) : \
false))
-#define __COMPAT_scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags) \
- (bpf_ksym_exists(scx_bpf_dsq_move_vtime) ? \
- scx_bpf_dsq_move_vtime((it__iter), (p), (dsq_id), (enq_flags)) : \
- (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___compat) ? \
- scx_bpf_dispatch_vtime_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
+#define scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags) \
+ (bpf_ksym_exists(scx_bpf_dsq_move_vtime___new) ? \
+ scx_bpf_dsq_move_vtime___new((it__iter), (p), (dsq_id), (enq_flags)) : \
+ (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___old) ? \
+ scx_bpf_dispatch_vtime_from_dsq___old((it__iter), (p), (dsq_id), (enq_flags)) : \
false))
+/*
+ * v6.15: 950ad93df2fc ("bpf: add kfunc for populating cpumask bits")
+ *
+ * Compat macro will be dropped on v6.19 release.
+ */
+int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;
+
#define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz) \
(bpf_ksym_exists(bpf_cpumask_populate) ? \
(bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP)
-#define scx_bpf_dispatch(p, dsq_id, slice, enq_flags) \
- _Static_assert(false, "scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()")
-
-#define scx_bpf_dispatch_vtime(p, dsq_id, slice, vtime, enq_flags) \
- _Static_assert(false, "scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()")
-
-#define scx_bpf_consume(dsq_id) ({ \
- _Static_assert(false, "scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()"); \
- false; \
-})
-
-#define scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \
- _Static_assert(false, "scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()")
-
-#define scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \
- _Static_assert(false, "scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()")
-
-#define scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
- _Static_assert(false, "scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()"); \
- false; \
-})
-
-#define scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
- _Static_assert(false, "scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()"); \
- false; \
-})
-
-#define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \
- _Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_slice() renamed to __COMPAT_scx_bpf_dsq_move_set_slice()")
-
-#define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \
- _Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_vtime() renamed to __COMPAT_scx_bpf_dsq_move_set_vtime()")
-
-#define __COMPAT_scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
- _Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move()"); \
- false; \
-})
-
-#define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
- _Static_assert(false, "__COMPAT_scx_bpf_dispatch_vtime_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move_vtime()"); \
- false; \
-})
+/*
+ * v6.19: Introduce lockless peek API for user DSQs.
+ *
+ * Preserve the following macro until v6.21.
+ */
+static inline struct task_struct *__COMPAT_scx_bpf_dsq_peek(u64 dsq_id)
+{
+ struct task_struct *p = NULL;
+ struct bpf_iter_scx_dsq it;
+
+ if (bpf_ksym_exists(scx_bpf_dsq_peek))
+ return scx_bpf_dsq_peek(dsq_id);
+ if (!bpf_iter_scx_dsq_new(&it, dsq_id, 0))
+ p = bpf_iter_scx_dsq_next(&it);
+ bpf_iter_scx_dsq_destroy(&it);
+ return p;
+}
/**
* __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
@@ -248,6 +221,161 @@ static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu)
}
/*
+ * v6.19: To work around BPF maximum parameter limit, the following kfuncs are
+ * replaced with variants that pack scalar arguments in a struct. Wrappers are
+ * provided to maintain source compatibility.
+ *
+ * v6.13: scx_bpf_dsq_insert_vtime() renaming is also handled here. See the
+ * block on dispatch renaming above for more details.
+ *
+ * The kernel will carry the compat variants until v6.23 to maintain binary
+ * compatibility. After v6.23 release, remove the compat handling and move the
+ * wrappers to common.bpf.h.
+ */
+s32 scx_bpf_select_cpu_and___compat(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+ const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
+void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
+void scx_bpf_dsq_insert_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
+
+/**
+ * scx_bpf_select_cpu_and - Pick an idle CPU usable by task @p
+ * @p: task_struct to select a CPU for
+ * @prev_cpu: CPU @p was on previously
+ * @wake_flags: %SCX_WAKE_* flags
+ * @cpus_allowed: cpumask of allowed CPUs
+ * @flags: %SCX_PICK_IDLE* flags
+ *
+ * Inline wrapper that packs scalar arguments into a struct and calls
+ * __scx_bpf_select_cpu_and(). See __scx_bpf_select_cpu_and() for details.
+ */
+static inline s32
+scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+ const struct cpumask *cpus_allowed, u64 flags)
+{
+ if (bpf_core_type_exists(struct scx_bpf_select_cpu_and_args)) {
+ struct scx_bpf_select_cpu_and_args args = {
+ .prev_cpu = prev_cpu,
+ .wake_flags = wake_flags,
+ .flags = flags,
+ };
+
+ return __scx_bpf_select_cpu_and(p, cpus_allowed, &args);
+ } else {
+ return scx_bpf_select_cpu_and___compat(p, prev_cpu, wake_flags,
+ cpus_allowed, flags);
+ }
+}
+
+/**
+ * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
+ * @p: task_struct to insert
+ * @dsq_id: DSQ to insert into
+ * @slice: duration @p can run for in nsecs, 0 to keep the current value
+ * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
+ * @enq_flags: SCX_ENQ_*
+ *
+ * Inline wrapper that packs scalar arguments into a struct and calls
+ * __scx_bpf_dsq_insert_vtime(). See __scx_bpf_dsq_insert_vtime() for details.
+ */
+static inline bool
+scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime,
+ u64 enq_flags)
+{
+ if (bpf_core_type_exists(struct scx_bpf_dsq_insert_vtime_args)) {
+ struct scx_bpf_dsq_insert_vtime_args args = {
+ .dsq_id = dsq_id,
+ .slice = slice,
+ .vtime = vtime,
+ .enq_flags = enq_flags,
+ };
+
+ return __scx_bpf_dsq_insert_vtime(p, &args);
+ } else if (bpf_ksym_exists(scx_bpf_dsq_insert_vtime___compat)) {
+ scx_bpf_dsq_insert_vtime___compat(p, dsq_id, slice, vtime,
+ enq_flags);
+ return true;
+ } else {
+ scx_bpf_dispatch_vtime___compat(p, dsq_id, slice, vtime,
+ enq_flags);
+ return true;
+ }
+}
+
+/*
+ * v6.19: scx_bpf_dsq_insert() now returns bool instead of void. Move
+ * scx_bpf_dsq_insert() decl to common.bpf.h and drop compat helper after v6.22.
+ * The extra ___compat suffix is to work around libbpf not ignoring ___SUFFIX on
+ * the kernel side. The entire suffix can be dropped later.
+ *
+ * v6.13: scx_bpf_dsq_insert() renaming is also handled here. See the block on
+ * dispatch renaming above for more details.
+ */
+bool scx_bpf_dsq_insert___v2___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
+void scx_bpf_dsq_insert___v1(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
+void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
+
+static inline bool
+scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags)
+{
+ if (bpf_ksym_exists(scx_bpf_dsq_insert___v2___compat)) {
+ return scx_bpf_dsq_insert___v2___compat(p, dsq_id, slice, enq_flags);
+ } else if (bpf_ksym_exists(scx_bpf_dsq_insert___v1)) {
+ scx_bpf_dsq_insert___v1(p, dsq_id, slice, enq_flags);
+ return true;
+ } else {
+ scx_bpf_dispatch___compat(p, dsq_id, slice, enq_flags);
+ return true;
+ }
+}
+
+/*
+ * v6.19: scx_bpf_task_set_slice() and scx_bpf_task_set_dsq_vtime() added for
+ * sub-sched authority checks. Drop the wrappers and move the decls to
+ * common.bpf.h after v6.22.
+ */
+bool scx_bpf_task_set_slice___new(struct task_struct *p, u64 slice) __ksym __weak;
+bool scx_bpf_task_set_dsq_vtime___new(struct task_struct *p, u64 vtime) __ksym __weak;
+
+static inline void scx_bpf_task_set_slice(struct task_struct *p, u64 slice)
+{
+ if (bpf_ksym_exists(scx_bpf_task_set_slice___new))
+ scx_bpf_task_set_slice___new(p, slice);
+ else
+ p->scx.slice = slice;
+}
+
+static inline void scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime)
+{
+ if (bpf_ksym_exists(scx_bpf_task_set_dsq_vtime___new))
+ scx_bpf_task_set_dsq_vtime___new(p, vtime);
+ else
+ p->scx.dsq_vtime = vtime;
+}
+
+/*
+ * v6.19: The new void variant can be called from anywhere while the older v1
+ * variant can only be called from ops.cpu_release(). The double ___ suffixes on
+ * the v2 variant need to be removed once libbpf is updated to ignore the ___
+ * suffix on the kernel side. Drop the wrapper and move the decl to common.bpf.h after
+ * v6.22.
+ */
+u32 scx_bpf_reenqueue_local___v1(void) __ksym __weak;
+void scx_bpf_reenqueue_local___v2___compat(void) __ksym __weak;
+
+static inline bool __COMPAT_scx_bpf_reenqueue_local_from_anywhere(void)
+{
+ return bpf_ksym_exists(scx_bpf_reenqueue_local___v2___compat);
+}
+
+static inline void scx_bpf_reenqueue_local(void)
+{
+ if (__COMPAT_scx_bpf_reenqueue_local_from_anywhere())
+ scx_bpf_reenqueue_local___v2___compat();
+ else
+ scx_bpf_reenqueue_local___v1();
+}
+
+/*
* Define sched_ext_ops. This may be expanded to define multiple variants for
* backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
*/
diff --git a/tools/sched_ext/include/scx/compat.h b/tools/sched_ext/include/scx/compat.h
index 35c67c5174ac..8b4897fc8b99 100644
--- a/tools/sched_ext/include/scx/compat.h
+++ b/tools/sched_ext/include/scx/compat.h
@@ -151,6 +151,10 @@ static inline long scx_hotplug_seq(void)
*
* ec7e3b0463e1 ("implement-ops") in https://github.com/sched-ext/sched_ext is
* the current minimum required kernel version.
+ *
+ * COMPAT:
+ * - v6.17: ops.cgroup_set_bandwidth()
+ * - v6.19: ops.cgroup_set_idle()
*/
#define SCX_OPS_OPEN(__ops_name, __scx_name) ({ \
struct __scx_name *__skel; \
@@ -162,6 +166,16 @@ static inline long scx_hotplug_seq(void)
SCX_BUG_ON(!__skel, "Could not open " #__scx_name); \
__skel->struct_ops.__ops_name->hotplug_seq = scx_hotplug_seq(); \
SCX_ENUM_INIT(__skel); \
+ if (__skel->struct_ops.__ops_name->cgroup_set_bandwidth && \
+ !__COMPAT_struct_has_field("sched_ext_ops", "cgroup_set_bandwidth")) { \
+ fprintf(stderr, "WARNING: kernel doesn't support ops.cgroup_set_bandwidth()\n"); \
+ __skel->struct_ops.__ops_name->cgroup_set_bandwidth = NULL; \
+ } \
+ if (__skel->struct_ops.__ops_name->cgroup_set_idle && \
+ !__COMPAT_struct_has_field("sched_ext_ops", "cgroup_set_idle")) { \
+ fprintf(stderr, "WARNING: kernel doesn't support ops.cgroup_set_idle()\n"); \
+ __skel->struct_ops.__ops_name->cgroup_set_idle = NULL; \
+ } \
__skel; \
})
diff --git a/tools/sched_ext/scx_cpu0.bpf.c b/tools/sched_ext/scx_cpu0.bpf.c
new file mode 100644
index 000000000000..6326ce598c8e
--- /dev/null
+++ b/tools/sched_ext/scx_cpu0.bpf.c
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A CPU0 scheduler.
+ *
+ * This scheduler queues all tasks to a shared DSQ and only dispatches them on
+ * CPU0 in FIFO order. This is useful for testing bypass behavior when many
+ * tasks are concentrated on a single CPU. If the load balancer doesn't work,
+ * bypass mode can trigger task hangs or RCU stalls as the queue is long and
+ * there's only one CPU working on it.
+ *
+ * - Statistics tracking how many tasks are queued to local and CPU0 DSQs.
+ * - Termination notification for userspace.
+ *
+ * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2025 Tejun Heo <tj@kernel.org>
+ */
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+const volatile u32 nr_cpus = 32; /* !0 for veristat, set during init */
+
+UEI_DEFINE(uei);
+
+/*
+ * We create a custom DSQ with ID 0 that we dispatch to and consume from on
+ * CPU0.
+ */
+#define DSQ_CPU0 0
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u64));
+ __uint(max_entries, 2); /* [local, cpu0] */
+} stats SEC(".maps");
+
+static void stat_inc(u32 idx)
+{
+ u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx);
+ if (cnt_p)
+ (*cnt_p)++;
+}
+
+s32 BPF_STRUCT_OPS(cpu0_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
+{
+ return 0;
+}
+
+void BPF_STRUCT_OPS(cpu0_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ /*
+ * select_cpu() always picks CPU0. If @p is not on CPU0 by now, it can't
+ * run on CPU0 (e.g. due to its allowed CPUs). Queue it on whichever CPU
+ * it's currently on.
+ */
+ if (scx_bpf_task_cpu(p) != 0) {
+ stat_inc(0); /* count local queueing */
+ scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+ return;
+ }
+
+ stat_inc(1); /* count cpu0 queueing */
+ scx_bpf_dsq_insert(p, DSQ_CPU0, SCX_SLICE_DFL, enq_flags);
+}
+
+void BPF_STRUCT_OPS(cpu0_dispatch, s32 cpu, struct task_struct *prev)
+{
+ if (cpu == 0)
+ scx_bpf_dsq_move_to_local(DSQ_CPU0);
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(cpu0_init)
+{
+ return scx_bpf_create_dsq(DSQ_CPU0, -1);
+}
+
+void BPF_STRUCT_OPS(cpu0_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SCX_OPS_DEFINE(cpu0_ops,
+ .select_cpu = (void *)cpu0_select_cpu,
+ .enqueue = (void *)cpu0_enqueue,
+ .dispatch = (void *)cpu0_dispatch,
+ .init = (void *)cpu0_init,
+ .exit = (void *)cpu0_exit,
+ .name = "cpu0");
diff --git a/tools/sched_ext/scx_cpu0.c b/tools/sched_ext/scx_cpu0.c
new file mode 100644
index 000000000000..1e4fa4ab8da9
--- /dev/null
+++ b/tools/sched_ext/scx_cpu0.c
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2025 Tejun Heo <tj@kernel.org>
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+#include <assert.h>
+#include <libgen.h>
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include "scx_cpu0.bpf.skel.h"
+
+const char help_fmt[] =
+"A cpu0 sched_ext scheduler.\n"
+"\n"
+"See the top-level comment in .bpf.c for more details.\n"
+"\n"
+"Usage: %s [-v]\n"
+"\n"
+" -v Print libbpf debug messages\n"
+" -h Display this help and exit\n";
+
+static bool verbose;
+static volatile int exit_req;
+
+static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
+{
+ if (level == LIBBPF_DEBUG && !verbose)
+ return 0;
+ return vfprintf(stderr, format, args);
+}
+
+static void sigint_handler(int sig)
+{
+ exit_req = 1;
+}
+
+static void read_stats(struct scx_cpu0 *skel, __u64 *stats)
+{
+ int nr_cpus = libbpf_num_possible_cpus();
+ assert(nr_cpus > 0);
+ __u64 cnts[2][nr_cpus];
+ __u32 idx;
+
+ memset(stats, 0, sizeof(stats[0]) * 2);
+
+ for (idx = 0; idx < 2; idx++) {
+ int ret, cpu;
+
+ ret = bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats),
+ &idx, cnts[idx]);
+ if (ret < 0)
+ continue;
+ for (cpu = 0; cpu < nr_cpus; cpu++)
+ stats[idx] += cnts[idx][cpu];
+ }
+}
+
+int main(int argc, char **argv)
+{
+ struct scx_cpu0 *skel;
+ struct bpf_link *link;
+ __u32 opt;
+ __u64 ecode;
+
+ libbpf_set_print(libbpf_print_fn);
+ signal(SIGINT, sigint_handler);
+ signal(SIGTERM, sigint_handler);
+restart:
+ skel = SCX_OPS_OPEN(cpu0_ops, scx_cpu0);
+
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+ while ((opt = getopt(argc, argv, "vh")) != -1) {
+ switch (opt) {
+ case 'v':
+ verbose = true;
+ break;
+ default:
+ fprintf(stderr, help_fmt, basename(argv[0]));
+ return opt != 'h';
+ }
+ }
+
+ SCX_OPS_LOAD(skel, cpu0_ops, scx_cpu0, uei);
+ link = SCX_OPS_ATTACH(skel, cpu0_ops, scx_cpu0);
+
+ while (!exit_req && !UEI_EXITED(skel, uei)) {
+ __u64 stats[2];
+
+ read_stats(skel, stats);
+ printf("local=%llu cpu0=%llu\n", stats[0], stats[1]);
+ fflush(stdout);
+ sleep(1);
+ }
+
+ bpf_link__destroy(link);
+ ecode = UEI_REPORT(skel, uei);
+ scx_cpu0__destroy(skel);
+
+ if (UEI_ECODE_RESTART(ecode))
+ goto restart;
+ return 0;
+}
diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c
index 2c720e3ecad5..43126858b8e4 100644
--- a/tools/sched_ext/scx_flatcg.bpf.c
+++ b/tools/sched_ext/scx_flatcg.bpf.c
@@ -382,7 +382,7 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
return;
}
- cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ cgrp = scx_bpf_task_cgroup(p);
cgc = find_cgrp_ctx(cgrp);
if (!cgc)
goto out_release;
@@ -508,7 +508,7 @@ void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags)
{
struct cgroup *cgrp;
- cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ cgrp = scx_bpf_task_cgroup(p);
update_active_weight_sums(cgrp, true);
bpf_cgroup_release(cgrp);
}
@@ -521,7 +521,7 @@ void BPF_STRUCT_OPS(fcg_running, struct task_struct *p)
if (fifo_sched)
return;
- cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ cgrp = scx_bpf_task_cgroup(p);
cgc = find_cgrp_ctx(cgrp);
if (cgc) {
/*
@@ -564,7 +564,7 @@ void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
if (!taskc->bypassed_at)
return;
- cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ cgrp = scx_bpf_task_cgroup(p);
cgc = find_cgrp_ctx(cgrp);
if (cgc) {
__sync_fetch_and_add(&cgc->cvtime_delta,
@@ -578,7 +578,7 @@ void BPF_STRUCT_OPS(fcg_quiescent, struct task_struct *p, u64 deq_flags)
{
struct cgroup *cgrp;
- cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ cgrp = scx_bpf_task_cgroup(p);
update_active_weight_sums(cgrp, false);
bpf_cgroup_release(cgrp);
}
diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c
index 3072b593f898..df21fad0c438 100644
--- a/tools/sched_ext/scx_qmap.bpf.c
+++ b/tools/sched_ext/scx_qmap.bpf.c
@@ -202,6 +202,9 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags)
void *ring;
s32 cpu;
+ if (enq_flags & SCX_ENQ_REENQ)
+ __sync_fetch_and_add(&nr_reenqueued, 1);
+
if (p->flags & PF_KTHREAD) {
if (stall_kernel_nth && !(++kernel_cnt % stall_kernel_nth))
return;
@@ -320,12 +323,9 @@ static bool dispatch_highpri(bool from_timer)
if (tctx->highpri) {
/* exercise the set_*() and vtime interface too */
- __COMPAT_scx_bpf_dsq_move_set_slice(
- BPF_FOR_EACH_ITER, slice_ns * 2);
- __COMPAT_scx_bpf_dsq_move_set_vtime(
- BPF_FOR_EACH_ITER, highpri_seq++);
- __COMPAT_scx_bpf_dsq_move_vtime(
- BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
+ scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, slice_ns * 2);
+ scx_bpf_dsq_move_set_vtime(BPF_FOR_EACH_ITER, highpri_seq++);
+ scx_bpf_dsq_move_vtime(BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
}
}
@@ -342,9 +342,8 @@ static bool dispatch_highpri(bool from_timer)
else
cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
- if (__COMPAT_scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p,
- SCX_DSQ_LOCAL_ON | cpu,
- SCX_ENQ_PREEMPT)) {
+ if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL_ON | cpu,
+ SCX_ENQ_PREEMPT)) {
if (cpu == this_cpu) {
dispatched = true;
__sync_fetch_and_add(&nr_expedited_local, 1);
@@ -533,20 +532,35 @@ bool BPF_STRUCT_OPS(qmap_core_sched_before,
return task_qdist(a) > task_qdist(b);
}
-void BPF_STRUCT_OPS(qmap_cpu_release, s32 cpu, struct scx_cpu_release_args *args)
+SEC("tp_btf/sched_switch")
+int BPF_PROG(qmap_sched_switch, bool preempt, struct task_struct *prev,
+ struct task_struct *next, unsigned long prev_state)
{
- u32 cnt;
+ if (!__COMPAT_scx_bpf_reenqueue_local_from_anywhere())
+ return 0;
/*
- * Called when @cpu is taken by a higher priority scheduling class. This
- * makes @cpu no longer available for executing sched_ext tasks. As we
- * don't want the tasks in @cpu's local dsq to sit there until @cpu
- * becomes available again, re-enqueue them into the global dsq. See
- * %SCX_ENQ_REENQ handling in qmap_enqueue().
+ * If @cpu is taken by a higher priority scheduling class, it is no
+ * longer available for executing sched_ext tasks. As we don't want the
+ * tasks in @cpu's local dsq to sit there until @cpu becomes available
+ * again, re-enqueue them into the global dsq. See %SCX_ENQ_REENQ
+ * handling in qmap_enqueue().
*/
- cnt = scx_bpf_reenqueue_local();
- if (cnt)
- __sync_fetch_and_add(&nr_reenqueued, cnt);
+ switch (next->policy) {
+ case 1: /* SCHED_FIFO */
+ case 2: /* SCHED_RR */
+ case 6: /* SCHED_DEADLINE */
+ scx_bpf_reenqueue_local();
+ }
+
+ return 0;
+}
+
+void BPF_STRUCT_OPS(qmap_cpu_release, s32 cpu, struct scx_cpu_release_args *args)
+{
+ /* see qmap_sched_switch() to learn how to do this on newer kernels */
+ if (!__COMPAT_scx_bpf_reenqueue_local_from_anywhere())
+ scx_bpf_reenqueue_local();
}
s32 BPF_STRUCT_OPS(qmap_init_task, struct task_struct *p,
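
Context for the hunks above: scx_bpf_reenqueue_local() pushes the tasks sitting on a CPU's local DSQ back through ops.enqueue() with SCX_ENQ_REENQ set, which is what the new counter increment at the top of qmap_enqueue() observes. A hedged minimal sketch of an enqueue path consuming that flag (the counter and DSQ choice are illustrative):

static u64 nr_reenqueued;

void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
{
	/* Tasks bounced off a preempted CPU arrive with SCX_ENQ_REENQ set. */
	if (enq_flags & SCX_ENQ_REENQ)
		__sync_fetch_and_add(&nr_reenqueued, 1);

	/* Park everything on the shared global DSQ; a real scheduler would
	 * pick a queue based on its policy. */
	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}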
diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
index a360e2eb2eef..1d778c8b7764 100644
--- a/tools/testing/selftests/cgroup/test_core.c
+++ b/tools/testing/selftests/cgroup/test_core.c
@@ -923,8 +923,10 @@ struct corecg_test {
int main(int argc, char *argv[])
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), &nsdelegate)) {
if (setup_named_v1_root(root, sizeof(root), CG_NAMED_NAME))
ksft_exit_skip("cgroup v2 isn't mounted and could not setup named v1 hierarchy\n");
@@ -946,12 +948,11 @@ post_v2_setup:
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
cleanup_named_v1_root(root);
- return ret;
+ ksft_finished();
}
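
This and the following cgroup selftest conversions replace the hand-rolled EXIT_SUCCESS/EXIT_FAILURE bookkeeping with the kselftest harness, which derives the exit code from the recorded TAP results. A minimal sketch of that lifecycle, assuming the usual relative include path:

#include "../kselftest.h"

int main(void)
{
	ksft_print_header();	/* emit the TAP version line */
	ksft_set_plan(2);	/* declare the number of tests up front */

	ksft_test_result_pass("first\n");
	ksft_test_result_skip("second\n");

	ksft_finished();	/* exits pass or fail based on recorded results */
}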
diff --git a/tools/testing/selftests/cgroup/test_cpu.c b/tools/testing/selftests/cgroup/test_cpu.c
index d54e2317efff..b1b30e82dd7c 100644
--- a/tools/testing/selftests/cgroup/test_cpu.c
+++ b/tools/testing/selftests/cgroup/test_cpu.c
@@ -796,8 +796,10 @@ struct cpucg_test {
int main(int argc, char *argv[])
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
@@ -814,11 +816,10 @@ int main(int argc, char *argv[])
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/cgroup/test_cpuset.c b/tools/testing/selftests/cgroup/test_cpuset.c
index 4034d14ba69a..8086d2ea394f 100644
--- a/tools/testing/selftests/cgroup/test_cpuset.c
+++ b/tools/testing/selftests/cgroup/test_cpuset.c
@@ -247,8 +247,10 @@ struct cpuset_test {
int main(int argc, char *argv[])
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
@@ -265,11 +267,10 @@ int main(int argc, char *argv[])
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/cgroup/test_freezer.c b/tools/testing/selftests/cgroup/test_freezer.c
index dfb763819581..465cdad2bfca 100644
--- a/tools/testing/selftests/cgroup/test_freezer.c
+++ b/tools/testing/selftests/cgroup/test_freezer.c
@@ -1488,8 +1488,10 @@ struct cgfreezer_test {
int main(int argc, char *argv[])
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
@@ -1501,11 +1503,10 @@ int main(int argc, char *argv[])
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/cgroup/test_kill.c b/tools/testing/selftests/cgroup/test_kill.c
index 0e5bb6c7307a..ed590b150a17 100644
--- a/tools/testing/selftests/cgroup/test_kill.c
+++ b/tools/testing/selftests/cgroup/test_kill.c
@@ -274,8 +274,10 @@ struct cgkill_test {
int main(int argc, char *argv[])
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
@@ -287,11 +289,10 @@ int main(int argc, char *argv[])
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/cgroup/test_kmem.c b/tools/testing/selftests/cgroup/test_kmem.c
index 63b3c9aad399..d4c4a514ee43 100644
--- a/tools/testing/selftests/cgroup/test_kmem.c
+++ b/tools/testing/selftests/cgroup/test_kmem.c
@@ -421,8 +421,10 @@ struct kmem_test {
int main(int argc, char **argv)
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
@@ -446,11 +448,10 @@ int main(int argc, char **argv)
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index a680f773f2d5..b117325c0439 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -1650,8 +1650,10 @@ struct memcg_test {
int main(int argc, char **argv)
{
char root[PATH_MAX];
- int i, proc_status, ret = EXIT_SUCCESS;
+ int i, proc_status;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
@@ -1685,11 +1687,10 @@ int main(int argc, char **argv)
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c
index e1f578ca2841..86a8930b47e3 100644
--- a/tools/testing/selftests/cgroup/test_zswap.c
+++ b/tools/testing/selftests/cgroup/test_zswap.c
@@ -597,8 +597,10 @@ static bool zswap_configured(void)
int main(int argc, char **argv)
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
@@ -625,11 +627,10 @@ int main(int argc, char **argv)
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/testing/selftests/dma/dma_map_benchmark.c
index b12f1f9babf8..b925756373ce 100644
--- a/tools/testing/selftests/dma/dma_map_benchmark.c
+++ b/tools/testing/selftests/dma/dma_map_benchmark.c
@@ -118,7 +118,7 @@ int main(int argc, char **argv)
}
printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s granule: %d\n",
- threads, seconds, node, dir[directions], granule);
+ threads, seconds, node, directions[dir], granule);
printf("average map latency(us):%.1f standard deviation:%.1f\n",
map.avg_map_100ns/10.0, map.map_stddev/10.0);
printf("average unmap latency(us):%.1f standard deviation:%.1f\n",
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/trace_marker_raw.tc b/tools/testing/selftests/ftrace/test.d/00basic/trace_marker_raw.tc
new file mode 100644
index 000000000000..7daf7292209e
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/00basic/trace_marker_raw.tc
@@ -0,0 +1,107 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Basic tests on writing to trace_marker_raw
+# requires: trace_marker_raw
+# flags: instance
+
+is_little_endian() {
+ if lscpu | grep -q 'Little Endian'; then
+ echo 1;
+ else
+ echo 0;
+ fi
+}
+
+little=`is_little_endian`
+
+make_str() {
+ id=$1
+ cnt=$2
+
+ if [ $little -eq 1 ]; then
+ val=`printf "\\%03o\\%03o\\%03o\\%03o" \
+ $(($id & 0xff)) \
+ $((($id >> 8) & 0xff)) \
+ $((($id >> 16) & 0xff)) \
+ $((($id >> 24) & 0xff))`
+ else
+ val=`printf "\\%03o\\%03o\\%03o\\%03o" \
+ $((($id >> 24) & 0xff)) \
+ $((($id >> 16) & 0xff)) \
+ $((($id >> 8) & 0xff)) \
+ $(($id & 0xff))`
+ fi
+
+ data=`printf -- 'X%.0s' $(seq $cnt)`
+
+ printf "${val}${data}"
+}
+
+write_buffer() {
+ id=$1
+ size=$2
+
+ # write the string into the raw marker
+ make_str $id $size > trace_marker_raw
+}
+
+
+test_multiple_writes() {
+
+ # Write a series of events where the id equals the number
+ # of payload bytes to write
+ for i in `seq 1 10` `seq 101 110` `seq 1001 1010`; do
+ write_buffer $i $i
+ done
+
+ # add a little buffer
+ echo stop > trace_marker
+
+ # Check that the number of data bytes equals the id, rounded up to a multiple of 4
+ awk '/.*: # [0-9a-f]* / {
+ print;
+ cnt = -1;
+ for (i = 0; i < NF; i++) {
+ # The counter is after the "#" marker
+ if ( $i == "#" ) {
+ i++;
+ cnt = strtonum("0x" $i);
+ num = NF - (i + 1);
+ # The number of items is always rounded up to a multiple of 4
+ cnt2 = int((cnt + 3) / 4) * 4;
+ if (cnt2 != num) {
+ exit 1;
+ }
+ break;
+ }
+ }
+ }
+ // { if (NR > 30) { exit 0; } } ' trace_pipe;
+}
+
+
+get_buffer_data_size() {
+ sed -ne 's/^.*data.*size:\([0-9][0-9]*\).*/\1/p' events/header_page
+}
+
+test_buffer() {
+
+ # The id must be four bytes; check that a 3-byte write fails
+ if echo -n abc > ./trace_marker_raw ; then
+ echo "Too small of write expected to fail but did not"
+ exit_fail
+ fi
+
+ size=`get_buffer_data_size`
+ echo size = $size
+
+ # Now write a little more than the buffer can hold
+
+ if write_buffer 0xdeadbeef $size ; then
+ echo "Too big of write expected to fail but did not"
+ exit_fail
+ fi
+}
+
+test_buffer
+test_multiple_writes
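
For reference, a hedged C equivalent of the make_str()/write_buffer() pair above: trace_marker_raw expects a 4-byte host-endian id followed by the payload, and each write() becomes a single raw marker event. The tracefs path is assumed to be mounted at the usual location:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

static int write_raw_marker(uint32_t id, size_t len)
{
	char buf[4096];
	int fd, ret;

	if (len > sizeof(buf) - sizeof(id))
		return -1;
	memcpy(buf, &id, sizeof(id));		/* native endianness, as in the test */
	memset(buf + sizeof(id), 'X', len);	/* payload */

	fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, buf, sizeof(id) + len) < 0 ? -1 : 0;
	close(fd);
	return ret;
}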
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
index 2506f464811b..47067a5e3cb0 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
@@ -28,25 +28,21 @@ test -d events/fprobes/myevent1
test -d events/fprobes/myevent2
echo 1 > events/fprobes/myevent1/enable
-# Make sure the event is attached and is the only one
+# Make sure the event is attached.
grep -q $PLACE enabled_functions
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne $((ocnt + 1)) ]; then
+if [ $cnt -eq $ocnt ]; then
exit_fail
fi
echo 1 > events/fprobes/myevent2/enable
-# It should till be the only attached function
-cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne $((ocnt + 1)) ]; then
- exit_fail
-fi
+cnt2=`cat enabled_functions | wc -l`
echo 1 > events/fprobes/myevent3/enable
# If the function is different, the attached function should be increased
grep -q $PLACE2 enabled_functions
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne $((ocnt + 2)) ]; then
+if [ $cnt -eq $cnt2 ]; then
exit_fail
fi
@@ -56,12 +52,6 @@ echo "-:myevent2" >> dynamic_events
grep -q myevent1 dynamic_events
! grep -q myevent2 dynamic_events
-# should still have 2 left
-cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne $((ocnt + 2)) ]; then
- exit_fail
-fi
-
echo 0 > events/fprobes/enable
echo > dynamic_events
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/enable_disable_tprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/enable_disable_tprobe.tc
new file mode 100644
index 000000000000..c1f1cafa30f3
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/enable_disable_tprobe.tc
@@ -0,0 +1,40 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Generic dynamic event - enable/disable tracepoint probe events
+# requires: dynamic_events "t[:[<group>/][<event>]] <tracepoint> [<args>]":README
+
+echo 0 > events/enable
+echo > dynamic_events
+
+TRACEPOINT=sched_switch
+ENABLEFILE=events/tracepoints/myprobe/enable
+
+:;: "Add tracepoint event on $TRACEPOINT" ;:
+
+echo "t:myprobe ${TRACEPOINT}" >> dynamic_events
+
+:;: "Check enable/disable to ensure it works" ;:
+
+echo 1 > $ENABLEFILE
+
+grep -q $TRACEPOINT trace
+
+echo 0 > $ENABLEFILE
+
+echo > trace
+
+! grep -q $TRACEPOINT trace
+
+:;: "Repeat enable/disable to ensure it works" ;:
+
+echo 1 > $ENABLEFILE
+
+grep -q $TRACEPOINT trace
+
+echo 0 > $ENABLEFILE
+
+echo > trace
+
+! grep -q $TRACEPOINT trace
+
+exit 0
diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
index 2c3c58e65a41..3a62039fa621 100644
--- a/tools/testing/selftests/kselftest/runner.sh
+++ b/tools/testing/selftests/kselftest/runner.sh
@@ -44,6 +44,12 @@ tap_timeout()
fi
}
+report_failure()
+{
+ echo "not ok $*"
+ echo "$*" >> "$kselftest_failures_file"
+}
+
run_one()
{
DIR="$1"
@@ -105,7 +111,7 @@ run_one()
echo "# $TEST_HDR_MSG"
if [ ! -e "$TEST" ]; then
echo "# Warning: file $TEST is missing!"
- echo "not ok $test_num $TEST_HDR_MSG"
+ report_failure "$test_num $TEST_HDR_MSG"
else
if [ -x /usr/bin/stdbuf ]; then
stdbuf="/usr/bin/stdbuf --output=L "
@@ -123,7 +129,7 @@ run_one()
interpreter=$(head -n 1 "$TEST" | cut -c 3-)
cmd="$stdbuf $interpreter ./$BASENAME_TEST"
else
- echo "not ok $test_num $TEST_HDR_MSG"
+ report_failure "$test_num $TEST_HDR_MSG"
return
fi
fi
@@ -137,9 +143,9 @@ run_one()
echo "ok $test_num $TEST_HDR_MSG # SKIP"
elif [ $rc -eq $timeout_rc ]; then \
echo "#"
- echo "not ok $test_num $TEST_HDR_MSG # TIMEOUT $kselftest_timeout seconds"
+ report_failure "$test_num $TEST_HDR_MSG # TIMEOUT $kselftest_timeout seconds"
else
- echo "not ok $test_num $TEST_HDR_MSG # exit=$rc"
+ report_failure "$test_num $TEST_HDR_MSG # exit=$rc"
fi)
cd - >/dev/null
fi
diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
index 46991a029f7c..8ec0cb64ad94 100644
--- a/tools/testing/selftests/livepatch/functions.sh
+++ b/tools/testing/selftests/livepatch/functions.sh
@@ -10,7 +10,11 @@ SYSFS_KERNEL_DIR="/sys/kernel"
SYSFS_KLP_DIR="$SYSFS_KERNEL_DIR/livepatch"
SYSFS_DEBUG_DIR="$SYSFS_KERNEL_DIR/debug"
SYSFS_KPROBES_DIR="$SYSFS_DEBUG_DIR/kprobes"
-SYSFS_TRACING_DIR="$SYSFS_DEBUG_DIR/tracing"
+if [[ -e /sys/kernel/tracing/trace ]]; then
+ SYSFS_TRACING_DIR="$SYSFS_KERNEL_DIR/tracing"
+else
+ SYSFS_TRACING_DIR="$SYSFS_DEBUG_DIR/tracing"
+fi
# Kselftest framework requirement - SKIP code is 4
ksft_skip=4
diff --git a/tools/testing/selftests/nolibc/Makefile.nolibc b/tools/testing/selftests/nolibc/Makefile.nolibc
index 9416ae952e18..f9d43cbdc894 100644
--- a/tools/testing/selftests/nolibc/Makefile.nolibc
+++ b/tools/testing/selftests/nolibc/Makefile.nolibc
@@ -225,6 +225,7 @@ CFLAGS_mipsn32le = -EL -mabi=n32 -fPIC -march=mips64r2
CFLAGS_mipsn32be = -EB -mabi=n32 -march=mips64r6
CFLAGS_mips64le = -EL -mabi=64 -march=mips64r6
CFLAGS_mips64be = -EB -mabi=64 -march=mips64r2
+CFLAGS_loongarch = $(if $(LLVM),-fuse-ld=lld)
CFLAGS_sparc32 = $(call cc-option,-m32)
CFLAGS_sh4 = -ml -m4
ifeq ($(origin XARCH),command line)
diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
index 29de21595fc9..3c5a226dad3a 100644
--- a/tools/testing/selftests/nolibc/nolibc-test.c
+++ b/tools/testing/selftests/nolibc/nolibc-test.c
@@ -25,6 +25,7 @@
#include <sys/sysmacros.h>
#include <sys/time.h>
#include <sys/timerfd.h>
+#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <dirent.h>
@@ -1282,6 +1283,10 @@ int run_syscall(int min, int max)
int proc;
int test;
int tmp;
+ struct iovec iov_one = {
+ .iov_base = &tmp,
+ .iov_len = 1,
+ };
int ret = 0;
void *p1, *p2;
int has_gettid = 1;
@@ -1343,6 +1348,8 @@ int run_syscall(int min, int max)
CASE_TEST(dup3_0); tmp = dup3(0, 100, 0); EXPECT_SYSNE(1, tmp, -1); close(tmp); break;
CASE_TEST(dup3_m1); tmp = dup3(-1, 100, 0); EXPECT_SYSER(1, tmp, -1, EBADF); if (tmp != -1) close(tmp); break;
CASE_TEST(execve_root); EXPECT_SYSER(1, execve("/", (char*[]){ [0] = "/", [1] = NULL }, NULL), -1, EACCES); break;
+ CASE_TEST(fchdir_stdin); EXPECT_SYSER(1, fchdir(STDIN_FILENO), -1, ENOTDIR); break;
+ CASE_TEST(fchdir_badfd); EXPECT_SYSER(1, fchdir(-1), -1, EBADF); break;
CASE_TEST(file_stream); EXPECT_SYSZR(1, test_file_stream()); break;
CASE_TEST(fork); EXPECT_SYSZR(1, test_fork(FORK_STANDARD)); break;
CASE_TEST(getdents64_root); EXPECT_SYSNE(1, test_getdents64("/"), -1); break;
@@ -1395,6 +1402,10 @@ int run_syscall(int min, int max)
CASE_TEST(waitpid_child); EXPECT_SYSER(1, waitpid(getpid(), &tmp, WNOHANG), -1, ECHILD); break;
CASE_TEST(write_badf); EXPECT_SYSER(1, write(-1, &tmp, 1), -1, EBADF); break;
CASE_TEST(write_zero); EXPECT_SYSZR(1, write(1, &tmp, 0)); break;
+ CASE_TEST(readv_badf); EXPECT_SYSER(1, readv(-1, &iov_one, 1), -1, EBADF); break;
+ CASE_TEST(readv_zero); EXPECT_SYSZR(1, readv(1, NULL, 0)); break;
+ CASE_TEST(writev_badf); EXPECT_SYSER(1, writev(-1, &iov_one, 1), -1, EBADF); break;
+ CASE_TEST(writev_zero); EXPECT_SYSZR(1, writev(1, NULL, 0)); break;
CASE_TEST(syscall_noargs); EXPECT_SYSEQ(1, syscall(__NR_getpid), getpid()); break;
CASE_TEST(syscall_args); EXPECT_SYSER(1, syscall(__NR_statx, 0, NULL, 0, 0, NULL), -1, EFAULT); break;
CASE_TEST(namespace); EXPECT_SYSZR(euid0 && proc, test_namespace()); break;
@@ -1540,6 +1551,8 @@ int run_stdlib(int min, int max)
CASE_TEST(abs); EXPECT_EQ(1, abs(-10), 10); break;
CASE_TEST(abs_noop); EXPECT_EQ(1, abs(10), 10); break;
CASE_TEST(difftime); EXPECT_ZR(1, test_difftime()); break;
+ CASE_TEST(memchr_foobar6_o); EXPECT_STREQ(1, memchr("foobar", 'o', 6), "oobar"); break;
+ CASE_TEST(memchr_foobar3_b); EXPECT_STRZR(1, memchr("foobar", 'b', 3)); break;
case __LINE__:
return ret; /* must be last */
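
The new readv/writev cases exercise the vectored I/O entry points added in nolibc's sys/uio.h. A minimal sketch of the happy path they build on, written against a hosted libc:

#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct iovec iov[2] = {
		{ .iov_base = "hello ", .iov_len = 6 },
		{ .iov_base = "world\n", .iov_len = 6 },
	};

	/* One syscall gathers both buffers and returns the total byte count. */
	return writev(STDOUT_FILENO, iov, 2) == 12 ? 0 : 1;
}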
diff --git a/tools/testing/selftests/nolibc/run-tests.sh b/tools/testing/selftests/nolibc/run-tests.sh
index 210abe715ed9..3917cfb8fdc4 100755
--- a/tools/testing/selftests/nolibc/run-tests.sh
+++ b/tools/testing/selftests/nolibc/run-tests.sh
@@ -169,7 +169,7 @@ test_arch() {
cross_compile=$(realpath "${download_location}gcc-${crosstool_version}-nolibc/${ct_arch}-${ct_abi}/bin/${ct_arch}-${ct_abi}-")
build_dir="${build_location}/${arch}"
if [ "$werror" -ne 0 ]; then
- CFLAGS_EXTRA="$CFLAGS_EXTRA -Werror"
+ CFLAGS_EXTRA="$CFLAGS_EXTRA -Werror -Wl,--fatal-warnings"
fi
MAKE=(make -f Makefile.nolibc -j"${nproc}" XARCH="${arch}" CROSS_COMPILE="${cross_compile}" LLVM="${llvm}" O="${build_dir}")
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-again.sh b/tools/testing/selftests/rcutorture/bin/kvm-again.sh
index 88ca4e368489..b5239b52cb5d 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-again.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-again.sh
@@ -31,7 +31,7 @@ fi
if ! cp "$oldrun/scenarios" $T/scenarios.oldrun
then
# Later on, can reconstitute this from console.log files.
- echo Prior run batches file does not exist: $oldrun/batches
+ echo Prior run scenarios file does not exist: $oldrun/scenarios
exit 1
fi
@@ -68,7 +68,7 @@ usage () {
echo " --datestamp string"
echo " --dryrun"
echo " --duration minutes | <seconds>s | <hours>h | <days>d"
- echo " --link hard|soft|copy"
+ echo " --link hard|soft|copy|inplace|inplace-force"
echo " --remote"
echo " --rundir /new/res/path"
echo "Command line: $scriptname $args"
@@ -121,7 +121,7 @@ do
shift
;;
--link)
- checkarg --link "hard|soft|copy" "$#" "$2" 'hard\|soft\|copy' '^--'
+ checkarg --link "hard|soft|copy|inplace|inplace-force" "$#" "$2" 'hard\|soft\|copy\|inplace\|inplace-force' '^--'
case "$2" in
copy)
arg_link="cp -R"
@@ -132,6 +132,14 @@ do
soft)
arg_link="cp -Rs"
;;
+ inplace)
+ arg_link="inplace"
+ rundir="$oldrun"
+ ;;
+ inplace-force)
+ arg_link="inplace-force"
+ rundir="$oldrun"
+ ;;
esac
shift
;;
@@ -172,21 +180,37 @@ fi
echo ---- Re-run results directory: $rundir
-# Copy old run directory tree over and adjust.
-mkdir -p "`dirname "$rundir"`"
-if ! $arg_link "$oldrun" "$rundir"
-then
- echo "Cannot copy from $oldrun to $rundir."
- usage
-fi
-rm -f "$rundir"/*/{console.log,console.log.diags,qemu_pid,qemu-pid,qemu-retval,Warnings,kvm-test-1-run.sh.out,kvm-test-1-run-qemu.sh.out,vmlinux} "$rundir"/log
-touch "$rundir/log"
-echo $scriptname $args | tee -a "$rundir/log"
-echo $oldrun > "$rundir/re-run"
-if ! test -d "$rundir/../../bin"
+if test "$oldrun" != "$rundir"
then
- $arg_link "$oldrun/../../bin" "$rundir/../.."
+ # Copy old run directory tree over and adjust.
+ mkdir -p "`dirname "$rundir"`"
+ if ! $arg_link "$oldrun" "$rundir"
+ then
+ echo "Cannot copy from $oldrun to $rundir."
+ usage
+ fi
+ rm -f "$rundir"/*/{console.log,console.log.diags,qemu_pid,qemu-pid,qemu-retval,Warnings,kvm-test-1-run.sh.out,kvm-test-1-run-qemu.sh.out,vmlinux} "$rundir"/log
+ touch "$rundir/log"
+ echo $scriptname $args | tee -a "$rundir/log"
+ echo $oldrun > "$rundir/re-run"
+ if ! test -d "$rundir/../../bin"
+ then
+ $arg_link "$oldrun/../../bin" "$rundir/../.."
+ fi
+else
+ # Check for a run having already happened.
+ find "$rundir" -name console.log -print > $T/oldrun-console.log
+ if test -s $T/oldrun-console.log
+ then
+ echo Run already took place in $rundir
+ if test "$arg_link" = inplace
+ then
+ usage
+ fi
+ fi
fi
+
+# Find runs to be done based on their qemu-cmd files.
for i in $rundir/*/qemu-cmd
do
cp "$i" $T
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-series.sh b/tools/testing/selftests/rcutorture/bin/kvm-series.sh
new file mode 100755
index 000000000000..2ff905a1853b
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-series.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Usage: kvm-series.sh config-list commit-id-list [ kvm.sh parameters ]
+#
+# Tests the specified list of unadorned configs ("TREE01 SRCU-P" but not
+# "CFLIST" or "3*TRACE01") across the specified set of commits, running
+# each commit against each config using kvm.sh.
+# The runs are grouped into a -series/config/commit directory tree.
+# Each run defaults to a duration of one minute.
+#
+# Run in top-level Linux source directory. Please note that this is in
+# no way a replacement for "git bisect"!!!
+#
+# This script is intended to replace kvm-check-branches.sh by providing
+# ease of use and faster execution.
+
+T="`mktemp -d ${TMPDIR-/tmp}/kvm-series.sh.XXXXXX`"
+trap 'rm -rf $T' 0
+
+scriptname=$0
+args="$*"
+
+config_list="${1}"
+if test -z "${config_list}"
+then
+ echo "$0: Need a quoted list of --config arguments for first argument."
+ exit 1
+fi
+if test -z "${config_list}" || echo "${config_list}" | grep -q '\*'
+then
+ echo "$0: Repetition ('*') not allowed in config list."
+ exit 1
+fi
+
+commit_list="${2}"
+if test -z "${commit_list}"
+then
+ echo "$0: Need a list of commits (e.g., HEAD^^^..) for second argument."
+ exit 2
+fi
+git log --pretty=format:"%h" "${commit_list}" > $T/commits
+ret=$?
+if test "${ret}" -ne 0
+then
+ echo "$0: Invalid commit list ('${commit_list}')."
+ exit 2
+fi
+sha1_list=`cat $T/commits`
+
+shift
+shift
+
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
+. functions.sh
+
+ret=0
+nfail=0
+nsuccess=0
+faillist=
+successlist=
+cursha1="`git rev-parse --abbrev-ref HEAD`"
+ds="`date +%Y.%m.%d-%H.%M.%S`-series"
+startdate="`date`"
+starttime="`get_starttime`"
+
+echo " --- " $scriptname $args | tee -a $T/log
+echo " --- Results directory: " $ds | tee -a $T/log
+
+for config in ${config_list}
+do
+ sha_n=0
+ for sha in ${sha1_list}
+ do
+ sha1=${sha_n}.${sha} # Enable "sort -k1nr" to list commits in order.
+ echo Starting ${config}/${sha1} at `date` | tee -a $T/log
+ git checkout "${sha}"
+ time tools/testing/selftests/rcutorture/bin/kvm.sh --configs "$config" --datestamp "$ds/${config}/${sha1}" --duration 1 "$@"
+ curret=$?
+ if test "${curret}" -ne 0
+ then
+ nfail=$((nfail+1))
+ faillist="$faillist ${config}/${sha1}(${curret})"
+ else
+ nsuccess=$((nsuccess+1))
+ successlist="$successlist ${config}/${sha1}"
+ # Successful run, so remove large files.
+ rm -f ${RCUTORTURE}/$ds/${config}/${sha1}/{vmlinux,bzImage,System.map,Module.symvers}
+ fi
+ if test "${ret}" -eq 0
+ then
+ ret=${curret}
+ fi
+ sha_n=$((sha_n+1))
+ done
+done
+git checkout "${cursha1}"
+
+echo ${nsuccess} SUCCESSES: | tee -a $T/log
+echo ${successlist} | fmt | tee -a $T/log
+echo | tee -a $T/log
+echo ${nfail} FAILURES: | tee -a $T/log
+echo ${faillist} | fmt | tee -a $T/log
+if test -n "${faillist}"
+then
+ echo | tee -a $T/log
+ echo Failures across commits: | tee -a $T/log
+ echo ${faillist} | tr ' ' '\012' | sed -e 's,^[^/]*/,,' -e 's/([0-9]*)//' |
+ sort | uniq -c | sort -k2n | tee -a $T/log
+fi
+echo Started at $startdate, ended at `date`, duration `get_starttime_duration $starttime`. | tee -a $T/log
+echo Summary: Successes: ${nsuccess} Failures: ${nfail} | tee -a $T/log
+cp $T/log tools/testing/selftests/rcutorture/res/${ds}
+
+exit "${ret}"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 617cba339d28..fff15821c44c 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -199,7 +199,7 @@ do
fi
;;
--kconfig|--kconfigs)
- checkarg --kconfig "(Kconfig options)" $# "$2" '^\(#CHECK#\)\?CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\|"[^"]*"\)\( \+\(#CHECK#\)\?CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\|"[^"]*"\)\)* *$' '^error$'
+ checkarg --kconfig "(Kconfig options)" $# "$2" '^\(#CHECK#\)\?CONFIG_[A-Z0-9_]\+=\([ynm]\|-\?[0-9]\+\|"[^"]*"\)\( \+\(#CHECK#\)\?CONFIG_[A-Z0-9_]\+=\([ynm]\|-\?[0-9]\+\|"[^"]*"\)\)* *$' '^error$'
TORTURE_KCONFIG_ARG="`echo "$TORTURE_KCONFIG_ARG $2" | sed -e 's/^ *//' -e 's/ *$//'`"
shift
;;
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
index dc4985064b3a..67caf4276bb0 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
@@ -16,3 +16,4 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_RCU_EXPERT=y
CONFIG_RCU_EQS_DEBUG=y
CONFIG_RCU_LAZY=y
+CONFIG_RCU_DYNTICKS_TORTURE=y
diff --git a/tools/testing/selftests/run_kselftest.sh b/tools/testing/selftests/run_kselftest.sh
index 0443beacf362..d4be97498b32 100755
--- a/tools/testing/selftests/run_kselftest.sh
+++ b/tools/testing/selftests/run_kselftest.sh
@@ -33,6 +33,7 @@ Usage: $0 [OPTIONS]
-c | --collection COLLECTION Run all tests from COLLECTION
-l | --list List the available collection:test entries
-d | --dry-run Don't actually run any tests
+ -f | --no-error-on-fail Don't exit with an error just because tests failed
-n | --netns Run each test in namespace
-h | --help Show this usage info
-o | --override-timeout Number of seconds after which we timeout
@@ -44,6 +45,7 @@ COLLECTIONS=""
TESTS=""
dryrun=""
kselftest_override_timeout=""
+ERROR_ON_FAIL=true
while true; do
case "$1" in
-s | --summary)
@@ -65,6 +67,9 @@ while true; do
-d | --dry-run)
dryrun="echo"
shift ;;
+ -f | --no-error-on-fail)
+ ERROR_ON_FAIL=false
+ shift ;;
-n | --netns)
RUN_IN_NETNS=1
shift ;;
@@ -105,9 +110,18 @@ if [ -n "$TESTS" ]; then
available="$(echo "$valid" | sed -e 's/ /\n/g')"
fi
+kselftest_failures_file="$(mktemp --tmpdir kselftest-failures-XXXXXX)"
+export kselftest_failures_file
+
collections=$(echo "$available" | cut -d: -f1 | sort | uniq)
for collection in $collections ; do
[ -w /dev/kmsg ] && echo "kselftest: Running tests in $collection" >> /dev/kmsg
tests=$(echo "$available" | grep "^$collection:" | cut -d: -f2)
($dryrun cd "$collection" && $dryrun run_many $tests)
done
+
+failures="$(cat "$kselftest_failures_file")"
+rm "$kselftest_failures_file"
+if "$ERROR_ON_FAIL" && [ "$failures" ]; then
+ exit 1
+fi
diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile
index 9d9d6b4c38b0..5fe45f9c5f8f 100644
--- a/tools/testing/selftests/sched_ext/Makefile
+++ b/tools/testing/selftests/sched_ext/Makefile
@@ -174,6 +174,7 @@ auto-test-targets := \
minimal \
numa \
allowed_cpus \
+ peek_dsq \
prog_run \
reload_loop \
select_cpu_dfl \
diff --git a/tools/testing/selftests/sched_ext/peek_dsq.bpf.c b/tools/testing/selftests/sched_ext/peek_dsq.bpf.c
new file mode 100644
index 000000000000..a3faf5bb49d6
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/peek_dsq.bpf.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A BPF program for testing DSQ operations and peek in particular.
+ *
+ * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2025 Ryan Newton <ryan.newton@alum.mit.edu>
+ */
+
+#include <scx/common.bpf.h>
+#include <scx/compat.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei); /* Error handling */
+
+#define MAX_SAMPLES 100
+#define MAX_CPUS 512
+#define DSQ_POOL_SIZE 8
+int max_samples = MAX_SAMPLES;
+int max_cpus = MAX_CPUS;
+int dsq_pool_size = DSQ_POOL_SIZE;
+
+/* Global variables to store test results */
+long dsq_peek_result1 = -1;
+long dsq_inserted_pid = -1;
+int insert_test_cpu = -1; /* Set to the cpu that performs the test */
+long dsq_peek_result2 = -1;
+long dsq_peek_result2_pid = -1;
+long dsq_peek_result2_expected = -1;
+int test_dsq_id = 1234; /* Use a simple ID like the create_dsq example */
+int real_dsq_id = 1235; /* DSQ for normal operation */
+int enqueue_count = -1;
+int dispatch_count = -1;
+bool debug_ksym_exists;
+
+/* DSQ pool for stress testing */
+int dsq_pool_base_id = 2000;
+int phase1_complete = -1;
+long total_peek_attempts = -1;
+long successful_peeks = -1;
+
+/* BPF map for sharing peek results with userspace */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, MAX_SAMPLES);
+ __type(key, u32);
+ __type(value, long);
+} peek_results SEC(".maps");
+
+static int get_random_dsq_id(void)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ return dsq_pool_base_id + (time % DSQ_POOL_SIZE);
+}
+
+static void record_peek_result(long pid)
+{
+ u32 slot_key;
+ long *slot_pid_ptr;
+ int ix;
+
+ if (pid <= 0)
+ return;
+
+ /* Find an empty slot or one with the same PID */
+ bpf_for(ix, 0, 10) {
+ slot_key = (pid + ix) % MAX_SAMPLES;
+ slot_pid_ptr = bpf_map_lookup_elem(&peek_results, &slot_key);
+ if (!slot_pid_ptr)
+ continue;
+
+ if (*slot_pid_ptr == -1 || *slot_pid_ptr == pid) {
+ *slot_pid_ptr = pid;
+ break;
+ }
+ }
+}
+
+/* Scan all DSQs in the pool and try to move a task to local */
+static int scan_dsq_pool(void)
+{
+ struct task_struct *task;
+ int moved = 0;
+ int i;
+
+ bpf_for(i, 0, DSQ_POOL_SIZE) {
+ int dsq_id = dsq_pool_base_id + i;
+
+ total_peek_attempts++;
+
+ task = __COMPAT_scx_bpf_dsq_peek(dsq_id);
+ if (task) {
+ successful_peeks++;
+ record_peek_result(task->pid);
+
+ /* Try to move this task to local */
+ if (!moved && scx_bpf_dsq_move_to_local(dsq_id) == 0) {
+ moved = 1;
+ break;
+ }
+ }
+ }
+ return moved;
+}
+
+/* Struct_ops scheduler for testing DSQ peek operations */
+void BPF_STRUCT_OPS(peek_dsq_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ struct task_struct *peek_result;
+ int last_insert_test_cpu, cpu;
+
+ enqueue_count++;
+ cpu = bpf_get_smp_processor_id();
+ last_insert_test_cpu = __sync_val_compare_and_swap(&insert_test_cpu, -1, cpu);
+
+ /* Phase 1: Simple insert-then-peek test (only on first task) */
+ if (last_insert_test_cpu == -1) {
+ bpf_printk("peek_dsq_enqueue beginning phase 1 peek test on cpu %d", cpu);
+
+ /* Test 1: Peek empty DSQ - should return NULL */
+ peek_result = __COMPAT_scx_bpf_dsq_peek(test_dsq_id);
+ dsq_peek_result1 = (long)peek_result; /* Should be 0 (NULL) */
+
+ /* Test 2: Insert task into test DSQ for testing in dispatch callback */
+ dsq_inserted_pid = p->pid;
+ scx_bpf_dsq_insert(p, test_dsq_id, 0, enq_flags);
+ dsq_peek_result2_expected = (long)p; /* Expected the task we just inserted */
+ } else if (!phase1_complete) {
+ /* Still in phase 1, use real DSQ */
+ scx_bpf_dsq_insert(p, real_dsq_id, 0, enq_flags);
+ } else {
+ /* Phase 2: Random DSQ insertion for stress testing */
+ int random_dsq_id = get_random_dsq_id();
+
+ scx_bpf_dsq_insert(p, random_dsq_id, 0, enq_flags);
+ }
+}
+
+void BPF_STRUCT_OPS(peek_dsq_dispatch, s32 cpu, struct task_struct *prev)
+{
+ dispatch_count++;
+
+ /* Phase 1: Complete the simple peek test if we inserted a task but
+ * haven't tested peek yet
+ */
+ if (insert_test_cpu == cpu && dsq_peek_result2 == -1) {
+ struct task_struct *peek_result;
+
+ bpf_printk("peek_dsq_dispatch completing phase 1 peek test on cpu %d", cpu);
+
+ /* Test 3: Peek DSQ after insert - should return the task we inserted */
+ peek_result = __COMPAT_scx_bpf_dsq_peek(test_dsq_id);
+ /* Store the PID of the peeked task for comparison */
+ dsq_peek_result2 = (long)peek_result;
+ dsq_peek_result2_pid = peek_result ? peek_result->pid : -1;
+
+ /* Now consume the task since we've peeked at it */
+ scx_bpf_dsq_move_to_local(test_dsq_id);
+
+ /* Mark phase 1 as complete */
+ phase1_complete = 1;
+ bpf_printk("Phase 1 complete, starting phase 2 stress testing");
+ } else if (!phase1_complete) {
+ /* Still in phase 1, use real DSQ */
+ scx_bpf_dsq_move_to_local(real_dsq_id);
+ } else {
+ /* Phase 2: Scan all DSQs in the pool and try to move a task */
+ if (!scan_dsq_pool()) {
+ /* No tasks found in DSQ pool, fall back to real DSQ */
+ scx_bpf_dsq_move_to_local(real_dsq_id);
+ }
+ }
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(peek_dsq_init)
+{
+ s32 err;
+ int i;
+
+ /* Always set debug values so we can see which version we're using */
+ debug_ksym_exists = bpf_ksym_exists(scx_bpf_dsq_peek) ? 1 : 0;
+
+ /* Initialize state first */
+ insert_test_cpu = -1;
+ enqueue_count = 0;
+ dispatch_count = 0;
+ phase1_complete = 0;
+ total_peek_attempts = 0;
+ successful_peeks = 0;
+
+ /* Create the test and real DSQs */
+ err = scx_bpf_create_dsq(test_dsq_id, -1);
+ if (err) {
+ scx_bpf_error("Failed to create DSQ %d: %d", test_dsq_id, err);
+ return err;
+ }
+ err = scx_bpf_create_dsq(real_dsq_id, -1);
+ if (err) {
+ scx_bpf_error("Failed to create DSQ %d: %d", real_dsq_id, err);
+ return err;
+ }
+
+ /* Create the DSQ pool for stress testing */
+ bpf_for(i, 0, DSQ_POOL_SIZE) {
+ int dsq_id = dsq_pool_base_id + i;
+
+ err = scx_bpf_create_dsq(dsq_id, -1);
+ if (err) {
+ scx_bpf_error("Failed to create DSQ pool entry %d: %d", dsq_id, err);
+ return err;
+ }
+ }
+
+ /* Initialize the peek results map */
+ bpf_for(i, 0, MAX_SAMPLES) {
+ u32 key = i;
+ long pid = -1;
+
+ bpf_map_update_elem(&peek_results, &key, &pid, BPF_ANY);
+ }
+
+ return 0;
+}
+
+void BPF_STRUCT_OPS(peek_dsq_exit, struct scx_exit_info *ei)
+{
+ int i;
+
+ /* Destroy the primary DSQs */
+ scx_bpf_destroy_dsq(test_dsq_id);
+ scx_bpf_destroy_dsq(real_dsq_id);
+
+ /* Destroy the DSQ pool */
+ bpf_for(i, 0, DSQ_POOL_SIZE) {
+ int dsq_id = dsq_pool_base_id + i;
+
+ scx_bpf_destroy_dsq(dsq_id);
+ }
+
+ UEI_RECORD(uei, ei);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops peek_dsq_ops = {
+ .enqueue = (void *)peek_dsq_enqueue,
+ .dispatch = (void *)peek_dsq_dispatch,
+ .init = (void *)peek_dsq_init,
+ .exit = (void *)peek_dsq_exit,
+ .name = "peek_dsq",
+};
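
A sketch of the guard pattern the program above relies on: peeking is only meaningful when the running kernel exports the kfunc, so callers can branch on bpf_ksym_exists() and fall back to a queue-depth check otherwise. This assumes scx_bpf_dsq_peek is declared __ksym __weak, as compat.bpf.h does; the helper name and fallback are illustrative:

static bool dsq_has_task(u64 dsq_id)
{
	struct task_struct *p;

	if (!bpf_ksym_exists(scx_bpf_dsq_peek))
		return scx_bpf_dsq_nr_queued(dsq_id) > 0;	/* fallback heuristic */

	p = scx_bpf_dsq_peek(dsq_id);
	return p != NULL;
}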
diff --git a/tools/testing/selftests/sched_ext/peek_dsq.c b/tools/testing/selftests/sched_ext/peek_dsq.c
new file mode 100644
index 000000000000..a717384a3224
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/peek_dsq.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for DSQ operations including create, destroy, and peek operations.
+ *
+ * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2025 Ryan Newton <ryan.newton@alum.mit.edu>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <string.h>
+#include <sched.h>
+#include "peek_dsq.bpf.skel.h"
+#include "scx_test.h"
+
+#define NUM_WORKERS 4
+
+static bool workload_running = true;
+static pthread_t workload_threads[NUM_WORKERS];
+
+/**
+ * Background workload thread that sleeps and wakes rapidly to exercise
+ * the scheduler's enqueue operations and ensure DSQ operations get tested.
+ */
+static void *workload_thread_fn(void *arg)
+{
+ while (workload_running) {
+ /* Sleep for a very short time to trigger scheduler activity */
+ usleep(1000); /* 1ms sleep */
+ /* Yield to ensure we go through the scheduler */
+ sched_yield();
+ }
+ return NULL;
+}
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct peek_dsq *skel;
+
+ skel = peek_dsq__open();
+ SCX_FAIL_IF(!skel, "Failed to open");
+ SCX_ENUM_INIT(skel);
+ SCX_FAIL_IF(peek_dsq__load(skel), "Failed to load skel");
+
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static int print_observed_pids(struct bpf_map *map, int max_samples, const char *dsq_name)
+{
+ long count = 0;
+
+ printf("Observed %s DSQ peek pids:\n", dsq_name);
+ for (int i = 0; i < max_samples; i++) {
+ long pid;
+ int err;
+
+ err = bpf_map_lookup_elem(bpf_map__fd(map), &i, &pid);
+ if (err == 0) {
+ if (pid == 0) {
+ printf(" Sample %d: NULL peek\n", i);
+ } else if (pid > 0) {
+ printf(" Sample %d: pid %ld\n", i, pid);
+ count++;
+ }
+ } else {
+ printf(" Sample %d: error reading pid (err=%d)\n", i, err);
+ }
+ }
+ printf("Observed ~%ld pids in the %s DSQ(s)\n", count, dsq_name);
+ return count;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct peek_dsq *skel = ctx;
+ bool failed = false;
+ int seconds = 3;
+ int err;
+
+ /* Enable the scheduler to test DSQ operations */
+ printf("Enabling scheduler to test DSQ insert operations...\n");
+
+ struct bpf_link *link =
+ bpf_map__attach_struct_ops(skel->maps.peek_dsq_ops);
+
+ if (!link) {
+ SCX_ERR("Failed to attach struct_ops");
+ return SCX_TEST_FAIL;
+ }
+
+ printf("Starting %d background workload threads...\n", NUM_WORKERS);
+ workload_running = true;
+ for (int i = 0; i < NUM_WORKERS; i++) {
+ err = pthread_create(&workload_threads[i], NULL, workload_thread_fn, NULL);
+ if (err) {
+ SCX_ERR("Failed to create workload thread %d: %s", i, strerror(err));
+ /* Stop already created threads */
+ workload_running = false;
+ for (int j = 0; j < i; j++)
+ pthread_join(workload_threads[j], NULL);
+ bpf_link__destroy(link);
+ return SCX_TEST_FAIL;
+ }
+ }
+
+ printf("Waiting for enqueue events.\n");
+ sleep(seconds);
+ while (skel->data->enqueue_count <= 0) {
+ printf(".");
+ fflush(stdout);
+ sleep(1);
+ seconds++;
+ if (seconds >= 30) {
+ printf("\n\u2717 Timeout waiting for enqueue events\n");
+ /* Stop workload threads and cleanup */
+ workload_running = false;
+ for (int i = 0; i < NUM_WORKERS; i++)
+ pthread_join(workload_threads[i], NULL);
+ bpf_link__destroy(link);
+ return SCX_TEST_FAIL;
+ }
+ }
+
+ workload_running = false;
+ for (int i = 0; i < NUM_WORKERS; i++) {
+ err = pthread_join(workload_threads[i], NULL);
+ if (err) {
+ SCX_ERR("Failed to join workload thread %d: %s", i, strerror(err));
+ bpf_link__destroy(link);
+ return SCX_TEST_FAIL;
+ }
+ }
+ printf("Background workload threads stopped.\n");
+
+ SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_NONE));
+
+ /* Detach the scheduler */
+ bpf_link__destroy(link);
+
+ printf("Enqueue/dispatch count over %d seconds: %d / %d\n", seconds,
+ skel->data->enqueue_count, skel->data->dispatch_count);
+ printf("Debug: ksym_exists=%d\n",
+ skel->bss->debug_ksym_exists);
+
+ /* Check DSQ insert result */
+ printf("DSQ insert test done on cpu: %d\n", skel->data->insert_test_cpu);
+ if (skel->data->insert_test_cpu != -1)
+ printf("\u2713 DSQ insert succeeded !\n");
+ else {
+ printf("\u2717 DSQ insert failed or not attempted\n");
+ failed = true;
+ }
+
+ /* Check DSQ peek results */
+ printf(" DSQ peek result 1 (before insert): %d\n",
+ skel->data->dsq_peek_result1);
+ if (skel->data->dsq_peek_result1 == 0)
+ printf("\u2713 DSQ peek verification success: peek returned NULL!\n");
+ else {
+ printf("\u2717 DSQ peek verification failed\n");
+ failed = true;
+ }
+
+ printf(" DSQ peek result 2 (after insert): %ld\n",
+ skel->data->dsq_peek_result2);
+ printf(" DSQ peek result 2, expected: %ld\n",
+ skel->data->dsq_peek_result2_expected);
+ if (skel->data->dsq_peek_result2 ==
+ skel->data->dsq_peek_result2_expected)
+ printf("\u2713 DSQ peek verification success: peek returned the inserted task!\n");
+ else {
+ printf("\u2717 DSQ peek verification failed\n");
+ failed = true;
+ }
+
+ printf(" Inserted test task -> pid: %ld\n", skel->data->dsq_inserted_pid);
+ printf(" DSQ peek result 2 -> pid: %ld\n", skel->data->dsq_peek_result2_pid);
+
+ int pid_count;
+
+ pid_count = print_observed_pids(skel->maps.peek_results,
+ skel->data->max_samples, "DSQ pool");
+ printf("Total non-null peek observations: %ld out of %ld\n",
+ skel->data->successful_peeks, skel->data->total_peek_attempts);
+
+ if (skel->bss->debug_ksym_exists && pid_count == 0) {
+ printf("\u2717 DSQ pool test failed: no successful peeks in native mode\n");
+ failed = true;
+ }
+ if (skel->bss->debug_ksym_exists && pid_count > 0)
+ printf("\u2713 DSQ pool test success: observed successful peeks in native mode\n");
+
+ if (failed)
+ return SCX_TEST_FAIL;
+ else
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct peek_dsq *skel = ctx;
+
+ if (workload_running) {
+ workload_running = false;
+ for (int i = 0; i < NUM_WORKERS; i++)
+ pthread_join(workload_threads[i], NULL);
+ }
+
+ peek_dsq__destroy(skel);
+}
+
+struct scx_test peek_dsq = {
+ .name = "peek_dsq",
+ .description =
+ "Test DSQ create/destroy operations and future peek functionality",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&peek_dsq)