diff --git a/AGENTS.md b/AGENTS.md index 4e31e0e65..0806bbc82 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -126,9 +126,13 @@ Example format: ### Imports +**For standard library modules:** - Use namespace imports: `import enum` instead of `from enum import Enum` - For typing, use `import typing as t` and access via namespace: `t.NamedTuple`, etc. -- Use `from __future__ import annotations` at the top of all Python files + +**For third-party packages:** Use idiomatic import styles for each library (e.g., `from pygments.token import Token` is fine). + +**Always:** Use `from __future__ import annotations` at the top of all Python files. ### Docstrings diff --git a/docs/_ext/__init__.py b/docs/_ext/__init__.py new file mode 100644 index 000000000..7a9e6898b --- /dev/null +++ b/docs/_ext/__init__.py @@ -0,0 +1,3 @@ +"""Sphinx extensions for vcspull documentation.""" + +from __future__ import annotations diff --git a/docs/_ext/argparse_lexer.py b/docs/_ext/argparse_lexer.py new file mode 100644 index 000000000..64fcf083f --- /dev/null +++ b/docs/_ext/argparse_lexer.py @@ -0,0 +1,367 @@ +"""Pygments lexers for argparse help output. + +This module provides custom Pygments lexers for highlighting argparse-generated +command-line help text, including usage lines, section headers, and full help output. + +Three lexer classes are provided: +- ArgparseUsageLexer: For usage lines only +- ArgparseHelpLexer: For full -h output (delegates usage to ArgparseUsageLexer) +- ArgparseLexer: Smart auto-detecting wrapper +""" + +from __future__ import annotations + +from pygments.lexer import RegexLexer, bygroups, include +from pygments.token import Generic, Name, Operator, Punctuation, Text, Whitespace + + +class ArgparseUsageLexer(RegexLexer): + """Lexer for argparse usage lines only. 
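+    For full ``-h`` output use :class:`ArgparseHelpLexer`; :class:`ArgparseLexer`
+    auto-detects between the two.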
+ + Handles patterns like: + - usage: PROG [-h] [--foo FOO] bar {a,b,c} + - Mutually exclusive: [-a | -b], (--foo | --bar) + - Choices: {json,yaml,table} + - Variadic: FILE ..., [FILE ...], [--foo [FOO]] + + Examples + -------- + >>> from pygments.token import Token + >>> lexer = ArgparseUsageLexer() + >>> tokens = list(lexer.get_tokens("usage: cmd [-h]")) + >>> tokens[0] + (Token.Generic.Heading, 'usage:') + >>> tokens[2] + (Token.Name.Label, 'cmd') + """ + + name = "Argparse Usage" + aliases = ["argparse-usage"] # noqa: RUF012 + filenames: list[str] = [] # noqa: RUF012 + mimetypes = ["text/x-argparse-usage"] # noqa: RUF012 + + tokens = { # noqa: RUF012 + "root": [ + # "usage:" at start of line + (r"^(usage:)(\s+)", bygroups(Generic.Heading, Whitespace)), # type: ignore[no-untyped-call] + # Continuation lines (leading whitespace for wrapped usage) + (r"^(\s+)(?=\S)", Whitespace), + include("inline"), + ], + "inline": [ + # Whitespace + (r"\s+", Whitespace), + # Ellipsis for variadic args (before other patterns) + (r"\.\.\.", Punctuation), + # Long options with = value (e.g., --log-level=VALUE) + ( + r"(--[a-zA-Z0-9][-a-zA-Z0-9]*)(=)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9]*)", + bygroups(Name.Tag, Operator, Name.Variable), # type: ignore[no-untyped-call] + ), + # Long options standalone + (r"--[a-zA-Z0-9][-a-zA-Z0-9]*", Name.Tag), + # Short options with space-separated value (e.g., -S socket-path) + ( + r"(-[a-zA-Z0-9])(\s+)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9]*)", + bygroups(Name.Attribute, Whitespace, Name.Variable), # type: ignore[no-untyped-call] + ), + # Short options standalone + (r"-[a-zA-Z0-9]", Name.Attribute), + # Opening brace - enter choices state + (r"\{", Punctuation, "choices"), + # Opening bracket - enter optional state + (r"\[", Punctuation, "optional"), + # Closing bracket (fallback for unmatched) + (r"\]", Punctuation), + # Opening paren - enter required mutex state + (r"\(", Punctuation, "required"), + # Closing paren (fallback for unmatched) + (r"\)", Punctuation), + # Choice separator (pipe) for mutex groups + (r"\|", Operator), + # UPPERCASE meta-variables (COMMAND, FILE, PATH) + (r"\b[A-Z][A-Z0-9_]*\b", Name.Variable), + # Positional/command names (lowercase with dashes) + (r"\b[a-z][-a-z0-9]*\b", Name.Label), + # Catch-all for any other text + (r"[^\s\[\]|(){},]+", Text), + ], + "optional": [ + # Nested optional bracket + (r"\[", Punctuation, "#push"), + # End optional + (r"\]", Punctuation, "#pop"), + # Contents use inline rules + include("inline"), + ], + "required": [ + # Nested required paren + (r"\(", Punctuation, "#push"), + # End required + (r"\)", Punctuation, "#pop"), + # Contents use inline rules + include("inline"), + ], + "choices": [ + # Choice values (comma-separated inside braces) + (r"[a-zA-Z0-9][-a-zA-Z0-9_]*", Name.Constant), + # Comma separator + (r",", Punctuation), + # End choices + (r"\}", Punctuation, "#pop"), + # Whitespace + (r"\s+", Whitespace), + ], + } + + +class ArgparseHelpLexer(RegexLexer): + """Lexer for full argparse -h help output. + + Handles: + - Usage lines (delegates to ArgparseUsageLexer patterns) + - Section headers (positional arguments:, options:, etc.) 
+ - Option entries with help text + - Indented descriptions + + Examples + -------- + >>> from pygments.token import Token + >>> lexer = ArgparseHelpLexer() + >>> tokens = list(lexer.get_tokens("positional arguments:")) + >>> any(t[0] == Token.Generic.Subheading for t in tokens) + True + >>> tokens = list(lexer.get_tokens(" -h, --help show help")) + >>> any(t[0] == Token.Name.Attribute for t in tokens) + True + """ + + name = "Argparse Help" + aliases = ["argparse-help"] # noqa: RUF012 + filenames: list[str] = [] # noqa: RUF012 + mimetypes = ["text/x-argparse-help"] # noqa: RUF012 + + tokens = { # noqa: RUF012 + "root": [ + # "usage:" line - switch to usage mode + (r"^(usage:)(\s+)", bygroups(Generic.Heading, Whitespace), "usage"), # type: ignore[no-untyped-call] + # Section headers (e.g., "positional arguments:", "options:") + (r"^([a-zA-Z][-a-zA-Z0-9_ ]*:)\s*$", Generic.Subheading), + # Option entry lines (indented with spaces/tabs, not just newlines) + (r"^([ \t]+)", Whitespace, "option_line"), + # Continuation of usage (leading spaces/tabs followed by content) + (r"^([ \t]+)(?=\S)", Whitespace), + # Anything else (must match at least one char to avoid infinite loop) + (r".+\n?", Text), + # Standalone newlines + (r"\n", Whitespace), + ], + "usage": [ + # End of usage on blank line or section header + (r"\n(?=[a-zA-Z][-a-zA-Z0-9_ ]*:\s*$)", Text, "#pop"), + (r"\n(?=\n)", Text, "#pop"), + # Usage content - use ArgparseUsageLexer inline rules + include("usage_inline"), + # Line continuation + (r"\n", Text), + ], + "usage_inline": [ + # Whitespace + (r"\s+", Whitespace), + # Ellipsis for variadic args + (r"\.\.\.", Punctuation), + # Long options with = value + ( + r"(--[a-zA-Z0-9][-a-zA-Z0-9]*)(=)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9]*)", + bygroups(Name.Tag, Operator, Name.Variable), # type: ignore[no-untyped-call] + ), + # Long options standalone + (r"--[a-zA-Z0-9][-a-zA-Z0-9]*", Name.Tag), + # Short options with value + ( + r"(-[a-zA-Z0-9])(\s+)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9]*)", + bygroups(Name.Attribute, Whitespace, Name.Variable), # type: ignore[no-untyped-call] + ), + # Short options standalone + (r"-[a-zA-Z0-9]", Name.Attribute), + # Choices in braces + (r"\{", Punctuation, "choices"), + # Optional brackets + (r"\[", Punctuation, "optional"), + (r"\]", Punctuation), + # Required parens (mutex) + (r"\(", Punctuation, "required"), + (r"\)", Punctuation), + # Pipe for mutex + (r"\|", Operator), + # UPPERCASE metavars + (r"\b[A-Z][A-Z0-9_]*\b", Name.Variable), + # Command/positional names + (r"\b[a-z][-a-z0-9]*\b", Name.Label), + # Other text + (r"[^\s\[\]|(){},\n]+", Text), + ], + "option_line": [ + # Short option with comma (e.g., "-h, --help") + ( + r"(-[a-zA-Z0-9])(,)(\s*)(--[a-zA-Z0-9][-a-zA-Z0-9]*)", + bygroups(Name.Attribute, Punctuation, Whitespace, Name.Tag), # type: ignore[no-untyped-call] + ), + # Long options with = value + ( + r"(--[a-zA-Z0-9][-a-zA-Z0-9]*)(=)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9]*)", + bygroups(Name.Tag, Operator, Name.Variable), # type: ignore[no-untyped-call] + ), + # Long options with space-separated metavar + ( + r"(--[a-zA-Z0-9][-a-zA-Z0-9]*)(\s+)([A-Z][A-Z0-9_]+)", + bygroups(Name.Tag, Whitespace, Name.Variable), # type: ignore[no-untyped-call] + ), + # Long options standalone + (r"--[a-zA-Z0-9][-a-zA-Z0-9]*", Name.Tag), + # Short options with metavar + ( + r"(-[a-zA-Z0-9])(\s+)([A-Z][A-Z0-9_]+)", + bygroups(Name.Attribute, Whitespace, Name.Variable), # type: ignore[no-untyped-call] + ), + # Short options standalone + (r"-[a-zA-Z0-9]", Name.Attribute), + # Choices 
in braces + (r"\{", Punctuation, "option_choices"), + # Help text (everything after double space or large gap) + (r"([ \t]{2,})(.+)$", bygroups(Whitespace, Text)), # type: ignore[no-untyped-call] + # End of line - MUST come before \s+ to properly pop on newlines + (r"\n", Text, "#pop"), + # Other whitespace (spaces/tabs only, not newlines) + (r"[ \t]+", Whitespace), + # UPPERCASE metavars + (r"\b[A-Z][A-Z0-9_]*\b", Name.Variable), + # Anything else on the line + (r"[^\s\n]+", Text), + ], + "optional": [ + (r"\[", Punctuation, "#push"), + (r"\]", Punctuation, "#pop"), + include("usage_inline"), + ], + "required": [ + (r"\(", Punctuation, "#push"), + (r"\)", Punctuation, "#pop"), + include("usage_inline"), + ], + "choices": [ + (r"[a-zA-Z0-9][-a-zA-Z0-9_]*", Name.Constant), + (r",", Punctuation), + (r"\}", Punctuation, "#pop"), + (r"\s+", Whitespace), + ], + "option_choices": [ + (r"[a-zA-Z0-9][-a-zA-Z0-9_]*", Name.Constant), + (r",", Punctuation), + (r"\}", Punctuation, "#pop"), + (r"\s+", Whitespace), + ], + } + + +class ArgparseLexer(ArgparseHelpLexer): + """Smart auto-detecting lexer for argparse output. + + Inherits from ArgparseHelpLexer to properly handle Pygments' metaclass + token processing. Using inheritance (not token dict copying) avoids + shared mutable state that causes memory corruption. + + This is the recommended lexer for general argparse highlighting. + + Examples + -------- + >>> from pygments.token import Token + >>> lexer = ArgparseLexer() + + Usage line detection: + + >>> tokens = list(lexer.get_tokens("usage: cmd [-h]")) + >>> tokens[0] + (Token.Generic.Heading, 'usage:') + + Section header detection: + + >>> tokens = list(lexer.get_tokens("positional arguments:")) + >>> any(t[0] == Token.Generic.Subheading for t in tokens) + True + + Option highlighting: + + >>> tokens = list(lexer.get_tokens("-h")) + >>> any(t[0] == Token.Name.Attribute and t[1] == '-h' for t in tokens) + True + """ + + name = "Argparse" + aliases = ["argparse"] # noqa: RUF012 + filenames: list[str] = [] # noqa: RUF012 + mimetypes = ["text/x-argparse"] # noqa: RUF012 + + # Tokens inherited from ArgparseHelpLexer - do NOT redefine or copy + + +def tokenize_argparse(text: str) -> list[tuple[str, str]]: + """Tokenize argparse text and return list of (token_type, value) tuples. + + Parameters + ---------- + text : str + Argparse help or usage text to tokenize. + + Returns + ------- + list[tuple[str, str]] + List of (token_type_name, text_value) tuples. + + Examples + -------- + >>> result = tokenize_argparse("usage: cmd [-h]") + >>> result[0] + ('Token.Generic.Heading', 'usage:') + >>> result[2] + ('Token.Name.Label', 'cmd') + + >>> result = tokenize_argparse("positional arguments:") + >>> any(t == ('Token.Generic.Subheading', 'positional arguments:') for t in result) + True + """ + lexer = ArgparseLexer() + return [ + (str(tok_type), tok_value) for tok_type, tok_value in lexer.get_tokens(text) + ] + + +def tokenize_usage(text: str) -> list[tuple[str, str]]: + """Tokenize usage text and return list of (token_type, value) tuples. + + Parameters + ---------- + text : str + CLI usage text to tokenize. + + Returns + ------- + list[tuple[str, str]] + List of (token_type_name, text_value) tuples. 
+ + Examples + -------- + >>> result = tokenize_usage("usage: cmd [-h]") + >>> result[0] + ('Token.Generic.Heading', 'usage:') + >>> result[2] + ('Token.Name.Label', 'cmd') + >>> result[4] + ('Token.Punctuation', '[') + >>> result[5] + ('Token.Name.Attribute', '-h') + """ + lexer = ArgparseUsageLexer() + return [ + (str(tok_type), tok_value) for tok_type, tok_value in lexer.get_tokens(text) + ] diff --git a/docs/_ext/argparse_roles.py b/docs/_ext/argparse_roles.py new file mode 100644 index 000000000..3a71f7d54 --- /dev/null +++ b/docs/_ext/argparse_roles.py @@ -0,0 +1,370 @@ +"""Docutils inline roles for CLI/argparse highlighting. + +This module provides custom docutils roles for inline highlighting of CLI +elements in reStructuredText and MyST documentation. + +Available roles: +- :cli-option: - CLI options (--verbose, -h) +- :cli-metavar: - Metavar placeholders (FILE, PATH) +- :cli-command: - Command names (sync, add) +- :cli-default: - Default values (None, "default") +- :cli-choice: - Choice values (json, yaml) +""" + +from __future__ import annotations + +import typing as t + +from docutils import nodes +from docutils.parsers.rst import roles + +if t.TYPE_CHECKING: + from docutils.parsers.rst.states import Inliner + + +def normalize_options(options: dict[str, t.Any] | None) -> dict[str, t.Any]: + """Normalize role options, converting None to empty dict. + + Parameters + ---------- + options : dict | None + Options passed to the role. + + Returns + ------- + dict + Normalized options dict (never None). + + Examples + -------- + >>> normalize_options(None) + {} + >>> normalize_options({"class": "custom"}) + {'class': 'custom'} + """ + return options if options is not None else {} + + +def cli_option_role( + name: str, + rawtext: str, + text: str, + lineno: int, + inliner: Inliner | None, + options: dict[str, t.Any] | None = None, + content: list[str] | None = None, +) -> tuple[list[nodes.Node], list[nodes.system_message]]: + """Role for CLI options like --foo or -h. + + Generates a literal node with appropriate CSS classes for styling. + Long options (--foo) get 'cli-option-long', short options (-h) get + 'cli-option-short'. + + Parameters + ---------- + name : str + Local name of the role used in document. + rawtext : str + Full interpreted text including role markup. + text : str + Content between backticks. + lineno : int + Line number. + inliner : Inliner | None + Object that called the role (has .reporter, .document). + options : dict | None + Options from role directive. + content : list | None + Content from role directive. + + Returns + ------- + tuple[list[nodes.Node], list[nodes.system_message]] + Nodes to insert and any messages. + + Examples + -------- + >>> node_list, messages = cli_option_role( + ... "cli-option", ":cli-option:`--verbose`", "--verbose", + ... 1, None + ... ) + >>> node_list[0]["classes"] + ['cli-option', 'cli-option-long'] + + >>> node_list, messages = cli_option_role( + ... "cli-option", ":cli-option:`-h`", "-h", + ... 1, None + ... ) + >>> node_list[0]["classes"] + ['cli-option', 'cli-option-short'] + + >>> node_list, messages = cli_option_role( + ... "cli-option", ":cli-option:`--no-color`", "--no-color", + ... 1, None + ... 
) + >>> node_list[0].astext() + '--no-color' + """ + options = normalize_options(options) + node = nodes.literal(rawtext, text, classes=["cli-option"]) + + if text.startswith("--"): + node["classes"].append("cli-option-long") + elif text.startswith("-"): + node["classes"].append("cli-option-short") + + return [node], [] + + +def cli_metavar_role( + name: str, + rawtext: str, + text: str, + lineno: int, + inliner: Inliner | None, + options: dict[str, t.Any] | None = None, + content: list[str] | None = None, +) -> tuple[list[nodes.Node], list[nodes.system_message]]: + """Role for CLI metavar placeholders like FILE or PATH. + + Generates a literal node with 'cli-metavar' CSS class for styling. + + Parameters + ---------- + name : str + Local name of the role used in document. + rawtext : str + Full interpreted text including role markup. + text : str + Content between backticks. + lineno : int + Line number. + inliner : Inliner | None + Object that called the role. + options : dict | None + Options from role directive. + content : list | None + Content from role directive. + + Returns + ------- + tuple[list[nodes.Node], list[nodes.system_message]] + Nodes to insert and any messages. + + Examples + -------- + >>> node_list, messages = cli_metavar_role( + ... "cli-metavar", ":cli-metavar:`FILE`", "FILE", + ... 1, None + ... ) + >>> node_list[0]["classes"] + ['cli-metavar'] + >>> node_list[0].astext() + 'FILE' + + >>> node_list, messages = cli_metavar_role( + ... "cli-metavar", ":cli-metavar:`PATH`", "PATH", + ... 1, None + ... ) + >>> "cli-metavar" in node_list[0]["classes"] + True + """ + options = normalize_options(options) + node = nodes.literal(rawtext, text, classes=["cli-metavar"]) + return [node], [] + + +def cli_command_role( + name: str, + rawtext: str, + text: str, + lineno: int, + inliner: Inliner | None, + options: dict[str, t.Any] | None = None, + content: list[str] | None = None, +) -> tuple[list[nodes.Node], list[nodes.system_message]]: + """Role for CLI command names like sync or add. + + Generates a literal node with 'cli-command' CSS class for styling. + + Parameters + ---------- + name : str + Local name of the role used in document. + rawtext : str + Full interpreted text including role markup. + text : str + Content between backticks. + lineno : int + Line number. + inliner : Inliner | None + Object that called the role. + options : dict | None + Options from role directive. + content : list | None + Content from role directive. + + Returns + ------- + tuple[list[nodes.Node], list[nodes.system_message]] + Nodes to insert and any messages. + + Examples + -------- + >>> node_list, messages = cli_command_role( + ... "cli-command", ":cli-command:`sync`", "sync", + ... 1, None + ... ) + >>> node_list[0]["classes"] + ['cli-command'] + >>> node_list[0].astext() + 'sync' + + >>> node_list, messages = cli_command_role( + ... "cli-command", ":cli-command:`vcspull`", "vcspull", + ... 1, None + ... ) + >>> "cli-command" in node_list[0]["classes"] + True + """ + options = normalize_options(options) + node = nodes.literal(rawtext, text, classes=["cli-command"]) + return [node], [] + + +def cli_default_role( + name: str, + rawtext: str, + text: str, + lineno: int, + inliner: Inliner | None, + options: dict[str, t.Any] | None = None, + content: list[str] | None = None, +) -> tuple[list[nodes.Node], list[nodes.system_message]]: + """Role for CLI default values like None or "default". + + Generates a literal node with 'cli-default' CSS class for styling. 
+ + Parameters + ---------- + name : str + Local name of the role used in document. + rawtext : str + Full interpreted text including role markup. + text : str + Content between backticks. + lineno : int + Line number. + inliner : Inliner | None + Object that called the role. + options : dict | None + Options from role directive. + content : list | None + Content from role directive. + + Returns + ------- + tuple[list[nodes.Node], list[nodes.system_message]] + Nodes to insert and any messages. + + Examples + -------- + >>> node_list, messages = cli_default_role( + ... "cli-default", ":cli-default:`None`", "None", + ... 1, None + ... ) + >>> node_list[0]["classes"] + ['cli-default'] + >>> node_list[0].astext() + 'None' + + >>> node_list, messages = cli_default_role( + ... "cli-default", ':cli-default:`"auto"`', '"auto"', + ... 1, None + ... ) + >>> "cli-default" in node_list[0]["classes"] + True + """ + options = normalize_options(options) + node = nodes.literal(rawtext, text, classes=["cli-default"]) + return [node], [] + + +def cli_choice_role( + name: str, + rawtext: str, + text: str, + lineno: int, + inliner: Inliner | None, + options: dict[str, t.Any] | None = None, + content: list[str] | None = None, +) -> tuple[list[nodes.Node], list[nodes.system_message]]: + """Role for CLI choice values like json or yaml. + + Generates a literal node with 'cli-choice' CSS class for styling. + + Parameters + ---------- + name : str + Local name of the role used in document. + rawtext : str + Full interpreted text including role markup. + text : str + Content between backticks. + lineno : int + Line number. + inliner : Inliner | None + Object that called the role. + options : dict | None + Options from role directive. + content : list | None + Content from role directive. + + Returns + ------- + tuple[list[nodes.Node], list[nodes.system_message]] + Nodes to insert and any messages. + + Examples + -------- + >>> node_list, messages = cli_choice_role( + ... "cli-choice", ":cli-choice:`json`", "json", + ... 1, None + ... ) + >>> node_list[0]["classes"] + ['cli-choice'] + >>> node_list[0].astext() + 'json' + + >>> node_list, messages = cli_choice_role( + ... "cli-choice", ":cli-choice:`yaml`", "yaml", + ... 1, None + ... ) + >>> "cli-choice" in node_list[0]["classes"] + True + """ + options = normalize_options(options) + node = nodes.literal(rawtext, text, classes=["cli-choice"]) + return [node], [] + + +def register_roles() -> None: + """Register all CLI roles with docutils. + + This function registers the following roles: + - cli-option: For CLI options (--verbose, -h) + - cli-metavar: For metavar placeholders (FILE, PATH) + - cli-command: For command names (sync, add) + - cli-default: For default values (None, "default") + - cli-choice: For choice values (json, yaml) + + Examples + -------- + >>> register_roles() + >>> # Roles are now available in docutils RST parsing + """ + roles.register_local_role("cli-option", cli_option_role) + roles.register_local_role("cli-metavar", cli_metavar_role) + roles.register_local_role("cli-command", cli_command_role) + roles.register_local_role("cli-default", cli_default_role) + roles.register_local_role("cli-choice", cli_choice_role) diff --git a/docs/_ext/cli_usage_lexer.py b/docs/_ext/cli_usage_lexer.py new file mode 100644 index 000000000..40170e317 --- /dev/null +++ b/docs/_ext/cli_usage_lexer.py @@ -0,0 +1,115 @@ +"""Pygments lexer for CLI usage/help output. 
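+It is registered under the ``cli-usage`` alias (see ``pretty_argparse.setup``)
+and applied to ``usage:`` literal blocks.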
+ +This module provides a custom Pygments lexer for highlighting command-line +usage text typically generated by argparse, getopt, or similar libraries. +""" + +from __future__ import annotations + +from pygments.lexer import RegexLexer, bygroups, include +from pygments.token import Generic, Name, Operator, Punctuation, Text, Whitespace + + +class CLIUsageLexer(RegexLexer): + """Lexer for CLI usage/help text (argparse, etc.). + + Highlights usage patterns including options, arguments, and meta-variables. + + Examples + -------- + >>> from pygments.token import Token + >>> lexer = CLIUsageLexer() + >>> tokens = list(lexer.get_tokens("usage: cmd [-h]")) + >>> tokens[0] + (Token.Generic.Heading, 'usage:') + >>> tokens[2] + (Token.Name.Label, 'cmd') + """ + + name = "CLI Usage" + aliases = ["cli-usage", "usage"] # noqa: RUF012 + filenames: list[str] = [] # noqa: RUF012 + mimetypes = ["text/x-cli-usage"] # noqa: RUF012 + + tokens = { # noqa: RUF012 + "root": [ + # "usage:" at start of line + (r"^(usage:)(\s+)", bygroups(Generic.Heading, Whitespace)), # type: ignore[no-untyped-call] + # Continuation lines (leading whitespace for wrapped usage) + (r"^(\s+)(?=\S)", Whitespace), + include("inline"), + ], + "inline": [ + # Whitespace + (r"\s+", Whitespace), + # Long options with = value (e.g., --log-level=VALUE) + ( + r"(--[a-zA-Z0-9][-a-zA-Z0-9]*)(=)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9]*)", + bygroups(Name.Tag, Operator, Name.Variable), # type: ignore[no-untyped-call] + ), + # Long options standalone + (r"--[a-zA-Z0-9][-a-zA-Z0-9]*", Name.Tag), + # Short options with space-separated value (e.g., -S socket-path) + ( + r"(-[a-zA-Z0-9])(\s+)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9]*)", + bygroups(Name.Attribute, Whitespace, Name.Variable), # type: ignore[no-untyped-call] + ), + # Short options standalone + (r"-[a-zA-Z0-9]", Name.Attribute), + # UPPERCASE meta-variables (COMMAND, FILE, PATH) + (r"\b[A-Z][A-Z0-9_]+\b", Name.Constant), + # Opening bracket - enter optional state + (r"\[", Punctuation, "optional"), + # Closing bracket (fallback for unmatched) + (r"\]", Punctuation), + # Choice separator (pipe) + (r"\|", Operator), + # Parentheses for grouping + (r"[()]", Punctuation), + # Positional/command names (lowercase with dashes) + (r"\b[a-z][-a-z0-9]*\b", Name.Label), + # Catch-all for any other text + (r"[^\s\[\]|()]+", Text), + ], + "optional": [ + # Nested optional bracket + (r"\[", Punctuation, "#push"), + # End optional + (r"\]", Punctuation, "#pop"), + # Contents use inline rules + include("inline"), + ], + } + + +def tokenize_usage(text: str) -> list[tuple[str, str]]: + """Tokenize usage text and return list of (token_type, value) tuples. + + Parameters + ---------- + text : str + CLI usage text to tokenize. + + Returns + ------- + list[tuple[str, str]] + List of (token_type_name, text_value) tuples. + + Examples + -------- + >>> result = tokenize_usage("usage: cmd [-h]") + >>> result[0] + ('Token.Generic.Heading', 'usage:') + >>> result[2] + ('Token.Name.Label', 'cmd') + >>> result[4] + ('Token.Punctuation', '[') + >>> result[5] + ('Token.Name.Attribute', '-h') + >>> result[6] + ('Token.Punctuation', ']') + """ + lexer = CLIUsageLexer() + return [ + (str(tok_type), tok_value) for tok_type, tok_value in lexer.get_tokens(text) + ] diff --git a/docs/_ext/pretty_argparse.py b/docs/_ext/pretty_argparse.py new file mode 100644 index 000000000..b28fc4a83 --- /dev/null +++ b/docs/_ext/pretty_argparse.py @@ -0,0 +1,789 @@ +"""Enhanced sphinx_argparse_neo output formatting. 
+ +This extension wraps sphinx_argparse_neo's directive to: +1. Remove ANSI escape codes that may be present when FORCE_COLOR is set +2. Convert "examples:" definition lists into proper documentation sections +3. Nest category-specific examples under a parent Examples section +4. Apply cli-usage syntax highlighting to usage blocks +5. Reorder sections so usage appears before examples +""" + +from __future__ import annotations + +import re +import typing as t + +from docutils import nodes +from sphinx_argparse_neo.directive import ArgparseDirective + +if t.TYPE_CHECKING: + from sphinx.application import Sphinx + +_ANSI_RE = re.compile(r"\033\[[;?0-9]*[a-zA-Z]") + +# Match asterisks that would trigger RST emphasis (preceded by delimiter like +# - or space) but NOT asterisks already escaped or in code/literal contexts +_RST_EMPHASIS_RE = re.compile(r"(?<=[^\s\\])-\*(?=[^\s*]|$)") + + +def escape_rst_emphasis(text: str) -> str: + r"""Escape asterisks that would trigger RST inline emphasis. + + In reStructuredText, ``*text*`` creates emphasis. When argparse help text + contains patterns like ``django-*``, the dash (a delimiter character) followed + by asterisk triggers emphasis detection, causing warnings like: + "Inline emphasis start-string without end-string." + + This function escapes such asterisks with a backslash so they render literally. + + Parameters + ---------- + text : str + Text potentially containing problematic asterisks. + + Returns + ------- + str + Text with asterisks escaped where needed. + + Examples + -------- + >>> escape_rst_emphasis('vcspull list "django-*"') + 'vcspull list "django-\\*"' + >>> escape_rst_emphasis("plain text") + 'plain text' + >>> escape_rst_emphasis("already \\* escaped") + 'already \\* escaped' + >>> escape_rst_emphasis("*emphasis* is ok") + '*emphasis* is ok' + """ + return _RST_EMPHASIS_RE.sub(r"-\*", text) + + +def strip_ansi(text: str) -> str: + r"""Remove ANSI escape codes from text. + + Parameters + ---------- + text : str + Text potentially containing ANSI codes. + + Returns + ------- + str + Text with ANSI codes removed. + + Examples + -------- + >>> strip_ansi("plain text") + 'plain text' + >>> strip_ansi("\033[32mgreen\033[0m") + 'green' + >>> strip_ansi("\033[1;34mbold blue\033[0m") + 'bold blue' + """ + return _ANSI_RE.sub("", text) + + +def is_examples_term(term_text: str) -> bool: + """Check if a definition term is an examples header. + + Parameters + ---------- + term_text : str + The text content of a definition term. + + Returns + ------- + bool + True if this is an examples header. + + Examples + -------- + >>> is_examples_term("examples:") + True + >>> is_examples_term("Machine-readable output examples:") + True + >>> is_examples_term("Usage:") + False + """ + return term_text.lower().rstrip(":").endswith("examples") + + +def is_base_examples_term(term_text: str) -> bool: + """Check if a definition term is a base "examples:" header (no prefix). + + Parameters + ---------- + term_text : str + The text content of a definition term. + + Returns + ------- + bool + True if this is just "examples:" with no category prefix. 
+ + Examples + -------- + >>> is_base_examples_term("examples:") + True + >>> is_base_examples_term("Examples") + True + >>> is_base_examples_term("Field-scoped examples:") + False + """ + return term_text.lower().rstrip(":").strip() == "examples" + + +def make_section_id( + term_text: str, + counter: int = 0, + *, + is_subsection: bool = False, + page_prefix: str = "", +) -> str: + """Generate a section ID from an examples term. + + Parameters + ---------- + term_text : str + The examples term text (e.g., "Machine-readable output: examples:") + counter : int + Counter for uniqueness if multiple examples sections exist. + is_subsection : bool + If True, omit "-examples" suffix for cleaner nested IDs. + page_prefix : str + Optional prefix from the page name (e.g., "sync", "add") to ensure + uniqueness across different documentation pages. + + Returns + ------- + str + A normalized section ID. + + Examples + -------- + >>> make_section_id("examples:") + 'examples' + >>> make_section_id("examples:", page_prefix="sync") + 'sync-examples' + >>> make_section_id("Machine-readable output examples:") + 'machine-readable-output-examples' + >>> make_section_id("Field-scoped examples:", is_subsection=True) + 'field-scoped' + >>> make_section_id("examples:", counter=1) + 'examples-1' + """ + # Extract prefix before "examples" (e.g., "Machine-readable output") + lower_text = term_text.lower().rstrip(":") + if "examples" in lower_text: + prefix = lower_text.rsplit("examples", 1)[0].strip() + # Remove trailing colon from prefix (handles ": examples" pattern) + prefix = prefix.rstrip(":").strip() + if prefix: + normalized_prefix = prefix.replace(" ", "-") + # Subsections don't need "-examples" suffix + if is_subsection: + section_id = normalized_prefix + else: + section_id = f"{normalized_prefix}-examples" + else: + # Plain "examples" - add page prefix if provided for uniqueness + section_id = f"{page_prefix}-examples" if page_prefix else "examples" + else: + section_id = "examples" + + # Add counter suffix for uniqueness + if counter > 0: + section_id = f"{section_id}-{counter}" + + return section_id + + +def make_section_title(term_text: str, *, is_subsection: bool = False) -> str: + """Generate a section title from an examples term. + + Parameters + ---------- + term_text : str + The examples term text (e.g., "Machine-readable output: examples:") + is_subsection : bool + If True, omit "Examples" suffix for cleaner nested titles. + + Returns + ------- + str + A proper title (e.g., "Machine-readable Output Examples" or just + "Machine-Readable Output" if is_subsection=True). 
+ + Examples + -------- + >>> make_section_title("examples:") + 'Examples' + >>> make_section_title("Machine-readable output examples:") + 'Machine-Readable Output Examples' + >>> make_section_title("Field-scoped examples:", is_subsection=True) + 'Field-Scoped' + """ + # Remove trailing colon and normalize + text = term_text.rstrip(":").strip() + # Handle base "examples:" case + if text.lower() == "examples": + return "Examples" + + # Extract the prefix (category name) before "examples" + lower = text.lower() + if lower.endswith(": examples"): + prefix = text[: -len(": examples")] + elif lower.endswith(" examples"): + prefix = text[: -len(" examples")] + else: + prefix = text + + # Title case the prefix + titled_prefix = prefix.title() + + # For subsections, just use the prefix (cleaner nested titles) + if is_subsection: + return titled_prefix + + # For top-level sections, append "Examples" + return f"{titled_prefix} Examples" + + +def _create_example_section( + term_text: str, + def_node: nodes.definition, + *, + is_subsection: bool = False, + page_prefix: str = "", +) -> nodes.section: + """Create a section node for an examples item. + + Parameters + ---------- + term_text : str + The examples term text. + def_node : nodes.definition + The definition node containing example commands. + is_subsection : bool + If True, create a subsection with simpler title/id. + page_prefix : str + Optional prefix from the page name for unique section IDs. + + Returns + ------- + nodes.section + A section node with title and code blocks. + """ + section_id = make_section_id( + term_text, is_subsection=is_subsection, page_prefix=page_prefix + ) + section_title = make_section_title(term_text, is_subsection=is_subsection) + + section = nodes.section() + section["ids"] = [section_id] + section["names"] = [nodes.fully_normalize_name(section_title)] + + title = nodes.title(text=section_title) + section += title + + # Extract commands from definition and create separate code blocks + def_text = strip_ansi(def_node.astext()) + for line in def_text.split("\n"): + line = line.strip() + if line: + code_block = nodes.literal_block( + text=f"$ {line}", + classes=["highlight-console"], + ) + code_block["language"] = "console" + section += code_block + + return section + + +def transform_definition_list( + dl_node: nodes.definition_list, *, page_prefix: str = "" +) -> list[nodes.Node]: + """Transform a definition list, converting examples items to code blocks. + + If there's a base "examples:" item followed by category-specific examples + (e.g., "Field-scoped: examples:"), the categories are nested under the + parent Examples section for cleaner ToC structure. + + Parameters + ---------- + dl_node : nodes.definition_list + A definition list node. + page_prefix : str + Optional prefix from the page name for unique section IDs. + + Returns + ------- + list[nodes.Node] + Transformed nodes - code blocks for examples, original for others. 
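+
+    Examples
+    --------
+    A minimal sketch with a hand-built definition list:
+
+    >>> from docutils import nodes
+    >>> dl = nodes.definition_list()
+    >>> item = nodes.definition_list_item()
+    >>> item += nodes.term(text="examples:")
+    >>> item += nodes.definition("", nodes.paragraph(text="vcspull sync"))
+    >>> dl += item
+    >>> result = transform_definition_list(dl)
+    >>> [type(n).__name__ for n in result]
+    ['section']
+    >>> result[0]["ids"]
+    ['examples']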
+ """ + # First pass: collect examples and non-examples items separately + example_items: list[tuple[str, nodes.definition]] = [] # (term_text, def_node) + non_example_items: list[nodes.Node] = [] + base_examples_index: int | None = None + + for item in dl_node.children: + if not isinstance(item, nodes.definition_list_item): + continue + + # Get the term and definition + term_node = None + def_node = None + for child in item.children: + if isinstance(child, nodes.term): + term_node = child + elif isinstance(child, nodes.definition): + def_node = child + + if term_node is None or def_node is None: + non_example_items.append(item) + continue + + term_text = strip_ansi(term_node.astext()) + + if is_examples_term(term_text): + if is_base_examples_term(term_text): + base_examples_index = len(example_items) + example_items.append((term_text, def_node)) + else: + non_example_items.append(item) + + # Build result nodes + result_nodes: list[nodes.Node] = [] + + # Flush non-example items first (if any appeared before examples) + if non_example_items: + new_dl = nodes.definition_list() + new_dl.extend(non_example_items) + result_nodes.append(new_dl) + + # Determine nesting strategy + # Nest if: there's a base "examples:" AND at least one other example category + should_nest = base_examples_index is not None and len(example_items) > 1 + + if should_nest and base_examples_index is not None: + # Create parent "Examples" section + base_term, base_def = example_items[base_examples_index] + parent_section = _create_example_section( + base_term, base_def, is_subsection=False, page_prefix=page_prefix + ) + + # Add other examples as nested subsections + for i, (term_text, def_node) in enumerate(example_items): + if i == base_examples_index: + continue # Skip the base (already used as parent) + subsection = _create_example_section( + term_text, def_node, is_subsection=True, page_prefix=page_prefix + ) + parent_section += subsection + + result_nodes.append(parent_section) + else: + # No nesting - create flat sections (backwards compatible) + for term_text, def_node in example_items: + section = _create_example_section( + term_text, def_node, is_subsection=False, page_prefix=page_prefix + ) + result_nodes.append(section) + + return result_nodes + + +def process_node( + node: nodes.Node, *, page_prefix: str = "" +) -> nodes.Node | list[nodes.Node]: + """Process a node: strip ANSI codes and transform examples. + + Parameters + ---------- + node : nodes.Node + A docutils node to process. + page_prefix : str + Optional prefix from the page name for unique section IDs. + + Returns + ------- + nodes.Node | list[nodes.Node] + The processed node(s). 
+ """ + # Handle text nodes - strip ANSI + if isinstance(node, nodes.Text): + cleaned = strip_ansi(node.astext()) + if cleaned != node.astext(): + return nodes.Text(cleaned) + return node + + # Handle definition lists - transform examples + if isinstance(node, nodes.definition_list): + # Check if any items are examples + has_examples = False + for item in node.children: + if isinstance(item, nodes.definition_list_item): + for child in item.children: + if isinstance(child, nodes.term) and is_examples_term( + strip_ansi(child.astext()) + ): + has_examples = True + break + if has_examples: + break + + if has_examples: + return transform_definition_list(node, page_prefix=page_prefix) + + # Handle literal_block nodes - strip ANSI and apply usage highlighting + if isinstance(node, nodes.literal_block): + text = strip_ansi(node.astext()) + needs_update = text != node.astext() + + # Check if this is a usage block (starts with "usage:") + is_usage_block = text.lstrip().lower().startswith("usage:") + + if needs_update or is_usage_block: + new_block = nodes.literal_block(text=text) + # Preserve attributes + for attr in ("language", "classes"): + if attr in node: + new_block[attr] = node[attr] + # Apply cli-usage language to usage blocks + if is_usage_block: + new_block["language"] = "cli-usage" + return new_block + return node + + # Handle paragraph nodes - strip ANSI and lift sections out + if isinstance(node, nodes.paragraph): + # Process children and check if any become sections + processed_children: list[nodes.Node] = [] + changed = False + has_sections = False + + for child in node.children: + if isinstance(child, nodes.Text): + cleaned = strip_ansi(child.astext()) + if cleaned != child.astext(): + processed_children.append(nodes.Text(cleaned)) + changed = True + else: + processed_children.append(child) + else: + result = process_node(child, page_prefix=page_prefix) + if isinstance(result, list): + processed_children.extend(result) + changed = True + # Check if any results are sections + if any(isinstance(r, nodes.section) for r in result): + has_sections = True + elif result is not child: + processed_children.append(result) + changed = True + if isinstance(result, nodes.section): + has_sections = True + else: + processed_children.append(child) + + if not changed: + return node + + # If no sections, return a normal paragraph + if not has_sections: + new_para = nodes.paragraph() + new_para.extend(processed_children) + return new_para + + # Sections found - lift them out of the paragraph + # Return a list: [para_before, section1, section2, ..., para_after] + result_nodes: list[nodes.Node] = [] + current_para_children: list[nodes.Node] = [] + + for child in processed_children: + if isinstance(child, nodes.section): + # Flush current paragraph content + if current_para_children: + para = nodes.paragraph() + para.extend(current_para_children) + result_nodes.append(para) + current_para_children = [] + # Add section as a sibling + result_nodes.append(child) + else: + current_para_children.append(child) + + # Flush remaining paragraph content + if current_para_children: + para = nodes.paragraph() + para.extend(current_para_children) + result_nodes.append(para) + + return result_nodes + + # Recursively process children for other node types + if hasattr(node, "children"): + new_children: list[nodes.Node] = [] + children_changed = False + for child in node.children: + result = process_node(child, page_prefix=page_prefix) + if isinstance(result, list): + new_children.extend(result) + children_changed = True 
+ elif result is not child: + new_children.append(result) + children_changed = True + else: + new_children.append(child) + if children_changed: + node.children = new_children + + return node + + +def _is_usage_block(node: nodes.Node) -> bool: + """Check if a node is a usage literal block. + + Parameters + ---------- + node : nodes.Node + A docutils node to check. + + Returns + ------- + bool + True if this is a usage block (literal_block starting with "usage:"). + + Examples + -------- + >>> from docutils import nodes + >>> _is_usage_block(nodes.literal_block(text="usage: cmd [-h]")) + True + >>> _is_usage_block(nodes.literal_block(text="Usage: vcspull sync")) + True + >>> _is_usage_block(nodes.literal_block(text=" usage: cmd")) + True + >>> _is_usage_block(nodes.literal_block(text="some other text")) + False + >>> _is_usage_block(nodes.paragraph(text="usage: cmd")) + False + >>> _is_usage_block(nodes.section()) + False + """ + if not isinstance(node, nodes.literal_block): + return False + text = node.astext() + return text.lstrip().lower().startswith("usage:") + + +def _is_examples_section(node: nodes.Node) -> bool: + """Check if a node is an examples section. + + Parameters + ---------- + node : nodes.Node + A docutils node to check. + + Returns + ------- + bool + True if this is an examples section (section with "examples" in its ID). + + Examples + -------- + >>> from docutils import nodes + >>> section = nodes.section() + >>> section["ids"] = ["examples"] + >>> _is_examples_section(section) + True + >>> section2 = nodes.section() + >>> section2["ids"] = ["machine-readable-output-examples"] + >>> _is_examples_section(section2) + True + >>> section3 = nodes.section() + >>> section3["ids"] = ["positional-arguments"] + >>> _is_examples_section(section3) + False + >>> _is_examples_section(nodes.paragraph()) + False + >>> _is_examples_section(nodes.literal_block(text="examples")) + False + """ + if not isinstance(node, nodes.section): + return False + ids: list[str] = node.get("ids", []) + return any("examples" in id_str.lower() for id_str in ids) + + +def _reorder_nodes(processed: list[nodes.Node]) -> list[nodes.Node]: + """Reorder nodes so usage blocks appear before examples sections. + + This ensures the CLI usage synopsis appears above examples in the + documentation, making it easier to understand command syntax before + seeing example invocations. + + Parameters + ---------- + processed : list[nodes.Node] + List of processed docutils nodes. + + Returns + ------- + list[nodes.Node] + Reordered nodes with usage before examples. 
+ + Examples + -------- + >>> from docutils import nodes + + Create test nodes: + + >>> desc = nodes.paragraph(text="Description") + >>> examples = nodes.section() + >>> examples["ids"] = ["examples"] + >>> usage = nodes.literal_block(text="usage: cmd [-h]") + >>> args = nodes.section() + >>> args["ids"] = ["arguments"] + + When usage appears after examples, it gets moved before: + + >>> result = _reorder_nodes([desc, examples, usage, args]) + >>> [type(n).__name__ for n in result] + ['paragraph', 'literal_block', 'section', 'section'] + + When no examples exist, order is unchanged: + + >>> result = _reorder_nodes([desc, usage, args]) + >>> [type(n).__name__ for n in result] + ['paragraph', 'literal_block', 'section'] + + When usage already before examples, order is preserved: + + >>> result = _reorder_nodes([desc, usage, examples, args]) + >>> [type(n).__name__ for n in result] + ['paragraph', 'literal_block', 'section', 'section'] + + Empty list returns empty: + + >>> _reorder_nodes([]) + [] + """ + # First pass: check if there are any examples sections + has_examples = any(_is_examples_section(node) for node in processed) + if not has_examples: + # No examples, preserve original order + return processed + + usage_blocks: list[nodes.Node] = [] + examples_sections: list[nodes.Node] = [] + other_before_examples: list[nodes.Node] = [] + other_after_examples: list[nodes.Node] = [] + + seen_examples = False + for node in processed: + if _is_usage_block(node): + usage_blocks.append(node) + elif _is_examples_section(node): + examples_sections.append(node) + seen_examples = True + elif not seen_examples: + other_before_examples.append(node) + else: + other_after_examples.append(node) + + # Order: before_examples → usage → examples → after_examples + return ( + other_before_examples + usage_blocks + examples_sections + other_after_examples + ) + + +class CleanArgParseDirective(ArgparseDirective): + """ArgParse directive that strips ANSI codes and formats examples.""" + + def run(self) -> list[nodes.Node]: + """Run the directive, clean output, format examples, and reorder.""" + result = super().run() + + # Extract page name for unique section IDs across different CLI pages + page_prefix = "" + if hasattr(self.state, "document"): + settings = self.state.document.settings + if hasattr(settings, "env") and hasattr(settings.env, "docname"): + # docname is like "cli/sync" - extract "sync" + docname = settings.env.docname + page_prefix = docname.split("/")[-1] + + processed: list[nodes.Node] = [] + for node in result: + processed_node = process_node(node, page_prefix=page_prefix) + if isinstance(processed_node, list): + processed.extend(processed_node) + else: + processed.append(processed_node) + + # Reorder: usage blocks before examples sections + return _reorder_nodes(processed) + + +def setup(app: Sphinx) -> dict[str, t.Any]: + """Register the clean argparse directive, lexers, and CLI roles. + + Parameters + ---------- + app : Sphinx + The Sphinx application object. + + Returns + ------- + dict + Extension metadata. 
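+
+    Examples
+    --------
+    A minimal ``conf.py`` sketch (assumes ``docs/_ext`` is on ``sys.path``
+    so the flat imports below resolve)::
+
+        import sys
+
+        sys.path.insert(0, "_ext")
+        extensions = ["pretty_argparse"]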
+ """ + # Load the base sphinx_argparse_neo extension first + + app.setup_extension("sphinx_argparse_neo") + + # Override the argparse directive with our enhanced version + app.add_directive("argparse", CleanArgParseDirective, override=True) + + # Register CLI usage lexer for usage block highlighting + from cli_usage_lexer import CLIUsageLexer + + app.add_lexer("cli-usage", CLIUsageLexer) + + # Register argparse lexers for help output highlighting + from argparse_lexer import ( + ArgparseHelpLexer, + ArgparseLexer, + ArgparseUsageLexer, + ) + + app.add_lexer("argparse", ArgparseLexer) + app.add_lexer("argparse-usage", ArgparseUsageLexer) + app.add_lexer("argparse-help", ArgparseHelpLexer) + + # Register CLI inline roles for documentation + from argparse_roles import register_roles + + register_roles() + + # Register vcspull output lexer for command output highlighting + from vcspull_output_lexer import ( # type: ignore[import-not-found] + VcspullOutputLexer, + ) + + app.add_lexer("vcspull-output", VcspullOutputLexer) + + # Register vcspull console lexer for session highlighting + from vcspull_console_lexer import ( # type: ignore[import-not-found] + VcspullConsoleLexer, + ) + + app.add_lexer("vcspull-console", VcspullConsoleLexer) + + # Add CSS file for argparse highlighting styles + app.add_css_file("css/argparse-highlight.css") + + return {"version": "3.0", "parallel_read_safe": True} diff --git a/docs/_ext/sphinx_argparse_neo/__init__.py b/docs/_ext/sphinx_argparse_neo/__init__.py new file mode 100644 index 000000000..28376d433 --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/__init__.py @@ -0,0 +1,104 @@ +"""sphinx_argparse_neo - Modern sphinx-argparse replacement. + +A Sphinx extension for documenting argparse-based CLI tools that: +- Works with Sphinx 8.x AND 9.x (no autodoc.mock dependency) +- Fixes long-standing sphinx-argparse issues (TOC pollution, heading levels) +- Provides configurable output (rubrics vs sections, flattened subcommands) +- Supports extensibility via renderer classes +- Optional MyST markdown support in help text +""" + +from __future__ import annotations + +import typing as t + +from sphinx_argparse_neo.directive import ArgparseDirective +from sphinx_argparse_neo.nodes import ( + argparse_argument, + argparse_group, + argparse_program, + argparse_subcommand, + argparse_subcommands, + argparse_usage, + depart_argparse_argument_html, + depart_argparse_group_html, + depart_argparse_program_html, + depart_argparse_subcommand_html, + depart_argparse_subcommands_html, + depart_argparse_usage_html, + visit_argparse_argument_html, + visit_argparse_group_html, + visit_argparse_program_html, + visit_argparse_subcommand_html, + visit_argparse_subcommands_html, + visit_argparse_usage_html, +) + +if t.TYPE_CHECKING: + from sphinx.application import Sphinx + +__version__ = "1.0.0" + + +def setup(app: Sphinx) -> dict[str, t.Any]: + """Register the argparse directive and configuration options. + + Parameters + ---------- + app : Sphinx + The Sphinx application object. + + Returns + ------- + dict[str, t.Any] + Extension metadata. 
+ """ + # Configuration options + app.add_config_value("argparse_heading_level", 2, "html") + app.add_config_value("argparse_use_rubric", False, "html") + app.add_config_value("argparse_group_title_prefix", "", "html") + app.add_config_value("argparse_include_in_toc", True, "html") + app.add_config_value("argparse_toc_depth", 2, "html") + app.add_config_value("argparse_flatten_subcommands", False, "html") + app.add_config_value("argparse_subcommand_style", "nested", "html") + app.add_config_value("argparse_show_defaults", True, "html") + app.add_config_value("argparse_show_choices", True, "html") + app.add_config_value("argparse_show_types", True, "html") + app.add_config_value("argparse_hide_suppressed", True, "html") + app.add_config_value("argparse_help_format", "rst", "html") + app.add_config_value("argparse_usage_style", "literal", "html") + + # Register custom nodes + app.add_node( + argparse_program, + html=(visit_argparse_program_html, depart_argparse_program_html), + ) + app.add_node( + argparse_usage, + html=(visit_argparse_usage_html, depart_argparse_usage_html), + ) + app.add_node( + argparse_group, + html=(visit_argparse_group_html, depart_argparse_group_html), + ) + app.add_node( + argparse_argument, + html=(visit_argparse_argument_html, depart_argparse_argument_html), + ) + app.add_node( + argparse_subcommands, + html=(visit_argparse_subcommands_html, depart_argparse_subcommands_html), + ) + app.add_node( + argparse_subcommand, + html=(visit_argparse_subcommand_html, depart_argparse_subcommand_html), + ) + + # Register directive + app.add_directive("argparse", ArgparseDirective) + + return { + "version": __version__, + "parallel_read_safe": True, + "parallel_write_safe": True, + } diff --git a/docs/_ext/sphinx_argparse_neo/compat.py b/docs/_ext/sphinx_argparse_neo/compat.py new file mode 100644 index 000000000..3e2a1958c --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/compat.py @@ -0,0 +1,247 @@ +"""Compatibility utilities for module loading. + +This module provides utilities for loading Python modules safely, +including mock handling for imports that may fail during documentation +builds. + +Unlike sphinx-argparse, this module does NOT depend on autodoc's mock +functionality, which moved in Sphinx 9.x. +""" + +from __future__ import annotations + +import contextlib +import importlib +import sys +import typing as t + +if t.TYPE_CHECKING: + import argparse + from collections.abc import Iterator + + +class MockModule: + """Simple mock for unavailable imports. + + This class provides a minimal mock that can be used as a placeholder + for modules that aren't available during documentation builds. + + Parameters + ---------- + name : str + The module name being mocked. + + Examples + -------- + >>> mock = MockModule("mypackage.submodule") + >>> mock.__name__ + 'mypackage.submodule' + >>> child = mock.child_attr + >>> child.__name__ + 'mypackage.submodule.child_attr' + >>> callable(mock.some_function) + True + >>> mock.some_function() + + """ + + def __init__(self, name: str) -> None: + """Initialize the mock module.""" + self.__name__ = name + self._name = name + + def __repr__(self) -> str: + """Return string representation.""" + return f"" + + def __getattr__(self, name: str) -> MockModule: + """Return a child mock for any attribute access. + + Parameters + ---------- + name : str + The attribute name. + + Returns + ------- + MockModule + A new mock for the child attribute. 
+ """ + return MockModule(f"{self._name}.{name}") + + def __call__(self, *args: t.Any, **kwargs: t.Any) -> MockModule: + """Return self when called as a function. + + Parameters + ---------- + *args : t.Any + Positional arguments (ignored). + **kwargs : t.Any + Keyword arguments (ignored). + + Returns + ------- + MockModule + Self. + """ + return self + + +@contextlib.contextmanager +def mock_imports(modules: list[str]) -> Iterator[None]: + """Context manager to mock missing imports. + + This provides a simple way to temporarily add mock modules to + sys.modules, allowing imports to succeed during documentation builds + even when the actual modules aren't available. + + Parameters + ---------- + modules : list[str] + List of module names to mock. + + Yields + ------ + None + Context manager yields nothing. + + Examples + -------- + >>> import sys + >>> "fake_module" in sys.modules + False + >>> with mock_imports(["fake_module", "fake_module.sub"]): + ... import fake_module + ... fake_module.__name__ + 'fake_module' + >>> "fake_module" in sys.modules + False + """ + mocked: dict[str, MockModule] = {} + + for name in modules: + if name not in sys.modules: + mocked[name] = MockModule(name) + sys.modules[name] = mocked[name] # type: ignore[assignment] + + try: + yield + finally: + for name in mocked: + del sys.modules[name] + + +def import_module(module_name: str) -> t.Any: + """Import a module by name. + + Parameters + ---------- + module_name : str + The fully qualified module name. + + Returns + ------- + t.Any + The imported module. + + Raises + ------ + ImportError + If the module cannot be imported. + + Examples + -------- + >>> mod = import_module("argparse") + >>> hasattr(mod, "ArgumentParser") + True + """ + return importlib.import_module(module_name) + + +def get_parser_from_module( + module_name: str, + func_name: str, + mock_modules: list[str] | None = None, +) -> argparse.ArgumentParser: + """Import a module and call a function to get an ArgumentParser. + + Parameters + ---------- + module_name : str + The module containing the parser factory function. + func_name : str + The name of the function that returns an ArgumentParser. + Can be a dotted path like "Class.method". + mock_modules : list[str] | None + Optional list of module names to mock during import. + + Returns + ------- + argparse.ArgumentParser + The argument parser returned by the function. + + Raises + ------ + ImportError + If the module cannot be imported. + AttributeError + If the function is not found. + TypeError + If the function doesn't return an ArgumentParser. + + Examples + -------- + >>> import argparse + >>> def make_parser(): + ... return argparse.ArgumentParser(prog="test") + >>> # In practice, this would import from a module file + """ + ctx = mock_imports(mock_modules) if mock_modules else contextlib.nullcontext() + + with ctx: + module = import_module(module_name) + + # Handle dotted paths like "Class.method" + obj = module + for part in func_name.split("."): + obj = getattr(obj, part) + + # Call the function if it's callable + parser = obj() if callable(obj) else obj + + return t.cast("argparse.ArgumentParser", parser) + + +def get_parser_from_entry_point( + entry_point: str, + mock_modules: list[str] | None = None, +) -> argparse.ArgumentParser: + """Get an ArgumentParser from a setuptools-style entry point string. + + Parameters + ---------- + entry_point : str + Entry point in the format "module:function" or "module:Class.method". 
+ mock_modules : list[str] | None + Optional list of module names to mock during import. + + Returns + ------- + argparse.ArgumentParser + The argument parser. + + Raises + ------ + ValueError + If the entry point format is invalid. + + Examples + -------- + >>> # get_parser_from_entry_point("myapp.cli:create_parser") + """ + if ":" not in entry_point: + msg = f"Invalid entry point format: {entry_point!r}. Expected 'module:function'" + raise ValueError(msg) + + module_name, func_name = entry_point.split(":", 1) + return get_parser_from_module(module_name, func_name, mock_modules) diff --git a/docs/_ext/sphinx_argparse_neo/directive.py b/docs/_ext/sphinx_argparse_neo/directive.py new file mode 100644 index 000000000..246f0b4b5 --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/directive.py @@ -0,0 +1,257 @@ +"""Sphinx directive for argparse documentation. + +This module provides the ArgparseDirective class that integrates +with Sphinx to generate documentation from ArgumentParser instances. +""" + +from __future__ import annotations + +import typing as t + +from docutils import nodes +from docutils.parsers.rst import directives +from sphinx.util.docutils import SphinxDirective +from sphinx_argparse_neo.compat import get_parser_from_module +from sphinx_argparse_neo.parser import extract_parser +from sphinx_argparse_neo.renderer import ArgparseRenderer, RenderConfig + +if t.TYPE_CHECKING: + import argparse + + +class ArgparseDirective(SphinxDirective): + """Sphinx directive for documenting argparse-based CLI tools. + + Usage + ----- + .. argparse:: + :module: myapp.cli + :func: create_parser + :prog: myapp + + Options + ------- + :module: + The Python module containing the parser factory function. + :func: + The function name that returns an ArgumentParser. + Can be a dotted path like "Class.method". + :prog: + Override the program name (optional). + :path: + Navigate to a specific subparser by path (e.g., "sync pull"). + :heading-level: + Base heading level for sections (default: 2). + :use-rubric: + Use rubric instead of section titles (flag). + :flatten-subcommands: + Don't nest subcommands under "Sub-commands" heading (flag). + :no-defaults: + Don't show default values (flag). + :no-description: + Don't show parser description (flag). + :no-epilog: + Don't show parser epilog (flag). + :mock-modules: + Comma-separated list of modules to mock during import. + + Examples + -------- + In RST documentation:: + + .. argparse:: + :module: vcspull.cli + :func: create_parser + :prog: vcspull + + :path: sync + :use-rubric: + """ + + has_content = True + required_arguments = 0 + optional_arguments = 0 + + option_spec: t.ClassVar[dict[str, t.Any]] = { + "module": directives.unchanged_required, + "func": directives.unchanged_required, + "prog": directives.unchanged, + "path": directives.unchanged, + "heading-level": directives.nonnegative_int, + "use-rubric": directives.flag, + "flatten-subcommands": directives.flag, + "no-defaults": directives.flag, + "no-description": directives.flag, + "no-epilog": directives.flag, + "no-choices": directives.flag, + "no-types": directives.flag, + "mock-modules": directives.unchanged, + # sphinx-argparse compatibility options + "nosubcommands": directives.flag, + "nodefault": directives.flag, + "nodefaultconst": directives.flag, + "noepilog": directives.flag, + "nodescription": directives.flag, + } + + def run(self) -> list[nodes.Node]: + """Execute the directive and return docutils nodes. 
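+        Loads the parser via :func:`get_parser_from_module`, optionally
+        narrows to a subparser via ``:path:``, applies option overrides,
+        and renders the extracted info with :class:`ArgparseRenderer`.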
+ + Returns + ------- + list[nodes.Node] + List of docutils nodes representing the CLI documentation. + """ + # Get required options + module_name = self.options.get("module") + func_name = self.options.get("func") + + if not module_name or not func_name: + error = self.state_machine.reporter.error( + "argparse directive requires :module: and :func: options", + line=self.lineno, + ) + return [error] + + # Parse mock modules + mock_modules: list[str] | None = None + if "mock-modules" in self.options: + mock_modules = [m.strip() for m in self.options["mock-modules"].split(",")] + + # Load the parser + try: + parser = get_parser_from_module(module_name, func_name, mock_modules) + except Exception as e: + error = self.state_machine.reporter.error( + f"Failed to load parser from {module_name}:{func_name}: {e}", + line=self.lineno, + ) + return [error] + + # Override prog if specified + if "prog" in self.options: + parser.prog = self.options["prog"] + + # Navigate to subparser if path specified + if "path" in self.options: + parser = self._navigate_to_subparser(parser, self.options["path"]) + if parser is None: + error = self.state_machine.reporter.error( + f"Subparser path not found: {self.options['path']}", + line=self.lineno, + ) + return [error] + + # Build render config from directive options and Sphinx config + config = self._build_render_config() + + # Extract parser info + parser_info = extract_parser(parser) + + # Apply directive-level overrides + # Handle both new-style and sphinx-argparse compatibility options + if "no-description" in self.options or "nodescription" in self.options: + parser_info = parser_info.__class__( + prog=parser_info.prog, + usage=parser_info.usage, + bare_usage=parser_info.bare_usage, + description=None, + epilog=parser_info.epilog, + argument_groups=parser_info.argument_groups, + subcommands=parser_info.subcommands, + subcommand_dest=parser_info.subcommand_dest, + ) + if "no-epilog" in self.options or "noepilog" in self.options: + parser_info = parser_info.__class__( + prog=parser_info.prog, + usage=parser_info.usage, + bare_usage=parser_info.bare_usage, + description=parser_info.description, + epilog=None, + argument_groups=parser_info.argument_groups, + subcommands=parser_info.subcommands, + subcommand_dest=parser_info.subcommand_dest, + ) + if "nosubcommands" in self.options: + parser_info = parser_info.__class__( + prog=parser_info.prog, + usage=parser_info.usage, + bare_usage=parser_info.bare_usage, + description=parser_info.description, + epilog=parser_info.epilog, + argument_groups=parser_info.argument_groups, + subcommands=None, + subcommand_dest=None, + ) + + # Render to nodes + renderer = ArgparseRenderer(config=config, state=self.state) + return renderer.render(parser_info) + + def _build_render_config(self) -> RenderConfig: + """Build RenderConfig from directive and Sphinx config options. + + Returns + ------- + RenderConfig + Configuration for the renderer. 
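+
+        Notes
+        -----
+        Directive options take precedence over the corresponding
+        ``argparse_*`` settings from ``conf.py``.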
+ """ + # Start with Sphinx config defaults + config = RenderConfig.from_sphinx_config(self.config) + + # Override with directive options + if "heading-level" in self.options: + config.heading_level = self.options["heading-level"] + if "use-rubric" in self.options: + config.use_rubric = True + if "flatten-subcommands" in self.options: + config.flatten_subcommands = True + # Handle both new-style and sphinx-argparse compatibility options + if "no-defaults" in self.options or "nodefault" in self.options: + config.show_defaults = False + if "no-choices" in self.options: + config.show_choices = False + if "no-types" in self.options: + config.show_types = False + + return config + + def _navigate_to_subparser( + self, parser: argparse.ArgumentParser, path: str + ) -> argparse.ArgumentParser | None: + """Navigate to a nested subparser by path. + + Parameters + ---------- + parser : argparse.ArgumentParser + The root parser. + path : str + Space-separated path to the subparser (e.g., "sync pull"). + + Returns + ------- + argparse.ArgumentParser | None + The subparser, or None if not found. + """ + import argparse as argparse_module + + current = parser + for name in path.split(): + # Find subparsers action + subparser_action = None + for action in current._actions: + if isinstance(action, argparse_module._SubParsersAction): + subparser_action = action + break + + if subparser_action is None: + return None + + # Find the named subparser + choices = subparser_action.choices or {} + if name not in choices: + return None + + current = choices[name] + + return current diff --git a/docs/_ext/sphinx_argparse_neo/myst.py b/docs/_ext/sphinx_argparse_neo/myst.py new file mode 100644 index 000000000..eac31405c --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/myst.py @@ -0,0 +1,306 @@ +"""MyST markdown support for argparse help text. + +This module provides utilities for parsing help text that may contain +MyST markdown syntax, converting it to docutils nodes. +""" + +from __future__ import annotations + +import re +import typing as t + +from docutils import nodes +from docutils.statemachine import StringList + +if t.TYPE_CHECKING: + from docutils.parsers.rst.states import RSTStateMachine + + +# Patterns that indicate MyST markdown +MYST_PATTERNS = [ + re.compile(r"^```"), # Fenced code blocks + re.compile(r"^\{[a-z]+\}"), # MyST directives {note}, {warning}, etc. + re.compile(r"\[.+\]\(.+\)"), # Markdown links [text](url) + re.compile(r"!\[.+\]\(.+\)"), # Markdown images ![alt](url) +] + +# Patterns that indicate RST +RST_PATTERNS = [ + re.compile(r"^\.\. "), # RST directives + re.compile(r":ref:`"), # RST roles + re.compile(r":doc:`"), # RST doc role + re.compile(r":class:`"), # RST class role + re.compile(r":func:`"), # RST func role + re.compile(r":meth:`"), # RST method role + re.compile(r":mod:`"), # RST module role +] + + +def detect_format(text: str) -> str: + r"""Detect whether text is RST or MyST format. + + Parameters + ---------- + text : str + The text to analyze. + + Returns + ------- + str + Either "rst" or "myst". 
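+
+    Notes
+    -----
+    MyST patterns are checked before RST patterns; text matching neither
+    defaults to "rst", since plain text is already valid RST.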
+ + Examples + -------- + >>> detect_format("Plain text without markup") + 'rst' + >>> detect_format("Check :ref:`docs` for more") + 'rst' + >>> detect_format("See [the docs](https://example.com)") + 'myst' + >>> detect_format("```python\\ncode\\n```") + 'myst' + >>> detect_format("{note}\\nThis is important") + 'myst' + """ + # Check for MyST patterns + for pattern in MYST_PATTERNS: + if pattern.search(text): + return "myst" + + # Check for RST patterns + for pattern in RST_PATTERNS: + if pattern.search(text): + return "rst" + + # Default to RST (simpler, and plain text is valid RST) + return "rst" + + +def parse_myst(text: str) -> list[nodes.Node]: + """Parse MyST markdown text to docutils nodes. + + This is a simplified parser that handles common MyST patterns. + For full MyST support, use the myst-parser extension. + + Parameters + ---------- + text : str + MyST markdown text. + + Returns + ------- + list[nodes.Node] + Parsed docutils nodes. + + Examples + -------- + >>> result = parse_myst("Simple text") + >>> len(result) + 1 + >>> isinstance(result[0], nodes.paragraph) + True + """ + result_nodes: list[nodes.Node] = [] + + # Split into paragraphs + paragraphs = text.split("\n\n") + + for para_text in paragraphs: + para_text = para_text.strip() + if not para_text: + continue + + # Check for fenced code blocks + if para_text.startswith("```"): + code_node = _parse_fenced_code(para_text) + if code_node: + result_nodes.append(code_node) + continue + + # Parse as paragraph with inline markup + para = nodes.paragraph() + _parse_inline_myst(para_text, para) + result_nodes.append(para) + + return result_nodes + + +def _parse_fenced_code(text: str) -> nodes.literal_block | None: + r"""Parse a fenced code block. + + Parameters + ---------- + text : str + Text starting with ```. + + Returns + ------- + nodes.literal_block | None + Code block node, or None if parsing fails. + + Examples + -------- + >>> node = _parse_fenced_code("```python\\nprint('hi')\\n```") + >>> node["language"] + 'python' + >>> "print" in node.astext() + True + """ + lines = text.split("\n") + if len(lines) < 2: + return None + + # First line: ```language + first_line = lines[0].strip() + if not first_line.startswith("```"): + return None + + language = first_line[3:].strip() or "text" + + # Find closing ``` + code_lines: list[str] = [] + for line in lines[1:]: + if line.strip() == "```": + break + code_lines.append(line) + + code_text = "\n".join(code_lines) + node = nodes.literal_block(code_text, code_text) + node["language"] = language + return node + + +def _parse_inline_myst(text: str, parent: nodes.Element) -> None: + """Parse inline MyST markup and add to parent node. + + Parameters + ---------- + text : str + Text with potential inline markup. + parent : nodes.Element + Parent node to add children to. + """ + # Pattern for markdown links: [text](url) + link_pattern = re.compile(r"\[([^\]]+)\]\(([^)]+)\)") + + # Pattern for inline code: `code` + code_pattern = re.compile(r"`([^`]+)`") + + # Pattern for bold: **text** + bold_pattern = re.compile(r"\*\*([^*]+)\*\*") + + # Pattern for italic: *text* or _text_ + italic_pattern = re.compile(r"(? 0: + parent.append(nodes.Text(remaining[:start])) + + # Add matched node + if node is not None: + parent.append(node) + + # Continue with remaining text + remaining = remaining[end:] + + +def parse_help_text( + text: str, + help_format: str, + state: RSTStateMachine | None = None, +) -> list[nodes.Node]: + """Parse help text to docutils nodes. 
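+
+    With ``help_format="auto"`` the format is sniffed via ``detect_format``.
+    MyST text is handled by ``parse_myst``; RST text is parsed with the
+    state machine when one is available, else wrapped in a plain paragraph.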
+ + Parameters + ---------- + text : str + The help text to parse. + help_format : str + The format: "rst", "myst", or "auto". + state : RSTStateMachine | None + RST state machine for parsing RST content. + + Returns + ------- + list[nodes.Node] + Parsed docutils nodes. + + Examples + -------- + >>> nodes_list = parse_help_text("Simple help text", "auto") + >>> len(nodes_list) + 1 + >>> parse_help_text("See [docs](url)", "auto")[0].__class__.__name__ + 'paragraph' + """ + if not text: + return [] + + # Determine format + if help_format == "auto": + help_format = detect_format(text) + + if help_format == "myst": + return parse_myst(text) + + # RST format + if state is not None: + # Use the state machine to parse RST + container = nodes.container() + state.nested_parse( + StringList(text.split("\n")), + 0, + container, + ) + return list(container.children) + + # No state machine, return simple paragraph + para = nodes.paragraph(text=text) + return [para] diff --git a/docs/_ext/sphinx_argparse_neo/nodes.py b/docs/_ext/sphinx_argparse_neo/nodes.py new file mode 100644 index 000000000..97f71a260 --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/nodes.py @@ -0,0 +1,562 @@ +"""Custom docutils node types for argparse documentation. + +This module defines custom node types that represent the structure of +CLI documentation, along with HTML visitor functions for rendering. +""" + +from __future__ import annotations + +import typing as t + +from docutils import nodes + +if t.TYPE_CHECKING: + from sphinx.writers.html5 import HTML5Translator + +# Import the lexer - use absolute import from parent package +import sys +from pathlib import Path + +# Add parent directory to path for lexer import +_ext_dir = Path(__file__).parent.parent +if str(_ext_dir) not in sys.path: + sys.path.insert(0, str(_ext_dir)) + +from argparse_lexer import ArgparseUsageLexer # noqa: E402 + + +def _token_to_css_class(token_type: t.Any) -> str: + """Map a Pygments token type to its CSS class abbreviation. + + Pygments uses hierarchical token names like Token.Name.Attribute. + These map to CSS classes using abbreviations of the last two parts: + - Token.Name.Attribute → 'na' (Name.Attribute) + - Token.Generic.Heading → 'gh' (Generic.Heading) + - Token.Punctuation → 'p' (just Punctuation) + + Parameters + ---------- + token_type : Any + A Pygments token type (from pygments.token). + + Returns + ------- + str + CSS class abbreviation, or empty string if not mappable. + + Examples + -------- + >>> from pygments.token import Token + >>> _token_to_css_class(Token.Name.Attribute) + 'na' + >>> _token_to_css_class(Token.Generic.Heading) + 'gh' + >>> _token_to_css_class(Token.Punctuation) + 'p' + >>> _token_to_css_class(Token.Text.Whitespace) + 'tw' + """ + type_str = str(token_type) + # Token string looks like "Token.Name.Attribute" or "Token.Punctuation" + parts = type_str.split(".") + + if len(parts) >= 3: + # Token.Name.Attribute -> "na" (first char of each of last two parts) + return parts[-2][0].lower() + parts[-1][0].lower() + elif len(parts) == 2: + # Token.Punctuation -> "p" (first char of last part) + return parts[-1][0].lower() + return "" + + +def _highlight_usage(usage_text: str, encode: t.Callable[[str], str]) -> str: + """Tokenize usage text and wrap tokens in highlighted span elements. + + Uses ArgparseUsageLexer to tokenize the usage string, then wraps each + token in a with the appropriate CSS class for styling. 
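+
+    Each classed token is wrapped in a ``<span class="...">`` element;
+    whitespace and plain-text tokens pass through unwrapped.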
+ + Parameters + ---------- + usage_text : str + The usage string to highlight (should include "usage: " prefix). + encode : Callable[[str], str] + HTML encoding function (typically translator.encode). + + Returns + ------- + str + HTML string with tokens wrapped in styled elements. + + Examples + -------- + >>> def mock_encode(s: str) -> str: + ... return s.replace("&", "&").replace("<", "<") + >>> html = _highlight_usage("usage: cmd [-h]", mock_encode) + >>> 'usage:' in html + True + >>> 'cmd' in html + True + >>> '-h' in html + True + """ + lexer = ArgparseUsageLexer() + parts: list[str] = [] + + for tok_type, tok_value in lexer.get_tokens(usage_text): + if not tok_value: + continue + + css_class = _token_to_css_class(tok_type) + escaped = encode(tok_value) + type_str = str(tok_type).lower() + + # Skip wrapping for whitespace and plain text tokens + if css_class and "whitespace" not in type_str and "text" not in type_str: + parts.append(f'{escaped}') + else: + parts.append(escaped) + + return "".join(parts) + + +def _highlight_argument_names( + names: list[str], metavar: str | None, encode: t.Callable[[str], str] +) -> str: + """Highlight argument names and metavar with appropriate CSS classes. + + Short options (-h) get class 'na' (Name.Attribute). + Long options (--help) get class 'nt' (Name.Tag). + Positional arguments get class 'nl' (Name.Label). + Metavars get class 'nv' (Name.Variable). + + Parameters + ---------- + names : list[str] + List of argument names (e.g., ["-v", "--verbose"]). + metavar : str | None + Optional metavar (e.g., "FILE", "PATH"). + encode : Callable[[str], str] + HTML encoding function. + + Returns + ------- + str + HTML string with highlighted argument signature. + + Examples + -------- + >>> def mock_encode(s: str) -> str: + ... return s + >>> html = _highlight_argument_names(["-h", "--help"], None, mock_encode) + >>> '-h' in html + True + >>> '--help' in html + True + >>> html = _highlight_argument_names(["--output"], "FILE", mock_encode) + >>> 'FILE' in html + True + >>> html = _highlight_argument_names(["sync"], None, mock_encode) + >>> 'sync' in html + True + """ + sig_parts: list[str] = [] + + for name in names: + escaped = encode(name) + if name.startswith("--"): + sig_parts.append(f'{escaped}') + elif name.startswith("-"): + sig_parts.append(f'{escaped}') + else: + # Positional argument or subcommand + sig_parts.append(f'{escaped}') + + result = ", ".join(sig_parts) + + if metavar: + escaped_metavar = encode(metavar) + result = f'{result} {escaped_metavar}' + + return result + + +class argparse_program(nodes.General, nodes.Element): + """Root node for an argparse program documentation block. + + Attributes + ---------- + prog : str + The program name. + + Examples + -------- + >>> node = argparse_program() + >>> node["prog"] = "myapp" + >>> node["prog"] + 'myapp' + """ + + pass + + +class argparse_usage(nodes.General, nodes.Element): + """Node for displaying program usage. + + Contains the usage string as a literal block. + + Examples + -------- + >>> node = argparse_usage() + >>> node["usage"] = "myapp [-h] [--verbose] command" + >>> node["usage"] + 'myapp [-h] [--verbose] command' + """ + + pass + + +class argparse_group(nodes.General, nodes.Element): + """Node for an argument group (positional, optional, or custom). + + Attributes + ---------- + title : str + The group title. + description : str | None + Optional group description. 
+ + Examples + -------- + >>> node = argparse_group() + >>> node["title"] = "Output Options" + >>> node["title"] + 'Output Options' + """ + + pass + + +class argparse_argument(nodes.Part, nodes.Element): + """Node for a single CLI argument. + + Attributes + ---------- + names : list[str] + Argument names/flags. + help : str | None + Help text. + default : str | None + Default value string. + choices : list[str] | None + Available choices. + required : bool + Whether the argument is required. + metavar : str | None + Metavar for display. + + Examples + -------- + >>> node = argparse_argument() + >>> node["names"] = ["-v", "--verbose"] + >>> node["names"] + ['-v', '--verbose'] + """ + + pass + + +class argparse_subcommands(nodes.General, nodes.Element): + """Container node for subcommands section. + + Examples + -------- + >>> node = argparse_subcommands() + >>> node["title"] = "Commands" + >>> node["title"] + 'Commands' + """ + + pass + + +class argparse_subcommand(nodes.General, nodes.Element): + """Node for a single subcommand. + + Attributes + ---------- + name : str + Subcommand name. + aliases : list[str] + Subcommand aliases. + help : str | None + Subcommand help text. + + Examples + -------- + >>> node = argparse_subcommand() + >>> node["name"] = "sync" + >>> node["aliases"] = ["s"] + >>> node["name"] + 'sync' + """ + + pass + + +# HTML Visitor Functions + + +def visit_argparse_program_html(self: HTML5Translator, node: argparse_program) -> None: + """Visit argparse_program node - start program container. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_program + The program node being visited. + """ + prog = node.get("prog", "") + self.body.append(f'
<div class="argparse-program" id="argparse-{prog}">\n')
+
+
+def depart_argparse_program_html(self: HTML5Translator, node: argparse_program) -> None:
+    """Depart argparse_program node - close program container.
+
+    Parameters
+    ----------
+    self : HTML5Translator
+        The Sphinx HTML translator.
+    node : argparse_program
+        The program node being departed.
+    """
+    self.body.append("</div>
\n") + + +def visit_argparse_usage_html(self: HTML5Translator, node: argparse_usage) -> None: + """Visit argparse_usage node - render usage block with syntax highlighting. + + The usage text is tokenized using ArgparseUsageLexer and wrapped in + styled elements for semantic highlighting of options, metavars, + commands, and punctuation. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_usage + The usage node being visited. + """ + usage = node.get("usage", "") + # Add both argparse-usage class and highlight class for CSS targeting + self.body.append('
<pre class="argparse-usage highlight-argparse-usage">')
+    # Prepend "usage: " and highlight the full usage string
+    highlighted = _highlight_usage(f"usage: {usage}", self.encode)
+    self.body.append(highlighted)
+
+
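+# Taken together, the visit/depart pair for usage emits markup roughly like
+# the following for "usage: cmd [-h]" (a sketch; exact attributes may vary):
+#
+#   <pre class="argparse-usage highlight-argparse-usage"><span class="gh">usage:</span>
+#   <span class="nl">cmd</span> <span class="p">[</span><span class="na">-h</span><span class="p">]</span></pre>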
+def depart_argparse_usage_html(self: HTML5Translator, node: argparse_usage) -> None:
+    """Depart argparse_usage node - close usage block.
+
+    Parameters
+    ----------
+    self : HTML5Translator
+        The Sphinx HTML translator.
+    node : argparse_usage
+        The usage node being departed.
+    """
+    self.body.append("</pre>
\n") + + +def visit_argparse_group_html(self: HTML5Translator, node: argparse_group) -> None: + """Visit argparse_group node - start argument group. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_group + The group node being visited. + """ + title = node.get("title", "") + group_id = title.lower().replace(" ", "-") + self.body.append(f'
<div class="argparse-group" id="argparse-group-{group_id}">\n')
+    if title:
+        self.body.append(f'<p class="argparse-group-title">{self.encode(title)}</p>
\n')
+    description = node.get("description")
+    if description:
+        self.body.append(
+            f'<p class="argparse-group-description">{self.encode(description)}</p>
\n' + ) + self.body.append('
<dl class="argparse-arguments">\n')
+
+
+def depart_argparse_group_html(self: HTML5Translator, node: argparse_group) -> None:
+    """Depart argparse_group node - close argument group.
+
+    Parameters
+    ----------
+    self : HTML5Translator
+        The Sphinx HTML translator.
+    node : argparse_group
+        The group node being departed.
+    """
+    self.body.append("</dl>
\n")
+    self.body.append("</div>
\n") + + +def visit_argparse_argument_html( + self: HTML5Translator, node: argparse_argument +) -> None: + """Visit argparse_argument node - render argument entry with highlighting. + + Argument names are highlighted with semantic CSS classes: + - Short options (-h) get class 'na' (Name.Attribute) + - Long options (--help) get class 'nt' (Name.Tag) + - Positional arguments get class 'nl' (Name.Label) + - Metavars get class 'nv' (Name.Variable) + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_argument + The argument node being visited. + """ + names = node.get("names", []) + metavar = node.get("metavar") + + # Build the argument signature with syntax highlighting + highlighted_sig = _highlight_argument_names(names, metavar, self.encode) + + self.body.append(f'
<dt>{highlighted_sig}</dt>
\n') + self.body.append('
<dd>')
+
+    # Add help text
+    help_text = node.get("help")
+    if help_text:
+        self.body.append(f"<p>{self.encode(help_text)}</p>
") + + +def depart_argparse_argument_html( + self: HTML5Translator, node: argparse_argument +) -> None: + """Depart argparse_argument node - close argument entry. + + Adds default, choices, and type information if present. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_argument + The argument node being departed. + """ + # Add metadata (default, choices, type) + metadata: list[str] = [] + + default = node.get("default_string") + if default is not None: + metadata.append(f"Default: {self.encode(default)}") + + choices = node.get("choices") + if choices: + choices_str = ", ".join(str(c) for c in choices) + metadata.append(f"Choices: {self.encode(choices_str)}") + + type_name = node.get("type_name") + if type_name: + metadata.append(f"Type: {self.encode(type_name)}") + + required = node.get("required", False) + if required: + metadata.append("Required") + + if metadata: + meta_str = " | ".join(metadata) + self.body.append(f'
<p class="argparse-argument-meta">{meta_str}</p>
')
+
+    self.body.append("</dd>
\n") + + +def visit_argparse_subcommands_html( + self: HTML5Translator, node: argparse_subcommands +) -> None: + """Visit argparse_subcommands node - start subcommands section. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_subcommands + The subcommands node being visited. + """ + title = node.get("title", "Sub-commands") + self.body.append('
<div class="argparse-subcommands">\n')
+    self.body.append(
+        f'<p class="argparse-subcommands-title">{self.encode(title)}</p>
\n'
+    )
+
+
+def depart_argparse_subcommands_html(
+    self: HTML5Translator, node: argparse_subcommands
+) -> None:
+    """Depart argparse_subcommands node - close subcommands section.
+
+    Parameters
+    ----------
+    self : HTML5Translator
+        The Sphinx HTML translator.
+    node : argparse_subcommands
+        The subcommands node being departed.
+    """
+    self.body.append("</div>
\n") + + +def visit_argparse_subcommand_html( + self: HTML5Translator, node: argparse_subcommand +) -> None: + """Visit argparse_subcommand node - start subcommand entry. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_subcommand + The subcommand node being visited. + """ + name = node.get("name", "") + aliases = node.get("aliases", []) + + self.body.append(f'
<div class="argparse-subcommand" id="argparse-subcommand-{name}">\n')
+
+    # Subcommand header
+    header = name
+    if aliases:
+        alias_str = ", ".join(aliases)
+        header = f"{name} ({alias_str})"
+    self.body.append(
+        f'<p class="argparse-subcommand-title">{self.encode(header)}</p>
\n'
+    )
+
+    # Help text
+    help_text = node.get("help")
+    if help_text:
+        self.body.append(
+            f'<p class="argparse-subcommand-help">{self.encode(help_text)}</p>
\n'
+        )
+
+
+def depart_argparse_subcommand_html(
+    self: HTML5Translator, node: argparse_subcommand
+) -> None:
+    """Depart argparse_subcommand node - close subcommand entry.
+
+    Parameters
+    ----------
+    self : HTML5Translator
+        The Sphinx HTML translator.
+    node : argparse_subcommand
+        The subcommand node being departed.
+    """
+    self.body.append("</div>
\n") diff --git a/docs/_ext/sphinx_argparse_neo/parser.py b/docs/_ext/sphinx_argparse_neo/parser.py new file mode 100644 index 000000000..c1718ef09 --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/parser.py @@ -0,0 +1,625 @@ +"""Argparse introspection - extract structured data from ArgumentParser. + +This module provides dataclasses and functions to introspect argparse +ArgumentParser instances and convert them into structured data suitable +for documentation rendering. +""" + +from __future__ import annotations + +import argparse +import dataclasses +import typing as t + +# Sentinel for "no default" (distinct from None which is a valid default) +NO_DEFAULT = object() + + +@dataclasses.dataclass +class ArgumentInfo: + """Represents a single CLI argument. + + Examples + -------- + >>> info = ArgumentInfo( + ... names=["-v", "--verbose"], + ... help="Enable verbose output", + ... default=False, + ... default_string="False", + ... choices=None, + ... required=False, + ... metavar=None, + ... nargs=None, + ... action="store_true", + ... type_name=None, + ... const=True, + ... dest="verbose", + ... ) + >>> info.names + ['-v', '--verbose'] + >>> info.is_positional + False + """ + + names: list[str] + help: str | None + default: t.Any + default_string: str | None + choices: list[t.Any] | None + required: bool + metavar: str | None + nargs: str | int | None + action: str + type_name: str | None + const: t.Any + dest: str + + @property + def is_positional(self) -> bool: + """Return True if this is a positional argument. + + Examples + -------- + >>> ArgumentInfo( + ... names=["filename"], + ... help=None, + ... default=None, + ... default_string=None, + ... choices=None, + ... required=True, + ... metavar=None, + ... nargs=None, + ... action="store", + ... type_name=None, + ... const=None, + ... dest="filename", + ... ).is_positional + True + >>> ArgumentInfo( + ... names=["-f", "--file"], + ... help=None, + ... default=None, + ... default_string=None, + ... choices=None, + ... required=False, + ... metavar=None, + ... nargs=None, + ... action="store", + ... type_name=None, + ... const=None, + ... dest="file", + ... ).is_positional + False + """ + return bool(self.names) and not self.names[0].startswith("-") + + +@dataclasses.dataclass +class MutuallyExclusiveGroup: + """Arguments that cannot be used together. + + Examples + -------- + >>> group = MutuallyExclusiveGroup(arguments=[], required=True) + >>> group.required + True + """ + + arguments: list[ArgumentInfo] + required: bool + + +@dataclasses.dataclass +class ArgumentGroup: + """Named group of arguments. + + Examples + -------- + >>> group = ArgumentGroup( + ... title="Output Options", + ... description="Control output format", + ... arguments=[], + ... mutually_exclusive=[], + ... ) + >>> group.title + 'Output Options' + """ + + title: str + description: str | None + arguments: list[ArgumentInfo] + mutually_exclusive: list[MutuallyExclusiveGroup] + + +@dataclasses.dataclass +class SubcommandInfo: + """A subparser/subcommand. + + Examples + -------- + >>> sub = SubcommandInfo( + ... name="sync", + ... aliases=["s"], + ... help="Synchronize repositories", + ... parser=None, # type: ignore[arg-type] + ... ) + >>> sub.aliases + ['s'] + """ + + name: str + aliases: list[str] + help: str | None + parser: ParserInfo # Recursive reference + + +@dataclasses.dataclass +class ParserInfo: + """Complete parsed ArgumentParser. + + Examples + -------- + >>> info = ParserInfo( + ... prog="myapp", + ... usage=None, + ... 
bare_usage="myapp [-h] command", + ... description="My application", + ... epilog=None, + ... argument_groups=[], + ... subcommands=None, + ... subcommand_dest=None, + ... ) + >>> info.prog + 'myapp' + """ + + prog: str + usage: str | None + bare_usage: str + description: str | None + epilog: str | None + argument_groups: list[ArgumentGroup] + subcommands: list[SubcommandInfo] | None + subcommand_dest: str | None + + +def _format_default(default: t.Any) -> str | None: + """Format a default value for display. + + Parameters + ---------- + default : t.Any + The default value to format. + + Returns + ------- + str | None + Formatted string representation, or None if suppressed/unset. + + Examples + -------- + >>> _format_default(None) + 'None' + >>> _format_default("hello") + 'hello' + >>> _format_default(42) + '42' + >>> _format_default(argparse.SUPPRESS) is None + True + >>> _format_default([1, 2, 3]) + '[1, 2, 3]' + """ + if default is argparse.SUPPRESS: + return None + if default is None: + return "None" + if isinstance(default, str): + return default + return repr(default) + + +def _get_type_name(action: argparse.Action) -> str | None: + """Extract the type name from an action. + + Parameters + ---------- + action : argparse.Action + The argparse action to inspect. + + Returns + ------- + str | None + The type name, or None if no type is specified. + + Examples + -------- + >>> parser = argparse.ArgumentParser() + >>> action = parser.add_argument("--count", type=int) + >>> _get_type_name(action) + 'int' + >>> action2 = parser.add_argument("--name") + >>> _get_type_name(action2) is None + True + """ + if action.type is None: + return None + if hasattr(action.type, "__name__"): + return action.type.__name__ + return str(action.type) + + +def _get_action_name(action: argparse.Action) -> str: + """Get the action type name. + + Parameters + ---------- + action : argparse.Action + The argparse action to inspect. + + Returns + ------- + str + The action type name. + + Examples + -------- + >>> parser = argparse.ArgumentParser() + >>> action = parser.add_argument("--verbose", action="store_true") + >>> _get_action_name(action) + 'store_true' + >>> action2 = parser.add_argument("--file") + >>> _get_action_name(action2) + 'store' + """ + # Map action classes to their string names + action_class = type(action).__name__ + action_map = { + "_StoreAction": "store", + "_StoreTrueAction": "store_true", + "_StoreFalseAction": "store_false", + "_StoreConstAction": "store_const", + "_AppendAction": "append", + "_AppendConstAction": "append_const", + "_CountAction": "count", + "_HelpAction": "help", + "_VersionAction": "version", + "_ExtendAction": "extend", + "BooleanOptionalAction": "boolean_optional", + } + return action_map.get(action_class, action_class.lower()) + + +def _extract_argument(action: argparse.Action) -> ArgumentInfo: + """Extract ArgumentInfo from an argparse Action. + + Parameters + ---------- + action : argparse.Action + The argparse action to extract information from. + + Returns + ------- + ArgumentInfo + Structured argument information. + + Examples + -------- + >>> parser = argparse.ArgumentParser() + >>> action = parser.add_argument( + ... "-v", "--verbose", + ... action="store_true", + ... help="Enable verbose mode", + ... 
) + >>> info = _extract_argument(action) + >>> info.names + ['-v', '--verbose'] + >>> info.action + 'store_true' + """ + # Determine names - option_strings for optionals, dest for positionals + names = list(action.option_strings) if action.option_strings else [action.dest] + + # Determine if required + required = action.required if hasattr(action, "required") else False + # Positional arguments are required by default (unless nargs makes them optional) + if not action.option_strings: + required = action.nargs not in ("?", "*") + + # Format metavar + metavar = action.metavar + if isinstance(metavar, tuple): + metavar = " ".join(metavar) + + # Handle default + default = action.default + default_string = _format_default(default) + + return ArgumentInfo( + names=names, + help=action.help if action.help != argparse.SUPPRESS else None, + default=default if default is not argparse.SUPPRESS else NO_DEFAULT, + default_string=default_string, + choices=list(action.choices) if action.choices else None, + required=required, + metavar=metavar, + nargs=action.nargs, + action=_get_action_name(action), + type_name=_get_type_name(action), + const=action.const, + dest=action.dest, + ) + + +def _extract_mutex_groups( + parser: argparse.ArgumentParser, +) -> dict[int, MutuallyExclusiveGroup]: + """Extract mutually exclusive groups from a parser. + + Parameters + ---------- + parser : argparse.ArgumentParser + The parser to extract from. + + Returns + ------- + dict[int, MutuallyExclusiveGroup] + Mapping from action id to the MutuallyExclusiveGroup it belongs to. + """ + mutex_map: dict[int, MutuallyExclusiveGroup] = {} + + for mutex_group in parser._mutually_exclusive_groups: + group_info = MutuallyExclusiveGroup( + arguments=[ + _extract_argument(action) + for action in mutex_group._group_actions + if action.help != argparse.SUPPRESS + ], + required=mutex_group.required, + ) + for action in mutex_group._group_actions: + mutex_map[id(action)] = group_info + + return mutex_map + + +def _extract_argument_groups( + parser: argparse.ArgumentParser, + hide_suppressed: bool = True, +) -> list[ArgumentGroup]: + """Extract argument groups from a parser. + + Parameters + ---------- + parser : argparse.ArgumentParser + The parser to extract from. + hide_suppressed : bool + Whether to hide arguments with SUPPRESS help. + + Returns + ------- + list[ArgumentGroup] + List of argument groups. 
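+
+    Notes
+    -----
+    Help and subparser actions are skipped, and each mutually exclusive
+    group is attached to whichever argument group its actions appear in,
+    deduplicated by identity.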
+ + Examples + -------- + >>> parser = argparse.ArgumentParser(description="Test") + >>> _ = parser.add_argument("filename", help="Input file") + >>> _ = parser.add_argument("-v", "--verbose", action="store_true") + >>> groups = _extract_argument_groups(parser) + >>> len(groups) >= 2 # positional and optional groups + True + """ + mutex_map = _extract_mutex_groups(parser) + seen_mutex: set[int] = set() + groups: list[ArgumentGroup] = [] + + for group in parser._action_groups: + arguments: list[ArgumentInfo] = [] + mutex_groups: list[MutuallyExclusiveGroup] = [] + + for action in group._group_actions: + # Skip help action and suppressed actions + if isinstance(action, argparse._HelpAction): + continue + if hide_suppressed and action.help == argparse.SUPPRESS: + continue + # Skip subparser actions - handled separately + if isinstance(action, argparse._SubParsersAction): + continue + + # Check if this action is in a mutex group + if id(action) in mutex_map: + mutex_info = mutex_map[id(action)] + mutex_id = id(mutex_info) + if mutex_id not in seen_mutex: + seen_mutex.add(mutex_id) + mutex_groups.append(mutex_info) + else: + arguments.append(_extract_argument(action)) + + # Skip empty groups + if not arguments and not mutex_groups: + continue + + groups.append( + ArgumentGroup( + title=group.title or "", + description=group.description, + arguments=arguments, + mutually_exclusive=mutex_groups, + ) + ) + + return groups + + +def _extract_subcommands( + parser: argparse.ArgumentParser, + hide_suppressed: bool = True, +) -> tuple[list[SubcommandInfo] | None, str | None]: + """Extract subcommands from a parser. + + Parameters + ---------- + parser : argparse.ArgumentParser + The parser to extract from. + hide_suppressed : bool + Whether to hide subcommands with SUPPRESS help. + + Returns + ------- + tuple[list[SubcommandInfo] | None, str | None] + Tuple of (subcommands list, destination variable name). 
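+
+    Notes
+    -----
+    Aliases are detected by identity: a choice name that maps to the same
+    subparser object as an earlier name is recorded as an alias of it.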
+ + Examples + -------- + >>> parser = argparse.ArgumentParser() + >>> subparsers = parser.add_subparsers(dest="command") + >>> _ = subparsers.add_parser("sync", help="Sync repos") + >>> _ = subparsers.add_parser("add", help="Add repo") + >>> subs, dest = _extract_subcommands(parser) + >>> dest + 'command' + >>> len(subs) + 2 + """ + for action in parser._actions: + if isinstance(action, argparse._SubParsersAction): + subcommands: list[SubcommandInfo] = [] + + # Get the choices (subparsers) + choices = action.choices or {} + + # Build reverse mapping of aliases + # action._parser_class might have name_parser_map with aliases + alias_map: dict[str, list[str]] = {} + seen_parsers: dict[int, str] = {} + + for name, subparser in choices.items(): + parser_id = id(subparser) + if parser_id in seen_parsers: + # This is an alias + primary = seen_parsers[parser_id] + if primary not in alias_map: + alias_map[primary] = [] + alias_map[primary].append(name) + else: + seen_parsers[parser_id] = name + + # Now extract subcommand info + processed: set[int] = set() + for name, subparser in choices.items(): + parser_id = id(subparser) + if parser_id in processed: + continue + processed.add(parser_id) + + # Get help text + help_text: str | None = None + if hasattr(action, "_choices_actions"): + for choice_action in action._choices_actions: + if choice_action.dest == name: + help_text = choice_action.help + break + + if hide_suppressed and help_text == argparse.SUPPRESS: + continue + + # Recursively extract parser info + sub_info = extract_parser(subparser, hide_suppressed=hide_suppressed) + + subcommands.append( + SubcommandInfo( + name=name, + aliases=alias_map.get(name, []), + help=help_text, + parser=sub_info, + ) + ) + + return subcommands, action.dest + + return None, None + + +def _generate_usage(parser: argparse.ArgumentParser) -> str: + """Generate the usage string for a parser. + + Parameters + ---------- + parser : argparse.ArgumentParser + The parser to generate usage for. + + Returns + ------- + str + The bare usage string (without "usage: " prefix). + + Examples + -------- + >>> parser = argparse.ArgumentParser(prog="myapp") + >>> _ = parser.add_argument("-v", "--verbose", action="store_true") + >>> usage = _generate_usage(parser) + >>> "myapp" in usage + True + """ + # Use argparse's built-in formatter to generate usage + formatter = parser._get_formatter() + formatter.add_usage( + parser.usage, parser._actions, parser._mutually_exclusive_groups + ) + usage = formatter.format_help().strip() + + # Remove "usage: " prefix if present + if usage.lower().startswith("usage:"): + usage = usage[6:].strip() + + return usage + + +def extract_parser( + parser: argparse.ArgumentParser, + hide_suppressed: bool = True, +) -> ParserInfo: + """Extract complete parser information. + + Parameters + ---------- + parser : argparse.ArgumentParser + The parser to extract information from. + hide_suppressed : bool + Whether to hide arguments/subcommands with SUPPRESS help. + + Returns + ------- + ParserInfo + Complete structured parser information. + + Examples + -------- + >>> parser = argparse.ArgumentParser( + ... prog="myapp", + ... description="My application", + ... 
) + >>> _ = parser.add_argument("filename", help="Input file") + >>> _ = parser.add_argument("-v", "--verbose", action="store_true") + >>> info = extract_parser(parser) + >>> info.prog + 'myapp' + >>> info.description + 'My application' + >>> len(info.argument_groups) >= 1 + True + """ + subcommands, subcommand_dest = _extract_subcommands(parser, hide_suppressed) + + return ParserInfo( + prog=parser.prog, + usage=parser.usage, + bare_usage=_generate_usage(parser), + description=parser.description, + epilog=parser.epilog, + argument_groups=_extract_argument_groups(parser, hide_suppressed), + subcommands=subcommands, + subcommand_dest=subcommand_dest, + ) diff --git a/docs/_ext/sphinx_argparse_neo/renderer.py b/docs/_ext/sphinx_argparse_neo/renderer.py new file mode 100644 index 000000000..c9577ba9d --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/renderer.py @@ -0,0 +1,429 @@ +"""Renderer - convert ParserInfo to docutils nodes. + +This module provides the ArgparseRenderer class that transforms +structured parser information into docutils nodes for documentation. +""" + +from __future__ import annotations + +import typing as t + +from docutils import nodes +from docutils.statemachine import StringList +from sphinx_argparse_neo.nodes import ( + argparse_argument, + argparse_group, + argparse_program, + argparse_subcommand, + argparse_subcommands, + argparse_usage, +) +from sphinx_argparse_neo.parser import ( + ArgumentGroup, + ArgumentInfo, + MutuallyExclusiveGroup, + ParserInfo, + SubcommandInfo, +) + +if t.TYPE_CHECKING: + from docutils.parsers.rst.states import RSTStateMachine + from sphinx.config import Config + + +@t.dataclass_transform() +class RenderConfig: + """Configuration for the renderer. + + Examples + -------- + >>> config = RenderConfig() + >>> config.heading_level + 2 + >>> config.use_rubric + False + """ + + heading_level: int = 2 + use_rubric: bool = False + group_title_prefix: str = "" + include_in_toc: bool = True + toc_depth: int = 2 + flatten_subcommands: bool = False + subcommand_style: str = "nested" + show_defaults: bool = True + show_choices: bool = True + show_types: bool = True + help_format: str = "rst" + usage_style: str = "literal" + + def __init__( + self, + heading_level: int = 2, + use_rubric: bool = False, + group_title_prefix: str = "", + include_in_toc: bool = True, + toc_depth: int = 2, + flatten_subcommands: bool = False, + subcommand_style: str = "nested", + show_defaults: bool = True, + show_choices: bool = True, + show_types: bool = True, + help_format: str = "rst", + usage_style: str = "literal", + ) -> None: + """Initialize render configuration.""" + self.heading_level = heading_level + self.use_rubric = use_rubric + self.group_title_prefix = group_title_prefix + self.include_in_toc = include_in_toc + self.toc_depth = toc_depth + self.flatten_subcommands = flatten_subcommands + self.subcommand_style = subcommand_style + self.show_defaults = show_defaults + self.show_choices = show_choices + self.show_types = show_types + self.help_format = help_format + self.usage_style = usage_style + + @classmethod + def from_sphinx_config(cls, config: Config) -> RenderConfig: + """Create RenderConfig from Sphinx configuration. + + Parameters + ---------- + config : Config + Sphinx configuration object. + + Returns + ------- + RenderConfig + Render configuration based on Sphinx config values. 
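+
+        Notes
+        -----
+        Each value is read with ``getattr`` so that unset ``argparse_*``
+        options fall back to the same defaults as ``__init__``.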
+ """ + return cls( + heading_level=getattr(config, "argparse_heading_level", 2), + use_rubric=getattr(config, "argparse_use_rubric", False), + group_title_prefix=getattr(config, "argparse_group_title_prefix", ""), + include_in_toc=getattr(config, "argparse_include_in_toc", True), + toc_depth=getattr(config, "argparse_toc_depth", 2), + flatten_subcommands=getattr(config, "argparse_flatten_subcommands", False), + subcommand_style=getattr(config, "argparse_subcommand_style", "nested"), + show_defaults=getattr(config, "argparse_show_defaults", True), + show_choices=getattr(config, "argparse_show_choices", True), + show_types=getattr(config, "argparse_show_types", True), + help_format=getattr(config, "argparse_help_format", "rst"), + usage_style=getattr(config, "argparse_usage_style", "literal"), + ) + + +class ArgparseRenderer: + """Render ParserInfo to docutils nodes. + + This class can be subclassed to customize rendering behavior. + Override individual methods to change how specific elements are rendered. + + Parameters + ---------- + config : RenderConfig + Rendering configuration. + state : RSTStateMachine | None + RST state machine for parsing nested RST content. + + Examples + -------- + >>> from sphinx_argparse_neo.parser import ParserInfo + >>> config = RenderConfig() + >>> renderer = ArgparseRenderer(config) + >>> info = ParserInfo( + ... prog="myapp", + ... usage=None, + ... bare_usage="myapp [-h]", + ... description="My app", + ... epilog=None, + ... argument_groups=[], + ... subcommands=None, + ... subcommand_dest=None, + ... ) + >>> result = renderer.render(info) + >>> isinstance(result, list) + True + """ + + def __init__( + self, + config: RenderConfig | None = None, + state: RSTStateMachine | None = None, + ) -> None: + """Initialize the renderer.""" + self.config = config or RenderConfig() + self.state = state + + def render(self, parser_info: ParserInfo) -> list[nodes.Node]: + """Render a complete parser to docutils nodes. + + Parameters + ---------- + parser_info : ParserInfo + The parsed parser information. + + Returns + ------- + list[nodes.Node] + List of docutils nodes representing the documentation. + """ + result: list[nodes.Node] = [] + + # Create program container + program_node = argparse_program() + program_node["prog"] = parser_info.prog + + # Add description + if parser_info.description: + desc_nodes = self._parse_text(parser_info.description) + program_node.extend(desc_nodes) + + # Add usage + usage_node = self.render_usage(parser_info) + program_node.append(usage_node) + + # Add argument groups + for group in parser_info.argument_groups: + group_node = self.render_group(group) + program_node.append(group_node) + + # Add subcommands + if parser_info.subcommands: + subcommands_node = self.render_subcommands(parser_info.subcommands) + program_node.append(subcommands_node) + + # Add epilog + if parser_info.epilog: + epilog_nodes = self._parse_text(parser_info.epilog) + program_node.extend(epilog_nodes) + + result.append(program_node) + return self.post_process(result) + + def render_usage(self, parser_info: ParserInfo) -> argparse_usage: + """Render the usage block. + + Parameters + ---------- + parser_info : ParserInfo + The parser information. + + Returns + ------- + argparse_usage + Usage node. + """ + usage_node = argparse_usage() + usage_node["usage"] = parser_info.bare_usage + return usage_node + + def render_group(self, group: ArgumentGroup) -> argparse_group: + """Render an argument group. 
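+
+        Plain arguments are rendered first, followed by any mutually
+        exclusive groups, whose members are marked with ``mutex`` and
+        ``mutex_required`` attributes.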
+ + Parameters + ---------- + group : ArgumentGroup + The argument group to render. + + Returns + ------- + argparse_group + Group node containing argument nodes. + """ + group_node = argparse_group() + title = group.title + if self.config.group_title_prefix: + title = f"{self.config.group_title_prefix}{title}" + group_node["title"] = title + group_node["description"] = group.description + + # Add individual arguments + for arg in group.arguments: + arg_node = self.render_argument(arg) + group_node.append(arg_node) + + # Add mutually exclusive groups + for mutex in group.mutually_exclusive: + mutex_nodes = self.render_mutex_group(mutex) + group_node.extend(mutex_nodes) + + return group_node + + def render_argument(self, arg: ArgumentInfo) -> argparse_argument: + """Render a single argument. + + Parameters + ---------- + arg : ArgumentInfo + The argument to render. + + Returns + ------- + argparse_argument + Argument node. + """ + arg_node = argparse_argument() + arg_node["names"] = arg.names + arg_node["help"] = arg.help + arg_node["metavar"] = arg.metavar + arg_node["required"] = arg.required + + if self.config.show_defaults: + arg_node["default_string"] = arg.default_string + + if self.config.show_choices: + arg_node["choices"] = arg.choices + + if self.config.show_types: + arg_node["type_name"] = arg.type_name + + return arg_node + + def render_mutex_group( + self, mutex: MutuallyExclusiveGroup + ) -> list[argparse_argument]: + """Render a mutually exclusive group. + + Parameters + ---------- + mutex : MutuallyExclusiveGroup + The mutually exclusive group. + + Returns + ------- + list[argparse_argument] + List of argument nodes with mutex indicator. + """ + result: list[argparse_argument] = [] + for arg in mutex.arguments: + arg_node = self.render_argument(arg) + # Mark as part of mutex group + arg_node["mutex"] = True + arg_node["mutex_required"] = mutex.required + result.append(arg_node) + return result + + def render_subcommands( + self, subcommands: list[SubcommandInfo] + ) -> argparse_subcommands: + """Render subcommands section. + + Parameters + ---------- + subcommands : list[SubcommandInfo] + List of subcommand information. + + Returns + ------- + argparse_subcommands + Subcommands container node. + """ + container = argparse_subcommands() + container["title"] = "Sub-commands" + + for subcmd in subcommands: + subcmd_node = self.render_subcommand(subcmd) + container.append(subcmd_node) + + return container + + def render_subcommand(self, subcmd: SubcommandInfo) -> argparse_subcommand: + """Render a single subcommand. + + Parameters + ---------- + subcmd : SubcommandInfo + The subcommand information. + + Returns + ------- + argparse_subcommand + Subcommand node, potentially containing nested parser content. + """ + subcmd_node = argparse_subcommand() + subcmd_node["name"] = subcmd.name + subcmd_node["aliases"] = subcmd.aliases + subcmd_node["help"] = subcmd.help + + # Recursively render the subcommand's parser + if subcmd.parser: + nested_nodes = self.render(subcmd.parser) + subcmd_node.extend(nested_nodes) + + return subcmd_node + + def post_process(self, result_nodes: list[nodes.Node]) -> list[nodes.Node]: + """Post-process the rendered nodes. + + Override this method to apply transformations after rendering. + + Parameters + ---------- + result_nodes : list[nodes.Node] + The rendered nodes. + + Returns + ------- + list[nodes.Node] + Post-processed nodes. + """ + return result_nodes + + def _parse_text(self, text: str) -> list[nodes.Node]: + """Parse text as RST or MyST content. 
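+
+        Uses the RST state machine when available; otherwise the text is
+        wrapped in a single plain paragraph.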
+ + Parameters + ---------- + text : str + Text to parse. + + Returns + ------- + list[nodes.Node] + Parsed docutils nodes. + """ + if not text: + return [] + + if self.state is None: + # No state machine available, return as paragraph + para = nodes.paragraph(text=text) + return [para] + + # Use the state machine to parse RST + container = nodes.container() + self.state.nested_parse( + StringList(text.split("\n")), + 0, + container, + ) + return list(container.children) + + +def create_renderer( + config: RenderConfig | None = None, + state: RSTStateMachine | None = None, + renderer_class: type[ArgparseRenderer] | None = None, +) -> ArgparseRenderer: + """Create a renderer instance. + + Parameters + ---------- + config : RenderConfig | None + Rendering configuration. + state : RSTStateMachine | None + RST state machine for parsing. + renderer_class : type[ArgparseRenderer] | None + Custom renderer class to use. + + Returns + ------- + ArgparseRenderer + Configured renderer instance. + """ + cls = renderer_class or ArgparseRenderer + return cls(config=config, state=state) diff --git a/docs/_ext/vcspull_console_lexer.py b/docs/_ext/vcspull_console_lexer.py new file mode 100644 index 000000000..9c10d987c --- /dev/null +++ b/docs/_ext/vcspull_console_lexer.py @@ -0,0 +1,119 @@ +"""Pygments lexer for vcspull CLI sessions (command + output). + +This module provides a custom Pygments lexer for highlighting vcspull command +sessions, combining shell command highlighting with semantic output highlighting. +""" + +from __future__ import annotations + +import re + +from pygments.lexer import Lexer, do_insertions, line_re # type: ignore[attr-defined] +from pygments.lexers.shell import BashLexer +from pygments.token import Generic, Text +from vcspull_output_lexer import ( # type: ignore[import-not-found] + VcspullOutputLexer, +) + + +class VcspullConsoleLexer(Lexer): + r"""Lexer for vcspull CLI sessions with semantic output highlighting. + + Extends BashSessionLexer pattern but delegates output lines to + VcspullOutputLexer for semantic coloring of vcspull command output. + + Examples + -------- + >>> from pygments.token import Token + >>> lexer = VcspullConsoleLexer() + >>> text = "$ vcspull list\\n• flask → ~/code/flask\\n" + >>> tokens = list(lexer.get_tokens(text)) + >>> any(t == Token.Generic.Prompt for t, v in tokens) + True + >>> any(t == Token.Name.Function for t, v in tokens) + True + """ + + name = "Vcspull Console" + aliases = ["vcspull-console"] # noqa: RUF012 + filenames: list[str] = [] # noqa: RUF012 + mimetypes = ["text/x-vcspull-console"] # noqa: RUF012 + + _venv = re.compile(r"^(\([^)]*\))(\s*)") + _ps1rgx = re.compile( + r"^((?:(?:\[.*?\])|(?:\(\S+\))?(?:| |sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)" + r"?|\[\S+[@:][^\n]+\].+))\s*[$#%]\s*)(.*\n?)" + ) + _ps2 = "> " + + def get_tokens_unprocessed( # type: ignore[no-untyped-def] + self, + text: str, + ): + """Tokenize text with shell commands and vcspull output. + + Parameters + ---------- + text : str + The text to tokenize. + + Yields + ------ + tuple[int, TokenType, str] + Tuples of (index, token_type, value). 
+ """ + innerlexer = BashLexer(**self.options) + outputlexer = VcspullOutputLexer(**self.options) + + pos = 0 + curcode = "" + insertions = [] + backslash_continuation = False + + for match in line_re.finditer(text): + line = match.group() + + venv_match = self._venv.match(line) + if venv_match: + venv = venv_match.group(1) + venv_whitespace = venv_match.group(2) + insertions.append( + (len(curcode), [(0, Generic.Prompt.VirtualEnv, venv)]) + ) + if venv_whitespace: + insertions.append((len(curcode), [(0, Text, venv_whitespace)])) + line = line[venv_match.end() :] + + m = self._ps1rgx.match(line) + if m: + if not insertions: + pos = match.start() + + insertions.append((len(curcode), [(0, Generic.Prompt, m.group(1))])) + curcode += m.group(2) + backslash_continuation = curcode.endswith("\\\n") + elif backslash_continuation: + if line.startswith(self._ps2): + insertions.append( + (len(curcode), [(0, Generic.Prompt, line[: len(self._ps2)])]) + ) + curcode += line[len(self._ps2) :] + else: + curcode += line + backslash_continuation = curcode.endswith("\\\n") + else: + if insertions: + toks = innerlexer.get_tokens_unprocessed(curcode) + for i, t, v in do_insertions(insertions, toks): + yield pos + i, t, v + # Use VcspullOutputLexer for output lines + for i, t, v in outputlexer.get_tokens_unprocessed(line): + yield match.start() + i, t, v + insertions = [] + curcode = "" + + if insertions: + for i, t, v in do_insertions( + insertions, innerlexer.get_tokens_unprocessed(curcode) + ): + yield pos + i, t, v diff --git a/docs/_ext/vcspull_output_lexer.py b/docs/_ext/vcspull_output_lexer.py new file mode 100644 index 000000000..9e54a5938 --- /dev/null +++ b/docs/_ext/vcspull_output_lexer.py @@ -0,0 +1,176 @@ +"""Pygments lexer for vcspull CLI output. + +This module provides a custom Pygments lexer for highlighting vcspull command +output (list, status, sync, search) with semantic colors matching the CLI. +""" + +from __future__ import annotations + +from pygments.lexer import RegexLexer, bygroups +from pygments.token import ( + Comment, + Generic, + Name, + Number, + Punctuation, + Text, + Whitespace, +) + + +class VcspullOutputLexer(RegexLexer): + """Lexer for vcspull CLI output. + + Highlights vcspull command output including list, status, sync, and search + results with semantic coloring. 
+ + Token mapping to vcspull semantic colors: + - SUCCESS (green): Generic.Inserted - checkmarks, "up to date", "synced" + - WARNING (yellow): Name.Exception - warning symbols, "dirty", "behind" + - ERROR (red): Generic.Error - error symbols, "missing", "error" + - INFO (cyan): Name.Function - repository names + - HIGHLIGHT (magenta): Generic.Subheading - workspace headers + - MUTED (blue/gray): Comment - bullets, arrows, labels + + Examples + -------- + >>> from pygments.token import Token + >>> lexer = VcspullOutputLexer() + >>> tokens = list(lexer.get_tokens("• flask → ~/code/flask")) + >>> tokens[0] + (Token.Comment, '•') + >>> tokens[2] + (Token.Name.Function, 'flask') + """ + + name = "vcspull Output" + aliases = ["vcspull-output", "vcspull"] # noqa: RUF012 + filenames: list[str] = [] # noqa: RUF012 + mimetypes = ["text/x-vcspull-output"] # noqa: RUF012 + + tokens = { # noqa: RUF012 + "root": [ + # Newlines + (r"\n", Whitespace), + # Workspace header - path ending with / at start of line or after newline + # Matched by looking for ~/path/ or /path/ pattern as a whole line + (r"(~?/[-a-zA-Z0-9_.~/+]+/)(?=\s*$|\s*\n)", Generic.Subheading), + # Success symbol with repo name (green) - for sync output like "✓ repo" + ( + r"(✓)(\s+)([a-zA-Z][-a-zA-Z0-9_.]+)(?=\s+[~/]|:|\s*$)", + bygroups(Generic.Inserted, Whitespace, Name.Function), # type: ignore[no-untyped-call] + ), + # Success symbol standalone (green) + (r"✓", Generic.Inserted), + # Warning symbol with repo name (yellow) + ( + r"(⚠)(\s+)([a-zA-Z][-a-zA-Z0-9_.]+)(?=\s+[~/]|:|\s*$)", + bygroups(Name.Exception, Whitespace, Name.Function), # type: ignore[no-untyped-call] + ), + # Warning symbol standalone (yellow) + (r"⚠", Name.Exception), + # Error symbol with repo name (red) + ( + r"(✗)(\s+)([a-zA-Z][-a-zA-Z0-9_.]+)(?=\s+[~/]|:|\s*$)", + bygroups(Generic.Error, Whitespace, Name.Function), # type: ignore[no-untyped-call] + ), + # Error symbol standalone (red) + (r"✗", Generic.Error), + # Clone/add symbol with repo name (green) + ( + r"(\+)(\s+)([a-zA-Z][-a-zA-Z0-9_.]+)", + bygroups(Generic.Inserted, Whitespace, Name.Function), # type: ignore[no-untyped-call] + ), + # Update/change symbol with repo name (yellow) + ( + r"(~)(\s+)([a-zA-Z][-a-zA-Z0-9_.]+)", + bygroups(Name.Exception, Whitespace, Name.Function), # type: ignore[no-untyped-call] + ), + # Bullet (muted) + (r"•", Comment), + # Arrow (muted) + (r"→", Comment), + # Status messages - success (green) - must be at word boundary + (r"\bup to date\b", Generic.Inserted), + (r"\bsynced\b", Generic.Inserted), + (r"\bexists?\b", Generic.Inserted), + (r"\bahead by \d+\b", Generic.Inserted), + # Status messages - warning (yellow) + (r"\bdirty\b", Name.Exception), + (r"\bbehind(?: by \d+)?\b", Name.Exception), + (r"\bdiverged\b", Name.Exception), + (r"\bnot a git repo\b", Name.Exception), + # Status messages - error (red) + (r"(?<=: )missing\b", Generic.Error), # "missing" after colon + (r"\berror\b", Generic.Error), + (r"\bfailed\b", Generic.Error), + # Labels (muted) - common vcspull output labels + ( + r"(Summary:|Progress:|Path:|Branch:|url:|workspace:|Ahead/Behind:|" + r"Remote:|Repository:|Note:|Usage:)", + Generic.Heading, + ), + # vcspull command and subcommands (for pretty docs) + (r"\bvcspull\b", Name.Builtin), + (r"\b(sync|list|add|status|search|discover|fmt)\b(?=\s|$)", Name.Builtin), + # Git URLs (with git+ prefix) + (r"git\+https?://[^\s]+", Name.Tag), + # Plain HTTPS/HTTP URLs (without git+ prefix) + (r"https?://[^\s()]+", Name.Tag), + # Interactive prompt options like [y/N], 
[Y/n] + (r"\[[yYnN]/[yYnN]\]", Comment), + # Question mark prompt indicator + (r"\?", Generic.Prompt), + # Paths with ~/ - include + for c++ directories + (r"~?/[-a-zA-Z0-9_.~/+]+(?![\w/+])", Name.Variable), + # Repository names followed by arrow (muted arrow) + # Only match repo name when followed by arrow - avoids false positives + ( + r"([a-zA-Z][-a-zA-Z0-9_.]+)(\s*)(→)", + bygroups(Name.Function, Whitespace, Comment), # type: ignore[no-untyped-call] + ), + # Note: Removed generic "name:" pattern as it caused false positives + # (matching "add:" in "Would add:", "complete:" in "Dry run complete:") + # Repo names are matched via symbol-prefixed patterns (✓, ✗, ⚠, etc.) + # Count labels in summaries + ( + r"(\d+)(\s+)(repositories|repos|exist|missing|synced|failed|blocked|errors)", + bygroups(Number.Integer, Whitespace, Name.Label), # type: ignore[no-untyped-call] + ), + # Numbers + (r"\d+", Number.Integer), + # Whitespace + (r"[ \t]+", Whitespace), + # Punctuation + (r"[,():]", Punctuation), + # Fallback - any other text + (r"[^\s•→✓✗⚠+~:,()]+", Text), + ], + } + + +def tokenize_output(text: str) -> list[tuple[str, str]]: + """Tokenize vcspull output and return list of (token_type, value) tuples. + + Parameters + ---------- + text : str + vcspull CLI output text to tokenize. + + Returns + ------- + list[tuple[str, str]] + List of (token_type_name, text_value) tuples. + + Examples + -------- + >>> result = tokenize_output("• flask → ~/code/flask") + >>> result[0] + ('Token.Comment', '•') + >>> result[2] + ('Token.Name.Function', 'flask') + """ + lexer = VcspullOutputLexer() + return [ + (str(tok_type), tok_value) for tok_type, tok_value in lexer.get_tokens(text) + ] diff --git a/docs/_static/css/argparse-highlight.css b/docs/_static/css/argparse-highlight.css new file mode 100644 index 000000000..e3036cb73 --- /dev/null +++ b/docs/_static/css/argparse-highlight.css @@ -0,0 +1,234 @@ +/* + * Argparse/CLI Highlighting Styles + * + * Styles for CLI inline roles and argparse help output highlighting. + * Uses Furo CSS variables for automatic light/dark theme compatibility. + */ + +/* ========================================================================== + Inline Role Styles + ========================================================================== */ + +/* + * CLI Options + * + * Long options (--verbose) and short options (-h) have distinct styling. + */ +.cli-option { + font-family: var(--font-stack--monospace); + font-weight: 600; +} + +.cli-option-long { + color: var(--color-link); +} + +.cli-option-short { + color: var(--color-inline-code-text); +} + +/* + * CLI Metavars + * + * Placeholder values like FILE, PATH, DIRECTORY. + * Styled with italic and muted color to indicate "replace me". + */ +.cli-metavar { + font-family: var(--font-stack--monospace); + color: var(--color-foreground-secondary); + font-style: italic; +} + +/* + * CLI Commands + * + * Subcommand names like sync, add, list. + * Uses brand color for emphasis. + */ +.cli-command { + font-family: var(--font-stack--monospace); + color: var(--color-brand-primary); + font-weight: 600; +} + +/* + * CLI Default Values + * + * Default values shown in help text like None, "auto". + * Subtle styling to not distract from options. + */ +.cli-default { + font-family: var(--font-stack--monospace); + color: var(--color-foreground-muted); + font-style: italic; +} + +/* + * CLI Choice Values + * + * Choice enumeration values like json, yaml, table. + * Distinct color to show they are valid literal values. 
+ */ +.cli-choice { + font-family: var(--font-stack--monospace); + color: var(--color-problematic); +} + +/* ========================================================================== + Argparse Code Block Highlighting + ========================================================================== */ + +/* + * These styles apply within Pygments-highlighted code blocks using the + * argparse, argparse-usage, or argparse-help lexers. + */ + +/* Usage heading "usage:" */ +.highlight-argparse .gh, +.highlight-argparse-usage .gh, +.highlight-argparse-help .gh { + color: var(--color-foreground-secondary); + font-weight: bold; +} + +/* Section headers like "positional arguments:", "options:" */ +.highlight-argparse .gs, +.highlight-argparse-help .gs { + color: var(--color-foreground-secondary); + font-weight: bold; + text-decoration: underline; +} + +/* Long options --foo */ +.highlight-argparse .nt, +.highlight-argparse-usage .nt, +.highlight-argparse-help .nt { + color: var(--color-link); + font-weight: bold; +} + +/* Short options -h */ +.highlight-argparse .na, +.highlight-argparse-usage .na, +.highlight-argparse-help .na { + color: var(--color-inline-code-text); + font-weight: bold; +} + +/* Metavar placeholders FILE, PATH */ +.highlight-argparse .nv, +.highlight-argparse-usage .nv, +.highlight-argparse-help .nv { + color: var(--color-foreground-secondary); + font-style: italic; +} + +/* Command/positional names */ +.highlight-argparse .nl, +.highlight-argparse-usage .nl, +.highlight-argparse-help .nl { + color: var(--color-brand-primary); +} + +/* Choice values in braces {json,yaml} */ +.highlight-argparse .no, +.highlight-argparse-usage .no, +.highlight-argparse-help .no { + color: var(--color-problematic); +} + +/* Operators like | for mutex groups */ +.highlight-argparse .o, +.highlight-argparse-usage .o, +.highlight-argparse-help .o { + color: var(--color-foreground-muted); + font-weight: bold; +} + +/* Punctuation [], {}, () */ +.highlight-argparse .p, +.highlight-argparse-usage .p, +.highlight-argparse-help .p { + color: var(--color-foreground-muted); +} + +/* ========================================================================== + Argparse Directive Highlighting (.. argparse:: output) + ========================================================================== */ + +/* + * These styles apply to the argparse directive output which uses custom + * nodes rendered by sphinx_argparse_neo. The directive adds highlight spans + * directly to the HTML output. + */ + +/* + * Usage Block (.argparse-usage) + * + * The usage block now has both .argparse-usage and .highlight-argparse-usage + * classes. The .highlight-argparse-usage selectors above already handle the + * token highlighting. These selectors provide fallback and ensure consistent + * styling. + */ +.argparse-usage .gh { + color: var(--color-foreground-secondary); + font-weight: bold; +} + +.argparse-usage .nt { + color: var(--color-link); + font-weight: bold; +} + +.argparse-usage .na { + color: var(--color-inline-code-text); + font-weight: bold; +} + +.argparse-usage .nv { + color: var(--color-foreground-secondary); + font-style: italic; +} + +.argparse-usage .nl { + color: var(--color-brand-primary); +} + +.argparse-usage .no { + color: var(--color-problematic); +} + +.argparse-usage .o { + color: var(--color-foreground-muted); + font-weight: bold; +} + +.argparse-usage .p { + color: var(--color-foreground-muted); +} + +/* + * Argument Name (
) + * + * The argument names in the dl/dt structure are highlighted with + * semantic spans for options and metavars. + */ +.argparse-argument-name .nt { + color: var(--color-link); + font-weight: bold; +} + +.argparse-argument-name .na { + color: var(--color-inline-code-text); + font-weight: bold; +} + +.argparse-argument-name .nv { + color: var(--color-foreground-secondary); + font-style: italic; +} + +.argparse-argument-name .nl { + color: var(--color-brand-primary); + font-weight: 600; +} diff --git a/docs/cli/add.md b/docs/cli/add.md index e6211c249..df96dc0f8 100644 --- a/docs/cli/add.md +++ b/docs/cli/add.md @@ -20,14 +20,13 @@ For bulk scanning of existing repositories, see {ref}`cli-discover`. :func: create_parser :prog: vcspull :path: add - :nodescription: ``` ## Basic usage Point to an existing checkout to add it under its parent workspace: -```console +```vcspull-console $ vcspull add ~/study/python/pytest-docker Found new repository to import: + pytest-docker (https://github.com/avast/pytest-docker) diff --git a/docs/cli/discover.md b/docs/cli/discover.md index 4e7ee35d4..18a50e5fb 100644 --- a/docs/cli/discover.md +++ b/docs/cli/discover.md @@ -14,14 +14,13 @@ workspaces or migrating from other tools. :func: create_parser :prog: vcspull :path: discover - :nodescription: ``` ## Basic usage Scan a directory for Git repositories: -```console +```vcspull-console $ vcspull discover ~/code Found 2 repositories in ~/code @@ -67,7 +66,7 @@ This scans all subdirectories for Git repositories, making it ideal for: Skip prompts and add all repositories with `--yes` or `-y`: -```console +```vcspull-console $ vcspull discover ~/code --recursive --yes Found 15 repositories in ~/code Added 15 repositories to ~/.vcspull.yaml @@ -88,7 +87,7 @@ $ vcspull discover ~/code --dry-run Output shows: -```console +```vcspull-output Would add: vcspull (~/code/) Remote: git+https://github.com/vcs-python/vcspull.git @@ -149,7 +148,7 @@ For each repository found: Repositories without an `origin` remote are detected but logged as a warning: -```console +```vcspull-console $ vcspull discover ~/code WARNING: Could not determine remote URL for ~/code/local-project (no origin remote) Skipping local-project @@ -211,7 +210,7 @@ After discovering repositories, consider: If a repository already exists in your configuration, vcspull will detect it: -```console +```vcspull-console Repository: flask Path: ~/code/flask Remote: git+https://github.com/pallets/flask.git diff --git a/docs/cli/fmt.md b/docs/cli/fmt.md index ef12e69d5..f0d3e7183 100644 --- a/docs/cli/fmt.md +++ b/docs/cli/fmt.md @@ -19,7 +19,6 @@ place while still showing a warning. :func: create_parser :prog: vcspull :path: fmt - :nodescription: ``` ## What gets formatted diff --git a/docs/cli/index.md b/docs/cli/index.md index 69cf16118..977483710 100644 --- a/docs/cli/index.md +++ b/docs/cli/index.md @@ -34,7 +34,6 @@ completion :func: create_parser :prog: vcspull :nosubcommands: - :nodescription: subparser_name : @replace See :ref:`cli-sync`, :ref:`cli-add`, :ref:`cli-discover`, :ref:`cli-list`, :ref:`cli-search`, :ref:`cli-status`, :ref:`cli-fmt` diff --git a/docs/cli/list.md b/docs/cli/list.md index 66d76e01c..ad69646eb 100644 --- a/docs/cli/list.md +++ b/docs/cli/list.md @@ -14,14 +14,13 @@ filter repositories by patterns, and export structured data for automation. 
:func: create_parser :prog: vcspull :path: list - :nodescription: ``` ## Basic usage List all configured repositories: -```console +```vcspull-console $ vcspull list • tiktoken → ~/study/ai/tiktoken • GeographicLib → ~/study/c++/GeographicLib @@ -32,7 +31,7 @@ $ vcspull list Filter repositories using fnmatch-style patterns: -```console +```vcspull-console $ vcspull list 'flask*' • flask → ~/code/flask • flask-sqlalchemy → ~/code/flask-sqlalchemy @@ -48,7 +47,7 @@ $ vcspull list django flask Group repositories by workspace root with `--tree`: -```console +```vcspull-console $ vcspull list --tree ~/study/ai/ @@ -132,7 +131,7 @@ $ vcspull list -f ~/projects/.vcspull.yaml Filter repositories by workspace root with `-w/--workspace/--workspace-root`: -```console +```vcspull-console $ vcspull list -w ~/code/ • flask → ~/code/flask • requests → ~/code/requests diff --git a/docs/cli/search.md b/docs/cli/search.md index 6bd61706d..b7c186ff3 100644 --- a/docs/cli/search.md +++ b/docs/cli/search.md @@ -14,14 +14,13 @@ scope to specific fields, and can emit structured JSON for automation. :func: create_parser :prog: vcspull :path: search - :nodescription: ``` ## Basic usage Search all fields (name, path, url, workspace) with regex: -```console +```vcspull-console $ vcspull search django • django → ~/code/django ``` @@ -30,7 +29,7 @@ $ vcspull search django Target specific fields with prefixes: -```console +```vcspull-console $ vcspull search name:django url:github • django → ~/code/django url: git+https://github.com/django/django.git diff --git a/docs/cli/status.md b/docs/cli/status.md index e8a352bdd..82bd1fe56 100644 --- a/docs/cli/status.md +++ b/docs/cli/status.md @@ -14,14 +14,13 @@ This introspection command helps verify your local workspace matches your config :func: create_parser :prog: vcspull :path: status - :nodescription: ``` ## Basic usage Check the status of all configured repositories: -```console +```vcspull-console $ vcspull status ✗ tiktoken: missing ✓ flask: up to date @@ -40,7 +39,7 @@ The command shows: Filter repositories using fnmatch-style patterns: -```console +```vcspull-console $ vcspull status 'django*' • django → ~/code/django (exists, clean) • django-extensions → ~/code/django-extensions (missing) @@ -56,7 +55,7 @@ $ vcspull status django flask requests Show additional information with `--detailed` or `-d`: -```console +```vcspull-console $ vcspull status --detailed ✓ flask: up to date Path: ~/code/flask diff --git a/docs/cli/sync.md b/docs/cli/sync.md index 2e219e529..f5636bbde 100644 --- a/docs/cli/sync.md +++ b/docs/cli/sync.md @@ -16,14 +16,13 @@ synchronized with remote repositories. 
:func: create_parser :prog: vcspull :path: sync - :nodescription: ``` ## Dry run mode Preview what would be synchronized without making changes: -```console +```vcspull-console $ vcspull sync --dry-run '*' Would sync flask at ~/code/flask Would sync django at ~/code/django @@ -169,17 +168,17 @@ $ vcspull sync 'django-anymail' 'django-guardian' As of 1.13.x, if you enter a repo term (or terms) that aren't found throughout your configurations, it will show a warning: -```console +```vcspull-console $ vcspull sync non_existent_repo No repo found in config(s) for "non_existent_repo" ``` -```console +```vcspull-console $ vcspull sync non_existent_repo existing_repo No repo found in config(s) for "non_existent_repo" ``` -```console +```vcspull-console $ vcspull sync non_existent_repo existing_repo another_repo_not_in_config No repo found in config(s) for "non_existent_repo" No repo found in config(s) for "another_repo_not_in_config" diff --git a/docs/conf.py b/docs/conf.py index 779888b98..c608d4f64 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -34,7 +34,7 @@ "sphinx.ext.todo", "sphinx.ext.napoleon", "sphinx.ext.linkcode", - "sphinxarg.ext", # sphinx-argparse + "pretty_argparse", # Uses sphinx_argparse_neo with vcspull enhancements "sphinx_inline_tabs", "sphinx_copybutton", "sphinxext.opengraph", @@ -123,6 +123,7 @@ autodoc_typehints = "description" # Don't show class signature with the class' name. autodoc_class_signature = "separated" +autodoc_preserve_defaults = True # sphinx-autodoc-typehints # Suppress warnings for forward references that can't be resolved @@ -133,7 +134,7 @@ # sphinx.ext.napoleon napoleon_google_docstring = True -napoleon_include_init_with_doc = True +napoleon_include_init_with_doc = False # sphinx-copybutton copybutton_prompt_text = ( @@ -153,8 +154,9 @@ intersphinx_mapping = { "py": ("https://docs.python.org/", None), - "libvcs": ("http://libvcs.git-pull.com/", None), + "libvcs": ("https://libvcs.git-pull.com/", None), } +intersphinx_cache_limit = 5 # Cache inventories for 5 days def linkcode_resolve(domain: str, info: dict[str, str]) -> None | str: diff --git a/pyproject.toml b/pyproject.toml index f863ec332..7e8e5aaef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -70,7 +70,7 @@ vcspull = "vcspull:cli.cli" [dependency-groups] dev = [ # Docs - "sphinx<9", + "sphinx>=8", "furo", "gp-libs", "sphinx-autobuild", @@ -79,7 +79,6 @@ dev = [ "sphinxext-opengraph", "sphinx-copybutton", "sphinxext-rediraffe", - "sphinx-argparse", "myst-parser", "linkify-it-py", # Testing @@ -98,13 +97,15 @@ dev = [ "ruff", "mypy", # Annotations + "types-docutils", + "types-Pygments", "types-requests", "types-PyYAML", "types-colorama" ] docs = [ - "sphinx<9", + "sphinx>=8", "furo", "gp-libs", "sphinx-autobuild", @@ -113,7 +114,6 @@ docs = [ "sphinxext-opengraph", "sphinx-copybutton", "sphinxext-rediraffe", - "sphinx-argparse", "myst-parser", "linkify-it-py", ] @@ -136,6 +136,8 @@ lint = [ "mypy", ] typings = [ + "types-docutils", + "types-Pygments", "types-requests", "types-PyYAML", "types-colorama" @@ -157,6 +159,15 @@ strict = true [[tool.mypy.overrides]] module = [ "shtab", + "sphinx_argparse_neo", + "sphinx_argparse_neo.*", + "cli_usage_lexer", + "argparse_lexer", + "argparse_roles", + "docutils", + "docutils.*", + "pygments", + "pygments.*", ] ignore_missing_imports = true @@ -167,6 +178,7 @@ omit = [ "*/_*", "*/_compat.py", "docs/conf.py", + "docs/_ext/*", "tests/*", ] @@ -239,13 +251,13 @@ required-imports = [ "*/__init__.py" = ["F401"] [tool.pytest.ini_options] -addopts = 
"--tb=short --no-header --showlocals" +addopts = "--tb=short --no-header --showlocals --doctest-modules" +doctest_optionflags = "ELLIPSIS NORMALIZE_WHITESPACE" asyncio_mode = "auto" asyncio_default_fixture_loop_scope = "function" testpaths = [ "src/vcspull", "tests", - "docs", ] filterwarnings = [ "ignore:The frontend.Option(Parser)? class.*:DeprecationWarning::", diff --git a/src/vcspull/_internal/config_reader.py b/src/vcspull/_internal/config_reader.py index 4313d7938..b6798ab61 100644 --- a/src/vcspull/_internal/config_reader.py +++ b/src/vcspull/_internal/config_reader.py @@ -53,13 +53,13 @@ def load(cls, fmt: FormatLiteral, content: str) -> ConfigReader: >>> cfg = ConfigReader.load("json", '{ "session_name": "my session" }') >>> cfg - + >>> cfg.content {'session_name': 'my session'} >>> cfg = ConfigReader.load("yaml", 'session_name: my session') >>> cfg - + >>> cfg.content {'session_name': 'my session'} """ @@ -146,7 +146,7 @@ def from_file(cls, path: pathlib.Path) -> ConfigReader: >>> cfg = ConfigReader.from_file(yaml_file) >>> cfg - + >>> cfg.content {'session_name': 'my session'} @@ -163,7 +163,7 @@ def from_file(cls, path: pathlib.Path) -> ConfigReader: >>> cfg = ConfigReader.from_file(json_file) >>> cfg - + >>> cfg.content {'session_name': 'my session'} diff --git a/tests/docs/__init__.py b/tests/docs/__init__.py new file mode 100644 index 000000000..b6723bfd0 --- /dev/null +++ b/tests/docs/__init__.py @@ -0,0 +1,3 @@ +"""Tests for documentation extensions.""" + +from __future__ import annotations diff --git a/tests/docs/_ext/__init__.py b/tests/docs/_ext/__init__.py new file mode 100644 index 000000000..56548488e --- /dev/null +++ b/tests/docs/_ext/__init__.py @@ -0,0 +1,3 @@ +"""Tests for docs/_ext Sphinx extensions.""" + +from __future__ import annotations diff --git a/tests/docs/_ext/conftest.py b/tests/docs/_ext/conftest.py new file mode 100644 index 000000000..bb2cf99b5 --- /dev/null +++ b/tests/docs/_ext/conftest.py @@ -0,0 +1,11 @@ +"""Fixtures and configuration for docs extension tests.""" + +from __future__ import annotations + +import sys +from pathlib import Path + +# Add docs/_ext to path so we can import the extension module +docs_ext_path = Path(__file__).parent.parent.parent.parent / "docs" / "_ext" +if str(docs_ext_path) not in sys.path: + sys.path.insert(0, str(docs_ext_path)) diff --git a/tests/docs/_ext/sphinx_argparse_neo/__init__.py b/tests/docs/_ext/sphinx_argparse_neo/__init__.py new file mode 100644 index 000000000..259f37c93 --- /dev/null +++ b/tests/docs/_ext/sphinx_argparse_neo/__init__.py @@ -0,0 +1,3 @@ +"""Tests for sphinx_argparse_neo extension.""" + +from __future__ import annotations diff --git a/tests/docs/_ext/sphinx_argparse_neo/conftest.py b/tests/docs/_ext/sphinx_argparse_neo/conftest.py new file mode 100644 index 000000000..d1b0d1ef5 --- /dev/null +++ b/tests/docs/_ext/sphinx_argparse_neo/conftest.py @@ -0,0 +1,235 @@ +"""Fixtures and configuration for sphinx_argparse_neo tests.""" + +from __future__ import annotations + +import argparse +import sys +from pathlib import Path + +import pytest + +# Add docs/_ext to path so we can import the extension module +docs_ext_path = Path(__file__).parent.parent.parent.parent.parent / "docs" / "_ext" +if str(docs_ext_path) not in sys.path: + sys.path.insert(0, str(docs_ext_path)) + + +@pytest.fixture +def simple_parser() -> argparse.ArgumentParser: + """Create a simple parser with basic arguments. 
+ + Returns + ------- + argparse.ArgumentParser + Parser with a positional argument and a couple of options. + """ + parser = argparse.ArgumentParser( + prog="myapp", + description="A simple test application", + ) + parser.add_argument("filename", help="Input file to process") + parser.add_argument( + "-v", "--verbose", action="store_true", help="Enable verbose mode" + ) + parser.add_argument("-o", "--output", metavar="FILE", help="Output file") + return parser + + +@pytest.fixture +def parser_with_groups() -> argparse.ArgumentParser: + """Create a parser with custom argument groups. + + Returns + ------- + argparse.ArgumentParser + Parser with multiple argument groups. + """ + parser = argparse.ArgumentParser(prog="grouped", description="Parser with groups") + + input_group = parser.add_argument_group("Input Options", "Options for input") + input_group.add_argument("--input", "-i", required=True, help="Input file") + input_group.add_argument("--format", choices=["json", "yaml"], help="Input format") + + output_group = parser.add_argument_group("Output Options", "Options for output") + output_group.add_argument("--output", "-o", help="Output file") + output_group.add_argument("--pretty", action="store_true", help="Pretty print") + + return parser + + +@pytest.fixture +def parser_with_subcommands() -> argparse.ArgumentParser: + """Create a parser with subcommands. + + Returns + ------- + argparse.ArgumentParser + Parser with subparsers. + """ + parser = argparse.ArgumentParser(prog="cli", description="CLI with subcommands") + parser.add_argument("-v", "--verbose", action="store_true", help="Verbose mode") + + subparsers = parser.add_subparsers(dest="command", help="Available commands") + + # Sync subcommand + sync_parser = subparsers.add_parser("sync", help="Synchronize repositories") + sync_parser.add_argument("repo", nargs="?", help="Repository to sync") + sync_parser.add_argument("-f", "--force", action="store_true", help="Force sync") + + # Add subcommand + add_parser = subparsers.add_parser("add", aliases=["a"], help="Add a repository") + add_parser.add_argument("url", help="Repository URL") + add_parser.add_argument("-n", "--name", help="Repository name") + + return parser + + +@pytest.fixture +def parser_with_mutex() -> argparse.ArgumentParser: + """Create a parser with mutually exclusive arguments. + + Returns + ------- + argparse.ArgumentParser + Parser with mutually exclusive group. + """ + parser = argparse.ArgumentParser(prog="mutex", description="Parser with mutex") + + mutex = parser.add_mutually_exclusive_group(required=True) + mutex.add_argument("-v", "--verbose", action="store_true", help="Verbose output") + mutex.add_argument("-q", "--quiet", action="store_true", help="Quiet output") + + return parser + + +@pytest.fixture +def parser_with_all_actions() -> argparse.ArgumentParser: + """Create a parser with all action types. + + Returns + ------- + argparse.ArgumentParser + Parser demonstrating all action types. 
+ """ + parser = argparse.ArgumentParser(prog="actions", description="All action types") + + # store (default) + parser.add_argument("--name", help="Store action") + + # store_const + parser.add_argument( + "--enable", action="store_const", const="enabled", help="Store const" + ) + + # store_true / store_false + parser.add_argument("--flag", action="store_true", help="Store true") + parser.add_argument("--no-flag", action="store_false", help="Store false") + + # append + parser.add_argument("--item", action="append", help="Append action") + + # append_const + parser.add_argument( + "--debug", + action="append_const", + const="debug", + dest="features", + help="Append const", + ) + + # count + parser.add_argument("-v", "--verbose", action="count", default=0, help="Count") + + # BooleanOptionalAction (Python 3.9+) + parser.add_argument( + "--option", action=argparse.BooleanOptionalAction, help="Boolean optional" + ) + + return parser + + +@pytest.fixture +def parser_with_types() -> argparse.ArgumentParser: + """Create a parser with typed arguments. + + Returns + ------- + argparse.ArgumentParser + Parser with various type specifications. + """ + parser = argparse.ArgumentParser(prog="types", description="Typed arguments") + + parser.add_argument("--count", type=int, help="Integer argument") + parser.add_argument("--ratio", type=float, help="Float argument") + parser.add_argument("--path", type=Path, help="Path argument") + parser.add_argument("--choice", type=str, choices=["a", "b", "c"], help="Choices") + + return parser + + +@pytest.fixture +def parser_with_nargs() -> argparse.ArgumentParser: + """Create a parser demonstrating nargs variants. + + Returns + ------- + argparse.ArgumentParser + Parser with various nargs specifications. + """ + parser = argparse.ArgumentParser(prog="nargs", description="Nargs variants") + + parser.add_argument("single", help="Single positional") + parser.add_argument("optional", nargs="?", default="default", help="Optional") + parser.add_argument("zero_or_more", nargs="*", help="Zero or more") + parser.add_argument("--one-or-more", nargs="+", help="One or more") + parser.add_argument("--exactly-two", nargs=2, metavar=("A", "B"), help="Exactly 2") + parser.add_argument("remainder", nargs=argparse.REMAINDER, help="Remainder") + + return parser + + +@pytest.fixture +def parser_with_defaults() -> argparse.ArgumentParser: + """Create a parser with various default values. + + Returns + ------- + argparse.ArgumentParser + Parser demonstrating default handling. + """ + parser = argparse.ArgumentParser(prog="defaults") + + parser.add_argument("--none-default", default=None, help="None default") + parser.add_argument("--string-default", default="hello", help="String default") + parser.add_argument("--int-default", default=42, type=int, help="Int default") + parser.add_argument("--list-default", default=[1, 2], help="List default") + parser.add_argument("--suppress", default=argparse.SUPPRESS, help=argparse.SUPPRESS) + + return parser + + +@pytest.fixture +def nested_subcommands_parser() -> argparse.ArgumentParser: + """Create a parser with nested subcommands. + + Returns + ------- + argparse.ArgumentParser + Parser with multiple levels of subparsers. 
+ """ + parser = argparse.ArgumentParser(prog="nested", description="Nested subcommands") + + level1 = parser.add_subparsers(dest="level1") + + # First level: repo + repo = level1.add_parser("repo", help="Repository commands") + repo_subs = repo.add_subparsers(dest="level2") + + # Second level: repo clone + clone = repo_subs.add_parser("clone", help="Clone a repository") + clone.add_argument("url", help="Repository URL") + + # Second level: repo list + repo_subs.add_parser("list", help="List repositories") + + return parser diff --git a/tests/docs/_ext/sphinx_argparse_neo/test_compat.py b/tests/docs/_ext/sphinx_argparse_neo/test_compat.py new file mode 100644 index 000000000..417e29a6e --- /dev/null +++ b/tests/docs/_ext/sphinx_argparse_neo/test_compat.py @@ -0,0 +1,330 @@ +"""Tests for sphinx_argparse_neo.compat module.""" + +from __future__ import annotations + +import sys +import typing as t + +import pytest +from sphinx_argparse_neo.compat import ( + MockModule, + get_parser_from_entry_point, + get_parser_from_module, + import_module, + mock_imports, +) + +# --- MockModule tests --- + + +def test_mock_module_name() -> None: + """Test MockModule name attribute.""" + mock = MockModule("mypackage.submodule") + assert mock.__name__ == "mypackage.submodule" + + +def test_mock_module_repr() -> None: + """Test MockModule string representation.""" + mock = MockModule("mypackage") + assert repr(mock) == "" + + +def test_mock_module_getattr() -> None: + """Test MockModule attribute access.""" + mock = MockModule("mypackage") + child = mock.submodule + + assert isinstance(child, MockModule) + assert child.__name__ == "mypackage.submodule" + + +def test_mock_module_nested_getattr() -> None: + """Test MockModule nested attribute access.""" + mock = MockModule("pkg") + deep = mock.level1.level2.level3 + + assert deep.__name__ == "pkg.level1.level2.level3" + + +def test_mock_module_callable() -> None: + """Test MockModule is callable.""" + mock = MockModule("mypackage") + result = mock() + + assert result is mock + + +def test_mock_module_callable_with_args() -> None: + """Test MockModule callable with arguments.""" + mock = MockModule("mypackage") + result = mock(1, 2, 3, key="value") + + assert result is mock + + +def test_mock_module_chained_call() -> None: + """Test MockModule chained attribute access and call.""" + mock = MockModule("pkg") + result = mock.SomeClass() + + assert isinstance(result, MockModule) + + +# --- mock_imports context manager tests --- + + +def test_mock_imports_adds_to_sys_modules() -> None: + """Test that mock_imports adds modules to sys.modules.""" + module_name = "test_fake_module_xyz" + + assert module_name not in sys.modules + + with mock_imports([module_name]): + assert module_name in sys.modules + assert isinstance(sys.modules[module_name], MockModule) + + assert module_name not in sys.modules + + +def test_mock_imports_multiple_modules() -> None: + """Test mocking multiple modules.""" + modules = ["fake_a", "fake_b", "fake_c"] + + with mock_imports(modules): + for name in modules: + assert name in sys.modules + + for name in modules: + assert name not in sys.modules + + +def test_mock_imports_nested_modules() -> None: + """Test mocking nested module paths.""" + modules = ["fake_pkg", "fake_pkg.sub", "fake_pkg.sub.deep"] + + with mock_imports(modules): + for name in modules: + assert name in sys.modules + + for name in modules: + assert name not in sys.modules + + +def test_mock_imports_does_not_override_existing() -> None: + """Test that mock_imports doesn't 
override existing modules.""" + # argparse is already imported + original = sys.modules["argparse"] + + with mock_imports(["argparse"]): + # Should not be replaced + assert sys.modules["argparse"] is original + + assert sys.modules["argparse"] is original + + +def test_mock_imports_cleanup_on_exception() -> None: + """Test that mock_imports cleans up even on exception.""" + module_name = "fake_exception_test" + exc_msg = "Test exception" + + with pytest.raises(ValueError), mock_imports([module_name]): + assert module_name in sys.modules + raise ValueError(exc_msg) + + assert module_name not in sys.modules + + +def test_mock_imports_allows_import() -> None: + """Test that mocked modules can be imported.""" + module_name = "fake_importable" + + with mock_imports([module_name]): + # This should work without ImportError + import fake_importable # type: ignore[import-not-found] + + assert fake_importable.__name__ == "fake_importable" + + +# --- import_module tests --- + + +def test_import_module_builtin() -> None: + """Test importing a built-in module.""" + mod = import_module("argparse") + assert hasattr(mod, "ArgumentParser") + + +def test_import_module_stdlib() -> None: + """Test importing a stdlib module.""" + mod = import_module("os.path") + assert hasattr(mod, "join") + + +def test_import_module_not_found() -> None: + """Test importing a non-existent module.""" + with pytest.raises(ModuleNotFoundError): + import_module("nonexistent_module_xyz") + + +# --- get_parser_from_module tests --- + + +def test_get_parser_from_module_argparse() -> None: + """Test getting parser from argparse module itself.""" + # Create a test module with a parser factory + import types + + test_module = types.ModuleType("test_parser_module") + + def create_parser() -> t.Any: + import argparse + + return argparse.ArgumentParser(prog="test") + + test_module.create_parser = create_parser # type: ignore[attr-defined] + sys.modules["test_parser_module"] = test_module + + try: + parser = get_parser_from_module("test_parser_module", "create_parser") + assert parser.prog == "test" + finally: + del sys.modules["test_parser_module"] + + +def test_get_parser_from_module_with_mock() -> None: + """Test getting parser with mocked dependencies.""" + import types + + test_module = types.ModuleType("test_mock_parser") + + def create_parser() -> t.Any: + import argparse + + return argparse.ArgumentParser(prog="mocked") + + test_module.create_parser = create_parser # type: ignore[attr-defined] + sys.modules["test_mock_parser"] = test_module + + try: + parser = get_parser_from_module( + "test_mock_parser", + "create_parser", + mock_modules=["fake_dependency"], + ) + assert parser.prog == "mocked" + finally: + del sys.modules["test_mock_parser"] + + +def test_get_parser_from_module_dotted_path() -> None: + """Test getting parser from class method.""" + import types + + test_module = types.ModuleType("test_class_parser") + + class CLI: + @staticmethod + def create_parser() -> t.Any: + import argparse + + return argparse.ArgumentParser(prog="from_class") + + test_module.CLI = CLI # type: ignore[attr-defined] + sys.modules["test_class_parser"] = test_module + + try: + parser = get_parser_from_module("test_class_parser", "CLI.create_parser") + assert parser.prog == "from_class" + finally: + del sys.modules["test_class_parser"] + + +def test_get_parser_from_module_not_found() -> None: + """Test error when module not found.""" + with pytest.raises(ModuleNotFoundError): + get_parser_from_module("nonexistent_xyz", "func") + + +def 
test_get_parser_from_module_func_not_found() -> None: + """Test error when function not found.""" + with pytest.raises(AttributeError): + get_parser_from_module("argparse", "nonexistent_func") + + +# --- get_parser_from_entry_point tests --- + + +def test_get_parser_from_entry_point_valid() -> None: + """Test parsing valid entry point format.""" + import types + + test_module = types.ModuleType("test_entry_point") + + def get_parser() -> t.Any: + import argparse + + return argparse.ArgumentParser(prog="entry") + + test_module.get_parser = get_parser # type: ignore[attr-defined] + sys.modules["test_entry_point"] = test_module + + try: + parser = get_parser_from_entry_point("test_entry_point:get_parser") + assert parser.prog == "entry" + finally: + del sys.modules["test_entry_point"] + + +def test_get_parser_from_entry_point_invalid_format() -> None: + """Test error on invalid entry point format.""" + with pytest.raises(ValueError) as exc_info: + get_parser_from_entry_point("no_colon_separator") + + assert "Invalid entry point format" in str(exc_info.value) + + +def test_get_parser_from_entry_point_with_class() -> None: + """Test entry point with class method.""" + import types + + test_module = types.ModuleType("test_entry_class") + + class Factory: + @staticmethod + def parser() -> t.Any: + import argparse + + return argparse.ArgumentParser(prog="factory") + + test_module.Factory = Factory # type: ignore[attr-defined] + sys.modules["test_entry_class"] = test_module + + try: + parser = get_parser_from_entry_point("test_entry_class:Factory.parser") + assert parser.prog == "factory" + finally: + del sys.modules["test_entry_class"] + + +def test_get_parser_from_entry_point_with_mock() -> None: + """Test entry point with mocked modules.""" + import types + + test_module = types.ModuleType("test_entry_mock") + + def make_parser() -> t.Any: + import argparse + + return argparse.ArgumentParser(prog="with_mock") + + test_module.make_parser = make_parser # type: ignore[attr-defined] + sys.modules["test_entry_mock"] = test_module + + try: + parser = get_parser_from_entry_point( + "test_entry_mock:make_parser", + mock_modules=["some_optional_dep"], + ) + assert parser.prog == "with_mock" + finally: + del sys.modules["test_entry_mock"] diff --git a/tests/docs/_ext/sphinx_argparse_neo/test_myst.py b/tests/docs/_ext/sphinx_argparse_neo/test_myst.py new file mode 100644 index 000000000..879d77b1d --- /dev/null +++ b/tests/docs/_ext/sphinx_argparse_neo/test_myst.py @@ -0,0 +1,357 @@ +"""Tests for sphinx_argparse_neo.myst module.""" + +from __future__ import annotations + +import typing as t + +import pytest +from docutils import nodes +from sphinx_argparse_neo.myst import ( + _parse_fenced_code, + _parse_inline_myst, + detect_format, + parse_help_text, + parse_myst, +) + +# --- detect_format tests --- + + +class DetectFormatFixture(t.NamedTuple): + """Test fixture for detect_format function.""" + + test_id: str + text: str + expected: str + + +DETECT_FORMAT_FIXTURES: list[DetectFormatFixture] = [ + DetectFormatFixture( + test_id="plain_text", + text="Plain text without markup", + expected="rst", + ), + DetectFormatFixture( + test_id="rst_ref_role", + text="Check :ref:`docs` for more", + expected="rst", + ), + DetectFormatFixture( + test_id="rst_doc_role", + text="See :doc:`guide` for details", + expected="rst", + ), + DetectFormatFixture( + test_id="rst_class_role", + text="Use :class:`MyClass` instead", + expected="rst", + ), + DetectFormatFixture( + test_id="rst_func_role", + text="Call 
:func:`my_function`", + expected="rst", + ), + DetectFormatFixture( + test_id="rst_directive", + text=".. note:: This is important", + expected="rst", + ), + DetectFormatFixture( + test_id="myst_link", + text="See [the docs](https://example.com)", + expected="myst", + ), + DetectFormatFixture( + test_id="myst_fenced_code", + text="```python\ncode\n```", + expected="myst", + ), + DetectFormatFixture( + test_id="myst_directive", + text="{note}\nThis is important", + expected="myst", + ), + DetectFormatFixture( + test_id="myst_image", + text="![alt text](image.png)", + expected="myst", + ), + DetectFormatFixture( + test_id="empty_string", + text="", + expected="rst", + ), +] + + +@pytest.mark.parametrize( + DetectFormatFixture._fields, + DETECT_FORMAT_FIXTURES, + ids=[f.test_id for f in DETECT_FORMAT_FIXTURES], +) +def test_detect_format(test_id: str, text: str, expected: str) -> None: + """Test format detection.""" + assert detect_format(text) == expected + + +# --- _parse_fenced_code tests --- + + +def test_parse_fenced_code_python() -> None: + """Test parsing Python fenced code block.""" + text = "```python\nprint('hello')\n```" + node = _parse_fenced_code(text) + + assert node is not None + assert isinstance(node, nodes.literal_block) + assert node["language"] == "python" + assert "print" in node.astext() + + +def test_parse_fenced_code_no_language() -> None: + """Test parsing fenced code block without language.""" + text = "```\nsome code\n```" + node = _parse_fenced_code(text) + + assert node is not None + assert node["language"] == "text" + + +def test_parse_fenced_code_multiline() -> None: + """Test parsing multiline fenced code.""" + text = "```bash\necho 'line1'\necho 'line2'\n```" + node = _parse_fenced_code(text) + + assert node is not None + assert "line1" in node.astext() + assert "line2" in node.astext() + + +def test_parse_fenced_code_invalid() -> None: + """Test parsing invalid fenced code.""" + text = "not a code block" + node = _parse_fenced_code(text) + + assert node is None + + +def test_parse_fenced_code_single_line() -> None: + """Test parsing single line (no closing fence).""" + text = "```" + node = _parse_fenced_code(text) + + assert node is None + + +# --- _parse_inline_myst tests --- + + +def test_parse_inline_myst_plain_text() -> None: + """Test parsing plain text.""" + parent = nodes.paragraph() + _parse_inline_myst("Plain text", parent) + + assert len(parent.children) == 1 + assert isinstance(parent.children[0], nodes.Text) + assert parent.astext() == "Plain text" + + +def test_parse_inline_myst_link() -> None: + """Test parsing markdown link.""" + parent = nodes.paragraph() + _parse_inline_myst("See [docs](https://example.com)", parent) + + # Should have text + reference + refs = [c for c in parent.children if isinstance(c, nodes.reference)] + assert len(refs) == 1 + assert refs[0]["refuri"] == "https://example.com" + assert refs[0].astext() == "docs" + + +def test_parse_inline_myst_code() -> None: + """Test parsing inline code.""" + parent = nodes.paragraph() + _parse_inline_myst("Use `code` here", parent) + + literals = [c for c in parent.children if isinstance(c, nodes.literal)] + assert len(literals) == 1 + assert literals[0].astext() == "code" + + +def test_parse_inline_myst_bold() -> None: + """Test parsing bold text.""" + parent = nodes.paragraph() + _parse_inline_myst("This is **bold** text", parent) + + strong = [c for c in parent.children if isinstance(c, nodes.strong)] + assert len(strong) == 1 + assert strong[0].astext() == "bold" + + +def 
test_parse_inline_myst_italic() -> None: + """Test parsing italic text with asterisks.""" + parent = nodes.paragraph() + _parse_inline_myst("This is *italic* text", parent) + + emphasis = [c for c in parent.children if isinstance(c, nodes.emphasis)] + assert len(emphasis) == 1 + assert emphasis[0].astext() == "italic" + + +def test_parse_inline_myst_multiple_patterns() -> None: + """Test parsing text with multiple inline patterns.""" + parent = nodes.paragraph() + _parse_inline_myst("Use `code` and [link](url) here", parent) + + literals = [c for c in parent.children if isinstance(c, nodes.literal)] + refs = [c for c in parent.children if isinstance(c, nodes.reference)] + + assert len(literals) == 1 + assert len(refs) == 1 + + +# --- parse_myst tests --- + + +def test_parse_myst_simple_paragraph() -> None: + """Test parsing simple paragraph.""" + result = parse_myst("Simple text") + + assert len(result) == 1 + assert isinstance(result[0], nodes.paragraph) + + +def test_parse_myst_multiple_paragraphs() -> None: + """Test parsing multiple paragraphs.""" + result = parse_myst("Para 1\n\nPara 2") + + assert len(result) == 2 + assert all(isinstance(n, nodes.paragraph) for n in result) + + +def test_parse_myst_fenced_code() -> None: + """Test parsing paragraph with fenced code.""" + result = parse_myst("```python\ncode\n```") + + assert len(result) == 1 + assert isinstance(result[0], nodes.literal_block) + + +def test_parse_myst_mixed_content() -> None: + """Test parsing mixed content.""" + result = parse_myst("Intro text\n\n```python\ncode\n```\n\nConclusion") + + assert len(result) == 3 + assert isinstance(result[0], nodes.paragraph) + assert isinstance(result[1], nodes.literal_block) + assert isinstance(result[2], nodes.paragraph) + + +def test_parse_myst_empty_string() -> None: + """Test parsing empty string.""" + result = parse_myst("") + + assert result == [] + + +def test_parse_myst_whitespace_only() -> None: + """Test parsing whitespace-only string.""" + result = parse_myst(" \n\n ") + + assert result == [] + + +# --- parse_help_text tests --- + + +def test_parse_help_text_auto_rst() -> None: + """Test auto-detecting RST format.""" + result = parse_help_text("See :ref:`docs` for more", "auto") + + assert len(result) >= 1 + + +def test_parse_help_text_auto_myst() -> None: + """Test auto-detecting MyST format.""" + result = parse_help_text("See [docs](url) for more", "auto") + + assert len(result) >= 1 + + +def test_parse_help_text_explicit_rst() -> None: + """Test explicit RST format.""" + result = parse_help_text("Plain text", "rst") + + assert len(result) == 1 + assert isinstance(result[0], nodes.paragraph) + + +def test_parse_help_text_explicit_myst() -> None: + """Test explicit MyST format.""" + result = parse_help_text("**bold** text", "myst") + + assert len(result) == 1 + # Should parse as MyST with bold + para = result[0] + strong = [c for c in para.children if isinstance(c, nodes.strong)] + assert len(strong) == 1 + + +def test_parse_help_text_empty() -> None: + """Test parsing empty text.""" + result = parse_help_text("", "auto") + + assert result == [] + + +def test_parse_help_text_none_format() -> None: + """Test with None state (falls back to simple paragraph).""" + result = parse_help_text("Test text", "rst", state=None) + + assert len(result) == 1 + assert isinstance(result[0], nodes.paragraph) + + +# --- Edge cases --- + + +def test_detect_format_myst_link_in_middle() -> None: + """Test detecting MyST format when link is in middle of text.""" + text = "For details, see [the 
documentation](https://docs.example.com) here." + assert detect_format(text) == "myst" + + +def test_detect_format_rst_role_in_middle() -> None: + """Test detecting RST format when role is in middle of text.""" + text = "For details, see :doc:`guide` for more information." + assert detect_format(text) == "rst" + + +def test_parse_myst_consecutive_code_blocks() -> None: + """Test parsing consecutive code blocks.""" + text = "```python\ncode1\n```\n\n```bash\ncode2\n```" + result = parse_myst(text) + + assert len(result) == 2 + assert all(isinstance(n, nodes.literal_block) for n in result) + + +def test_parse_myst_link_with_special_chars() -> None: + """Test parsing link with special characters in URL.""" + parent = nodes.paragraph() + _parse_inline_myst("[link](https://example.com/path?query=1&other=2)", parent) + + refs = [c for c in parent.children if isinstance(c, nodes.reference)] + assert len(refs) == 1 + assert refs[0]["refuri"] == "https://example.com/path?query=1&other=2" + + +def test_parse_myst_nested_formatting() -> None: + """Test that bold inside code doesn't interfere.""" + parent = nodes.paragraph() + _parse_inline_myst("Use `**not bold**` for literal", parent) + + # The **not bold** should be inside the literal, not processed as bold + literals = [c for c in parent.children if isinstance(c, nodes.literal)] + assert len(literals) == 1 + assert "**not bold**" in literals[0].astext() diff --git a/tests/docs/_ext/sphinx_argparse_neo/test_nodes.py b/tests/docs/_ext/sphinx_argparse_neo/test_nodes.py new file mode 100644 index 000000000..c473c9974 --- /dev/null +++ b/tests/docs/_ext/sphinx_argparse_neo/test_nodes.py @@ -0,0 +1,259 @@ +"""Tests for sphinx_argparse_neo.nodes module.""" + +from __future__ import annotations + +from docutils import nodes +from sphinx_argparse_neo.nodes import ( + argparse_argument, + argparse_group, + argparse_program, + argparse_subcommand, + argparse_subcommands, + argparse_usage, +) + +# --- Node creation tests --- + + +def test_argparse_program_creation() -> None: + """Test creating argparse_program node.""" + node = argparse_program() + node["prog"] = "myapp" + + assert node["prog"] == "myapp" + assert isinstance(node, nodes.General) + assert isinstance(node, nodes.Element) + + +def test_argparse_usage_creation() -> None: + """Test creating argparse_usage node.""" + node = argparse_usage() + node["usage"] = "myapp [-h] [--verbose] command" + + assert node["usage"] == "myapp [-h] [--verbose] command" + + +def test_argparse_group_creation() -> None: + """Test creating argparse_group node.""" + node = argparse_group() + node["title"] = "Output Options" + node["description"] = "Control output format" + + assert node["title"] == "Output Options" + assert node["description"] == "Control output format" + + +def test_argparse_argument_creation() -> None: + """Test creating argparse_argument node.""" + node = argparse_argument() + node["names"] = ["-v", "--verbose"] + node["help"] = "Enable verbose mode" + node["metavar"] = None + node["required"] = False + + assert node["names"] == ["-v", "--verbose"] + assert node["help"] == "Enable verbose mode" + + +def test_argparse_subcommands_creation() -> None: + """Test creating argparse_subcommands node.""" + node = argparse_subcommands() + node["title"] = "Commands" + + assert node["title"] == "Commands" + + +def test_argparse_subcommand_creation() -> None: + """Test creating argparse_subcommand node.""" + node = argparse_subcommand() + node["name"] = "sync" + node["aliases"] = ["s"] + node["help"] = 
"Synchronize repositories" + + assert node["name"] == "sync" + assert node["aliases"] == ["s"] + + +# --- Node nesting tests --- + + +def test_program_can_contain_usage() -> None: + """Test that program node can contain usage node.""" + program = argparse_program() + program["prog"] = "myapp" + + usage = argparse_usage() + usage["usage"] = "myapp [-h]" + + program.append(usage) + + assert len(program.children) == 1 + assert isinstance(program.children[0], argparse_usage) + + +def test_program_can_contain_groups() -> None: + """Test that program node can contain group nodes.""" + program = argparse_program() + + group1 = argparse_group() + group1["title"] = "Positional Arguments" + + group2 = argparse_group() + group2["title"] = "Optional Arguments" + + program.append(group1) + program.append(group2) + + assert len(program.children) == 2 + + +def test_group_can_contain_arguments() -> None: + """Test that group node can contain argument nodes.""" + group = argparse_group() + group["title"] = "Options" + + arg1 = argparse_argument() + arg1["names"] = ["-v"] + + arg2 = argparse_argument() + arg2["names"] = ["-o"] + + group.append(arg1) + group.append(arg2) + + assert len(group.children) == 2 + + +def test_subcommands_can_contain_subcommand() -> None: + """Test that subcommands container can contain subcommand nodes.""" + container = argparse_subcommands() + container["title"] = "Commands" + + sub1 = argparse_subcommand() + sub1["name"] = "sync" + + sub2 = argparse_subcommand() + sub2["name"] = "add" + + container.append(sub1) + container.append(sub2) + + assert len(container.children) == 2 + + +def test_subcommand_can_contain_program() -> None: + """Test that subcommand can contain nested program (for recursion).""" + subcommand = argparse_subcommand() + subcommand["name"] = "sync" + + nested_program = argparse_program() + nested_program["prog"] = "myapp sync" + + subcommand.append(nested_program) + + assert len(subcommand.children) == 1 + assert isinstance(subcommand.children[0], argparse_program) + + +# --- Attribute handling tests --- + + +def test_argument_with_all_attributes() -> None: + """Test argument node with all possible attributes.""" + node = argparse_argument() + node["names"] = ["-f", "--file"] + node["help"] = "Input file" + node["metavar"] = "FILE" + node["required"] = True + node["default_string"] = "input.txt" + node["choices"] = ["a", "b", "c"] + node["type_name"] = "str" + node["mutex"] = False + node["mutex_required"] = False + + assert node["names"] == ["-f", "--file"] + assert node["help"] == "Input file" + assert node["metavar"] == "FILE" + assert node["required"] is True + assert node["default_string"] == "input.txt" + assert node["choices"] == ["a", "b", "c"] + assert node["type_name"] == "str" + + +def test_argument_with_mutex_marker() -> None: + """Test argument node marked as part of mutex group.""" + node = argparse_argument() + node["names"] = ["-v"] + node["mutex"] = True + node["mutex_required"] = True + + assert node["mutex"] is True + assert node["mutex_required"] is True + + +def test_node_get_with_default() -> None: + """Test getting attributes with defaults.""" + node = argparse_argument() + node["names"] = ["-v"] + + # Attribute that exists + assert node.get("names") == ["-v"] + + # Attribute that doesn't exist - get() returns None + assert node.get("nonexistent") is None + + # Attribute with explicit default + assert node.get("help", "default help") == "default help" + + +# --- Full tree construction test --- + + +def test_full_node_tree() -> None: + 
"""Test constructing a complete node tree.""" + # Root program + program = argparse_program() + program["prog"] = "myapp" + + # Usage + usage = argparse_usage() + usage["usage"] = "myapp [-h] [-v] command" + program.append(usage) + + # Positional group + pos_group = argparse_group() + pos_group["title"] = "Positional Arguments" + + cmd_arg = argparse_argument() + cmd_arg["names"] = ["command"] + cmd_arg["help"] = "Command to run" + pos_group.append(cmd_arg) + program.append(pos_group) + + # Optional group + opt_group = argparse_group() + opt_group["title"] = "Optional Arguments" + + verbose = argparse_argument() + verbose["names"] = ["-v", "--verbose"] + verbose["help"] = "Verbose mode" + opt_group.append(verbose) + program.append(opt_group) + + # Subcommands + subs = argparse_subcommands() + subs["title"] = "Commands" + + sync_sub = argparse_subcommand() + sync_sub["name"] = "sync" + sync_sub["help"] = "Sync repos" + subs.append(sync_sub) + + program.append(subs) + + # Verify tree structure + assert len(program.children) == 4 # usage, pos_group, opt_group, subs + assert isinstance(program.children[0], argparse_usage) + assert isinstance(program.children[1], argparse_group) + assert isinstance(program.children[2], argparse_group) + assert isinstance(program.children[3], argparse_subcommands) diff --git a/tests/docs/_ext/sphinx_argparse_neo/test_parser.py b/tests/docs/_ext/sphinx_argparse_neo/test_parser.py new file mode 100644 index 000000000..48f43d937 --- /dev/null +++ b/tests/docs/_ext/sphinx_argparse_neo/test_parser.py @@ -0,0 +1,524 @@ +"""Tests for sphinx_argparse_neo.parser module.""" + +from __future__ import annotations + +import argparse +import typing as t + +import pytest +from sphinx_argparse_neo.parser import ( + ArgumentInfo, + ParserInfo, + SubcommandInfo, + _extract_argument, + _format_default, + _get_action_name, + _get_type_name, + extract_parser, +) + +# --- _format_default tests --- + + +class FormatDefaultFixture(t.NamedTuple): + """Test fixture for _format_default function.""" + + test_id: str + default: t.Any + expected: str | None + + +FORMAT_DEFAULT_FIXTURES: list[FormatDefaultFixture] = [ + FormatDefaultFixture( + test_id="none_value", + default=None, + expected="None", + ), + FormatDefaultFixture( + test_id="string_value", + default="hello", + expected="hello", + ), + FormatDefaultFixture( + test_id="integer_value", + default=42, + expected="42", + ), + FormatDefaultFixture( + test_id="float_value", + default=3.14, + expected="3.14", + ), + FormatDefaultFixture( + test_id="list_value", + default=[1, 2, 3], + expected="[1, 2, 3]", + ), + FormatDefaultFixture( + test_id="suppress_value", + default=argparse.SUPPRESS, + expected=None, + ), + FormatDefaultFixture( + test_id="empty_string", + default="", + expected="", + ), + FormatDefaultFixture( + test_id="boolean_true", + default=True, + expected="True", + ), + FormatDefaultFixture( + test_id="boolean_false", + default=False, + expected="False", + ), +] + + +@pytest.mark.parametrize( + FormatDefaultFixture._fields, + FORMAT_DEFAULT_FIXTURES, + ids=[f.test_id for f in FORMAT_DEFAULT_FIXTURES], +) +def test_format_default(test_id: str, default: t.Any, expected: str | None) -> None: + """Test default value formatting.""" + assert _format_default(default) == expected + + +# --- _get_type_name tests --- + + +def test_get_type_name_int() -> None: + """Test type name extraction for int.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--count", type=int) + assert _get_type_name(action) == "int" + + 
+def test_get_type_name_float() -> None: + """Test type name extraction for float.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--ratio", type=float) + assert _get_type_name(action) == "float" + + +def test_get_type_name_str() -> None: + """Test type name extraction for str.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--name", type=str) + assert _get_type_name(action) == "str" + + +def test_get_type_name_none() -> None: + """Test type name extraction when no type specified.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--name") + assert _get_type_name(action) is None + + +def test_get_type_name_callable() -> None: + """Test type name extraction for callable types.""" + from pathlib import Path + + parser = argparse.ArgumentParser() + action = parser.add_argument("--path", type=Path) + assert _get_type_name(action) == "Path" + + +# --- _get_action_name tests --- + + +class ActionNameFixture(t.NamedTuple): + """Test fixture for _get_action_name function.""" + + test_id: str + action_kwargs: dict[str, t.Any] + expected: str + + +ACTION_NAME_FIXTURES: list[ActionNameFixture] = [ + ActionNameFixture( + test_id="store_default", + action_kwargs={"dest": "name"}, + expected="store", + ), + ActionNameFixture( + test_id="store_true", + action_kwargs={"action": "store_true", "dest": "flag"}, + expected="store_true", + ), + ActionNameFixture( + test_id="store_false", + action_kwargs={"action": "store_false", "dest": "flag"}, + expected="store_false", + ), + ActionNameFixture( + test_id="store_const", + action_kwargs={"action": "store_const", "const": "value", "dest": "const"}, + expected="store_const", + ), + ActionNameFixture( + test_id="append", + action_kwargs={"action": "append", "dest": "items"}, + expected="append", + ), + ActionNameFixture( + test_id="count", + action_kwargs={"action": "count", "dest": "verbose"}, + expected="count", + ), +] + + +@pytest.mark.parametrize( + ActionNameFixture._fields, + ACTION_NAME_FIXTURES, + ids=[f.test_id for f in ACTION_NAME_FIXTURES], +) +def test_get_action_name( + test_id: str, action_kwargs: dict[str, t.Any], expected: str +) -> None: + """Test action name extraction.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--test", **action_kwargs) + assert _get_action_name(action) == expected + + +# --- _extract_argument tests --- + + +def test_extract_argument_positional() -> None: + """Test extracting a positional argument.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("filename", help="Input file") + info = _extract_argument(action) + + assert info.names == ["filename"] + assert info.help == "Input file" + assert info.is_positional is True + assert info.required is True + + +def test_extract_argument_optional() -> None: + """Test extracting an optional argument.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("-v", "--verbose", action="store_true", help="Verbose") + info = _extract_argument(action) + + assert info.names == ["-v", "--verbose"] + assert info.action == "store_true" + assert info.is_positional is False + assert info.required is False + + +def test_extract_argument_with_choices() -> None: + """Test extracting argument with choices.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--format", choices=["json", "yaml", "xml"]) + info = _extract_argument(action) + + assert info.choices == ["json", "yaml", "xml"] + + +def test_extract_argument_with_metavar() -> None: + """Test 
extracting argument with metavar.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--output", metavar="FILE") + info = _extract_argument(action) + + assert info.metavar == "FILE" + + +def test_extract_argument_tuple_metavar() -> None: + """Test extracting argument with tuple metavar.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--range", nargs=2, metavar=("MIN", "MAX")) + info = _extract_argument(action) + + assert info.metavar == "MIN MAX" + + +def test_extract_argument_suppressed_help() -> None: + """Test extracting argument with suppressed help.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--secret", help=argparse.SUPPRESS) + info = _extract_argument(action) + + assert info.help is None + + +# --- extract_parser integration tests --- + + +def test_extract_parser_simple(simple_parser: argparse.ArgumentParser) -> None: + """Test extracting a simple parser.""" + info = extract_parser(simple_parser) + + assert info.prog == "myapp" + assert info.description == "A simple test application" + assert len(info.argument_groups) >= 1 + + # Find arguments + all_args = [arg for group in info.argument_groups for arg in group.arguments] + arg_names = [name for arg in all_args for name in arg.names] + + assert "filename" in arg_names + assert "--verbose" in arg_names or "-v" in arg_names + + +def test_extract_parser_with_groups( + parser_with_groups: argparse.ArgumentParser, +) -> None: + """Test extracting parser with custom groups.""" + info = extract_parser(parser_with_groups) + + group_titles = [g.title for g in info.argument_groups] + assert "Input Options" in group_titles + assert "Output Options" in group_titles + + +def test_extract_parser_with_subcommands( + parser_with_subcommands: argparse.ArgumentParser, +) -> None: + """Test extracting parser with subcommands.""" + info = extract_parser(parser_with_subcommands) + + assert info.subcommands is not None + assert len(info.subcommands) == 2 + + subcmd_names = [s.name for s in info.subcommands] + assert "sync" in subcmd_names + assert "add" in subcmd_names + + # Check alias + add_cmd = next(s for s in info.subcommands if s.name == "add") + assert "a" in add_cmd.aliases + + +def test_extract_parser_with_mutex(parser_with_mutex: argparse.ArgumentParser) -> None: + """Test extracting parser with mutually exclusive group.""" + info = extract_parser(parser_with_mutex) + + # Find the group with mutex + for group in info.argument_groups: + if group.mutually_exclusive: + mutex = group.mutually_exclusive[0] + assert mutex.required is True + assert len(mutex.arguments) == 2 + return + + pytest.fail("No mutually exclusive group found") + + +def test_extract_parser_with_all_actions( + parser_with_all_actions: argparse.ArgumentParser, +) -> None: + """Test extracting parser with all action types.""" + info = extract_parser(parser_with_all_actions) + + all_args = [arg for group in info.argument_groups for arg in group.arguments] + actions = {arg.dest: arg.action for arg in all_args} + + assert actions.get("name") == "store" + assert actions.get("enable") == "store_const" + assert actions.get("flag") == "store_true" + assert actions.get("item") == "append" + assert actions.get("verbose") == "count" + + +def test_extract_parser_with_types( + parser_with_types: argparse.ArgumentParser, +) -> None: + """Test extracting parser with typed arguments.""" + info = extract_parser(parser_with_types) + + all_args = [arg for group in info.argument_groups for arg in group.arguments] + types = 
{arg.dest: arg.type_name for arg in all_args} + + assert types.get("count") == "int" + assert types.get("ratio") == "float" + assert types.get("path") == "Path" + + +def test_extract_parser_with_nargs( + parser_with_nargs: argparse.ArgumentParser, +) -> None: + """Test extracting parser with nargs variants.""" + info = extract_parser(parser_with_nargs) + + all_args = [arg for group in info.argument_groups for arg in group.arguments] + nargs_map = {arg.dest: arg.nargs for arg in all_args} + + assert nargs_map.get("optional") == "?" + assert nargs_map.get("zero_or_more") == "*" + assert nargs_map.get("one_or_more") == "+" + assert nargs_map.get("exactly_two") == 2 + + +def test_extract_parser_with_defaults( + parser_with_defaults: argparse.ArgumentParser, +) -> None: + """Test extracting parser with various defaults.""" + info = extract_parser(parser_with_defaults) + + all_args = [arg for group in info.argument_groups for arg in group.arguments] + defaults = {arg.dest: arg.default_string for arg in all_args} + + assert defaults.get("none_default") == "None" + assert defaults.get("string_default") == "hello" + assert defaults.get("int_default") == "42" + # Suppressed default should have None default_string + assert "suppress" not in defaults or defaults.get("suppress") is None + + +def test_extract_parser_nested_subcommands( + nested_subcommands_parser: argparse.ArgumentParser, +) -> None: + """Test extracting parser with nested subcommands.""" + info = extract_parser(nested_subcommands_parser) + + assert info.subcommands is not None + assert len(info.subcommands) == 1 + + repo = info.subcommands[0] + assert repo.name == "repo" + assert repo.parser.subcommands is not None + assert len(repo.parser.subcommands) == 2 + + +def test_extract_parser_usage_generation() -> None: + """Test usage string generation.""" + parser = argparse.ArgumentParser(prog="test") + parser.add_argument("file") + parser.add_argument("-v", "--verbose", action="store_true") + + info = extract_parser(parser) + + assert "test" in info.bare_usage + assert "file" in info.bare_usage + + +def test_extract_parser_custom_usage() -> None: + """Test parser with custom usage string.""" + parser = argparse.ArgumentParser(prog="test", usage="test [options] file") + + info = extract_parser(parser) + + assert info.usage == "test [options] file" + + +def test_extract_parser_with_epilog() -> None: + """Test parser with epilog.""" + parser = argparse.ArgumentParser( + prog="test", + epilog="For more info, see docs.", + ) + + info = extract_parser(parser) + + assert info.epilog == "For more info, see docs." 
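+
+
+# A hedged aside, not executed by these tests: ArgumentInfo is constructed
+# repeatedly below with mostly-None fields. Assuming it is a plain
+# dataclass with exactly the fields used in this module, a small factory
+# could cut that repetition:
+#
+#     def make_argument_info(**overrides: t.Any) -> ArgumentInfo:
+#         base: dict[str, t.Any] = {
+#             "names": [], "help": None, "default": None,
+#             "default_string": None, "choices": None, "required": False,
+#             "metavar": None, "nargs": None, "action": "store",
+#             "type_name": None, "const": None, "dest": "",
+#         }
+#         base.update(overrides)
+#         return ArgumentInfo(**base)
+#
+# Usage: make_argument_info(names=["-f", "--file"], dest="file").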
+ + +# --- ArgumentInfo property tests --- + + +def test_argument_info_is_positional_true() -> None: + """Test is_positional for positional argument.""" + info = ArgumentInfo( + names=["filename"], + help=None, + default=None, + default_string=None, + choices=None, + required=True, + metavar=None, + nargs=None, + action="store", + type_name=None, + const=None, + dest="filename", + ) + assert info.is_positional is True + + +def test_argument_info_is_positional_false() -> None: + """Test is_positional for optional argument.""" + info = ArgumentInfo( + names=["-f", "--file"], + help=None, + default=None, + default_string=None, + choices=None, + required=False, + metavar=None, + nargs=None, + action="store", + type_name=None, + const=None, + dest="file", + ) + assert info.is_positional is False + + +def test_argument_info_empty_names() -> None: + """Test is_positional with empty names list.""" + info = ArgumentInfo( + names=[], + help=None, + default=None, + default_string=None, + choices=None, + required=False, + metavar=None, + nargs=None, + action="store", + type_name=None, + const=None, + dest="empty", + ) + assert info.is_positional is False + + +# --- Dataclass tests --- + + +def test_parser_info_dataclass() -> None: + """Test ParserInfo dataclass creation.""" + info = ParserInfo( + prog="test", + usage=None, + bare_usage="test [-h]", + description="Test description", + epilog="Test epilog", + argument_groups=[], + subcommands=None, + subcommand_dest=None, + ) + + assert info.prog == "test" + assert info.description == "Test description" + + +def test_subcommand_info_recursive() -> None: + """Test SubcommandInfo with nested parser.""" + nested_info = ParserInfo( + prog="nested", + usage=None, + bare_usage="nested [-h]", + description=None, + epilog=None, + argument_groups=[], + subcommands=None, + subcommand_dest=None, + ) + + sub = SubcommandInfo( + name="sub", + aliases=[], + help="Subcommand help", + parser=nested_info, + ) + + assert sub.parser.prog == "nested" diff --git a/tests/docs/_ext/sphinx_argparse_neo/test_renderer.py b/tests/docs/_ext/sphinx_argparse_neo/test_renderer.py new file mode 100644 index 000000000..5e522e9ab --- /dev/null +++ b/tests/docs/_ext/sphinx_argparse_neo/test_renderer.py @@ -0,0 +1,466 @@ +"""Tests for sphinx_argparse_neo.renderer module.""" + +from __future__ import annotations + +import argparse +import typing as t + +from sphinx_argparse_neo.nodes import ( + argparse_argument, + argparse_group, + argparse_program, + argparse_subcommand, + argparse_subcommands, + argparse_usage, +) +from sphinx_argparse_neo.parser import ( + ArgumentGroup, + ArgumentInfo, + MutuallyExclusiveGroup, + ParserInfo, + SubcommandInfo, + extract_parser, +) +from sphinx_argparse_neo.renderer import ( + ArgparseRenderer, + RenderConfig, + create_renderer, +) + +# --- RenderConfig tests --- + + +def test_render_config_defaults() -> None: + """Test RenderConfig default values.""" + config = RenderConfig() + + assert config.heading_level == 2 + assert config.use_rubric is False + assert config.group_title_prefix == "" + assert config.include_in_toc is True + assert config.flatten_subcommands is False + assert config.show_defaults is True + assert config.show_choices is True + assert config.show_types is True + + +def test_render_config_custom_values() -> None: + """Test RenderConfig with custom values.""" + config = RenderConfig( + heading_level=3, + use_rubric=True, + group_title_prefix="CLI ", + flatten_subcommands=True, + show_defaults=False, + ) + + assert config.heading_level 
== 3 + assert config.use_rubric is True + assert config.group_title_prefix == "CLI " + assert config.flatten_subcommands is True + assert config.show_defaults is False + + +# --- ArgparseRenderer basic tests --- + + +def test_renderer_creation_default_config() -> None: + """Test creating renderer with default config.""" + renderer = ArgparseRenderer() + + assert renderer.config is not None + assert renderer.config.heading_level == 2 + + +def test_renderer_creation_custom_config() -> None: + """Test creating renderer with custom config.""" + config = RenderConfig(heading_level=4) + renderer = ArgparseRenderer(config=config) + + assert renderer.config.heading_level == 4 + + +def test_create_renderer_factory() -> None: + """Test create_renderer factory function.""" + renderer = create_renderer() + assert isinstance(renderer, ArgparseRenderer) + + +def test_create_renderer_with_config() -> None: + """Test create_renderer with custom config.""" + config = RenderConfig(use_rubric=True) + renderer = create_renderer(config=config) + + assert renderer.config.use_rubric is True + + +# --- Render method tests --- + + +def test_render_simple_parser(simple_parser: argparse.ArgumentParser) -> None: + """Test rendering a simple parser.""" + parser_info = extract_parser(simple_parser) + renderer = ArgparseRenderer() + nodes = renderer.render(parser_info) + + assert len(nodes) == 1 + assert isinstance(nodes[0], argparse_program) + assert nodes[0]["prog"] == "myapp" + + +def test_render_includes_usage(simple_parser: argparse.ArgumentParser) -> None: + """Test that render includes usage node.""" + parser_info = extract_parser(simple_parser) + renderer = ArgparseRenderer() + nodes = renderer.render(parser_info) + + program = nodes[0] + usage_nodes = [c for c in program.children if isinstance(c, argparse_usage)] + + assert len(usage_nodes) == 1 + assert "myapp" in usage_nodes[0]["usage"] + + +def test_render_includes_groups(simple_parser: argparse.ArgumentParser) -> None: + """Test that render includes argument groups.""" + parser_info = extract_parser(simple_parser) + renderer = ArgparseRenderer() + nodes = renderer.render(parser_info) + + program = nodes[0] + group_nodes = [c for c in program.children if isinstance(c, argparse_group)] + + assert len(group_nodes) >= 1 + + +def test_render_groups_contain_arguments( + simple_parser: argparse.ArgumentParser, +) -> None: + """Test that rendered groups contain argument nodes.""" + parser_info = extract_parser(simple_parser) + renderer = ArgparseRenderer() + nodes = renderer.render(parser_info) + + program = nodes[0] + group_nodes = [c for c in program.children if isinstance(c, argparse_group)] + + # At least one group should have arguments + all_args = [ + arg + for group in group_nodes + for arg in group.children + if isinstance(arg, argparse_argument) + ] + assert len(all_args) >= 1 + + +def test_render_with_subcommands( + parser_with_subcommands: argparse.ArgumentParser, +) -> None: + """Test rendering parser with subcommands.""" + parser_info = extract_parser(parser_with_subcommands) + renderer = ArgparseRenderer() + nodes = renderer.render(parser_info) + + program = nodes[0] + subcommands_nodes = [ + c for c in program.children if isinstance(c, argparse_subcommands) + ] + + assert len(subcommands_nodes) == 1 + + # Check subcommand children + subs_container = subcommands_nodes[0] + subcmd_nodes = [ + c for c in subs_container.children if isinstance(c, argparse_subcommand) + ] + assert len(subcmd_nodes) == 2 + + +# --- Config option effect tests --- + + +def 
test_render_group_title_prefix() -> None: + """Test that group_title_prefix is applied.""" + parser = argparse.ArgumentParser(prog="test") + parser.add_argument("--opt") + parser_info = extract_parser(parser) + + config = RenderConfig(group_title_prefix="CLI: ") + renderer = ArgparseRenderer(config=config) + nodes = renderer.render(parser_info) + + program = nodes[0] + group_nodes = [c for c in program.children if isinstance(c, argparse_group)] + + # At least one group should have the prefix + titles = [g["title"] for g in group_nodes] + assert any(t.startswith("CLI: ") for t in titles) + + +def test_render_show_defaults_false() -> None: + """Test that show_defaults=False hides defaults.""" + parser = argparse.ArgumentParser(prog="test") + parser.add_argument("--opt", default="value") + parser_info = extract_parser(parser) + + config = RenderConfig(show_defaults=False) + renderer = ArgparseRenderer(config=config) + nodes = renderer.render(parser_info) + + program = nodes[0] + group_nodes = [c for c in program.children if isinstance(c, argparse_group)] + all_args = [ + arg + for group in group_nodes + for arg in group.children + if isinstance(arg, argparse_argument) + ] + + # Default string should not be set + for arg in all_args: + assert arg.get("default_string") is None + + +def test_render_show_choices_false() -> None: + """Test that show_choices=False hides choices.""" + parser = argparse.ArgumentParser(prog="test") + parser.add_argument("--format", choices=["json", "yaml"]) + parser_info = extract_parser(parser) + + config = RenderConfig(show_choices=False) + renderer = ArgparseRenderer(config=config) + nodes = renderer.render(parser_info) + + program = nodes[0] + group_nodes = [c for c in program.children if isinstance(c, argparse_group)] + all_args = [ + arg + for group in group_nodes + for arg in group.children + if isinstance(arg, argparse_argument) + ] + + # Choices should not be set + for arg in all_args: + assert arg.get("choices") is None + + +def test_render_show_types_false() -> None: + """Test that show_types=False hides type info.""" + parser = argparse.ArgumentParser(prog="test") + parser.add_argument("--count", type=int) + parser_info = extract_parser(parser) + + config = RenderConfig(show_types=False) + renderer = ArgparseRenderer(config=config) + nodes = renderer.render(parser_info) + + program = nodes[0] + group_nodes = [c for c in program.children if isinstance(c, argparse_group)] + all_args = [ + arg + for group in group_nodes + for arg in group.children + if isinstance(arg, argparse_argument) + ] + + # Type name should not be set + for arg in all_args: + assert arg.get("type_name") is None + + +# --- Individual render method tests --- + + +def test_render_usage_method() -> None: + """Test render_usage method directly.""" + parser_info = ParserInfo( + prog="test", + usage=None, + bare_usage="test [-h] [-v]", + description=None, + epilog=None, + argument_groups=[], + subcommands=None, + subcommand_dest=None, + ) + + renderer = ArgparseRenderer() + usage_node = renderer.render_usage(parser_info) + + assert isinstance(usage_node, argparse_usage) + assert usage_node["usage"] == "test [-h] [-v]" + + +def test_render_argument_method() -> None: + """Test render_argument method directly.""" + arg_info = ArgumentInfo( + names=["-v", "--verbose"], + help="Enable verbose mode", + default=False, + default_string="False", + choices=None, + required=False, + metavar=None, + nargs=None, + action="store_true", + type_name=None, + const=True, + dest="verbose", + ) + + renderer = 
ArgparseRenderer() + arg_node = renderer.render_argument(arg_info) + + assert isinstance(arg_node, argparse_argument) + assert arg_node["names"] == ["-v", "--verbose"] + assert arg_node["help"] == "Enable verbose mode" + + +def test_render_group_method() -> None: + """Test render_group method directly.""" + group_info = ArgumentGroup( + title="Options", + description="Available options", + arguments=[ + ArgumentInfo( + names=["-v"], + help="Verbose", + default=False, + default_string="False", + choices=None, + required=False, + metavar=None, + nargs=None, + action="store_true", + type_name=None, + const=True, + dest="verbose", + ), + ], + mutually_exclusive=[], + ) + + renderer = ArgparseRenderer() + group_node = renderer.render_group(group_info) + + assert isinstance(group_node, argparse_group) + assert group_node["title"] == "Options" + assert group_node["description"] == "Available options" + assert len(group_node.children) == 1 + + +def test_render_mutex_group_method() -> None: + """Test render_mutex_group method.""" + mutex = MutuallyExclusiveGroup( + arguments=[ + ArgumentInfo( + names=["-v"], + help="Verbose", + default=False, + default_string="False", + choices=None, + required=False, + metavar=None, + nargs=None, + action="store_true", + type_name=None, + const=True, + dest="verbose", + ), + ArgumentInfo( + names=["-q"], + help="Quiet", + default=False, + default_string="False", + choices=None, + required=False, + metavar=None, + nargs=None, + action="store_true", + type_name=None, + const=True, + dest="quiet", + ), + ], + required=True, + ) + + renderer = ArgparseRenderer() + nodes = renderer.render_mutex_group(mutex) + + assert len(nodes) == 2 + assert all(isinstance(n, argparse_argument) for n in nodes) + assert all(n.get("mutex") is True for n in nodes) + assert all(n.get("mutex_required") is True for n in nodes) + + +def test_render_subcommand_method() -> None: + """Test render_subcommand method.""" + nested_parser = ParserInfo( + prog="myapp sub", + usage=None, + bare_usage="myapp sub [-h]", + description="Subcommand description", + epilog=None, + argument_groups=[], + subcommands=None, + subcommand_dest=None, + ) + + subcmd_info = SubcommandInfo( + name="sub", + aliases=["s"], + help="Subcommand help", + parser=nested_parser, + ) + + renderer = ArgparseRenderer() + subcmd_node = renderer.render_subcommand(subcmd_info) + + assert isinstance(subcmd_node, argparse_subcommand) + assert subcmd_node["name"] == "sub" + assert subcmd_node["aliases"] == ["s"] + assert subcmd_node["help"] == "Subcommand help" + + # Should have nested program + nested = [c for c in subcmd_node.children if isinstance(c, argparse_program)] + assert len(nested) == 1 + + +# --- Post-process hook test --- + + +def test_post_process_default() -> None: + """Test that default post_process returns nodes unchanged.""" + renderer = ArgparseRenderer() + + from docutils import nodes as dn + + input_nodes = [dn.paragraph(text="test")] + + result = renderer.post_process(input_nodes) + + assert result == input_nodes + + +def test_post_process_custom() -> None: + """Test custom post_process implementation.""" + + class CustomRenderer(ArgparseRenderer): # type: ignore[misc] + def post_process(self, result_nodes: list[t.Any]) -> list[t.Any]: + # Add a marker to each node + for node in result_nodes: + node["custom_marker"] = True + return result_nodes + + renderer = CustomRenderer() + + from docutils import nodes as dn + + input_nodes = [dn.paragraph(text="test")] + + result = renderer.post_process(input_nodes) + + 
assert result[0].get("custom_marker") is True diff --git a/tests/docs/_ext/test_argparse_lexer.py b/tests/docs/_ext/test_argparse_lexer.py new file mode 100644 index 000000000..6f7cb0778 --- /dev/null +++ b/tests/docs/_ext/test_argparse_lexer.py @@ -0,0 +1,794 @@ +"""Tests for argparse_lexer Pygments extension.""" + +from __future__ import annotations + +import typing as t + +import pytest +from argparse_lexer import ( + ArgparseHelpLexer, + ArgparseLexer, + ArgparseUsageLexer, + tokenize_argparse, + tokenize_usage, +) + +# --- Helper to extract token type names --- + + +def get_tokens(text: str, lexer_class: type = ArgparseLexer) -> list[tuple[str, str]]: + """Get tokens as (type_name, value) tuples. + + Examples + -------- + >>> tokens = get_tokens("usage: cmd [-h]") + >>> any(t[0] == "Token.Name.Attribute" for t in tokens) + True + """ + lexer = lexer_class() + return [ + (str(tok_type), tok_value) for tok_type, tok_value in lexer.get_tokens(text) + ] + + +def get_usage_tokens(text: str) -> list[tuple[str, str]]: + """Get tokens using ArgparseUsageLexer. + + Examples + -------- + >>> tokens = get_usage_tokens("usage: cmd") + >>> tokens[0] + ('Token.Generic.Heading', 'usage:') + """ + return get_tokens(text, ArgparseUsageLexer) + + +def get_help_tokens(text: str) -> list[tuple[str, str]]: + """Get tokens using ArgparseHelpLexer. + + Examples + -------- + >>> tokens = get_help_tokens("positional arguments:") + >>> any("Subheading" in t[0] for t in tokens) + True + """ + return get_tokens(text, ArgparseHelpLexer) + + +# --- Token type fixtures --- + + +class TokenTypeFixture(t.NamedTuple): + """Test fixture for verifying specific token types.""" + + test_id: str + input_text: str + expected_token_type: str + expected_value: str + + +TOKEN_TYPE_FIXTURES: list[TokenTypeFixture] = [ + TokenTypeFixture( + test_id="usage_heading", + input_text="usage:", + expected_token_type="Token.Generic.Heading", + expected_value="usage:", + ), + TokenTypeFixture( + test_id="short_option", + input_text="-h", + expected_token_type="Token.Name.Attribute", + expected_value="-h", + ), + TokenTypeFixture( + test_id="short_option_v", + input_text="-v", + expected_token_type="Token.Name.Attribute", + expected_value="-v", + ), + TokenTypeFixture( + test_id="long_option", + input_text="--verbose", + expected_token_type="Token.Name.Tag", + expected_value="--verbose", + ), + TokenTypeFixture( + test_id="long_option_with_dashes", + input_text="--no-color", + expected_token_type="Token.Name.Tag", + expected_value="--no-color", + ), + TokenTypeFixture( + test_id="uppercase_metavar", + input_text="FILE", + expected_token_type="Token.Name.Variable", + expected_value="FILE", + ), + TokenTypeFixture( + test_id="uppercase_metavar_path", + input_text="PATH", + expected_token_type="Token.Name.Variable", + expected_value="PATH", + ), + TokenTypeFixture( + test_id="uppercase_metavar_with_underscore", + input_text="FILE_PATH", + expected_token_type="Token.Name.Variable", + expected_value="FILE_PATH", + ), + TokenTypeFixture( + test_id="command_name", + input_text="sync", + expected_token_type="Token.Name.Label", + expected_value="sync", + ), + TokenTypeFixture( + test_id="command_with_dash", + input_text="repo-name", + expected_token_type="Token.Name.Label", + expected_value="repo-name", + ), + TokenTypeFixture( + test_id="open_bracket", + input_text="[", + expected_token_type="Token.Punctuation", + expected_value="[", + ), + TokenTypeFixture( + test_id="close_bracket", + input_text="]", + expected_token_type="Token.Punctuation", + 
expected_value="]", + ), + TokenTypeFixture( + test_id="open_paren", + input_text="(", + expected_token_type="Token.Punctuation", + expected_value="(", + ), + TokenTypeFixture( + test_id="close_paren", + input_text=")", + expected_token_type="Token.Punctuation", + expected_value=")", + ), + TokenTypeFixture( + test_id="open_brace", + input_text="{", + expected_token_type="Token.Punctuation", + expected_value="{", + ), + TokenTypeFixture( + test_id="pipe_operator", + input_text="|", + expected_token_type="Token.Operator", + expected_value="|", + ), + TokenTypeFixture( + test_id="ellipsis", + input_text="...", + expected_token_type="Token.Punctuation", + expected_value="...", + ), +] + + +@pytest.mark.parametrize( + list(TokenTypeFixture._fields), + TOKEN_TYPE_FIXTURES, + ids=[f.test_id for f in TOKEN_TYPE_FIXTURES], +) +def test_token_type( + test_id: str, + input_text: str, + expected_token_type: str, + expected_value: str, +) -> None: + """Test individual token type detection.""" + tokens = get_usage_tokens(input_text) + # Find the expected token (skip whitespace) + non_ws_tokens = [(t, v) for t, v in tokens if "Whitespace" not in t and v.strip()] + assert len(non_ws_tokens) >= 1, f"No non-whitespace tokens found for '{input_text}'" + token_type, token_value = non_ws_tokens[0] + assert token_type == expected_token_type, ( + f"Expected {expected_token_type}, got {token_type}" + ) + assert token_value == expected_value + + +# --- Choice fixtures --- + + +class ChoiceFixture(t.NamedTuple): + """Test fixture for choice enumeration patterns.""" + + test_id: str + input_text: str + expected_choices: list[str] + + +CHOICE_FIXTURES: list[ChoiceFixture] = [ + ChoiceFixture( + test_id="simple_choices", + input_text="{json,yaml,table}", + expected_choices=["json", "yaml", "table"], + ), + ChoiceFixture( + test_id="numeric_choices", + input_text="{1,2,3}", + expected_choices=["1", "2", "3"], + ), + ChoiceFixture( + test_id="auto_always_never", + input_text="{auto,always,never}", + expected_choices=["auto", "always", "never"], + ), + ChoiceFixture( + test_id="two_choices", + input_text="{a,b}", + expected_choices=["a", "b"], + ), +] + + +@pytest.mark.parametrize( + list(ChoiceFixture._fields), + CHOICE_FIXTURES, + ids=[f.test_id for f in CHOICE_FIXTURES], +) +def test_choices( + test_id: str, + input_text: str, + expected_choices: list[str], +) -> None: + """Test choice enumeration tokenization.""" + tokens = get_usage_tokens(input_text) + # Extract choice values (Name.Constant tokens) + choice_tokens = [v for t, v in tokens if t == "Token.Name.Constant"] + assert choice_tokens == expected_choices + + +# --- Mutex group fixtures --- + + +class MutexGroupFixture(t.NamedTuple): + """Test fixture for mutually exclusive group patterns.""" + + test_id: str + input_text: str + expected_options: list[str] + is_required: bool + + +MUTEX_GROUP_FIXTURES: list[MutexGroupFixture] = [ + MutexGroupFixture( + test_id="optional_short", + input_text="[-a | -b | -c]", + expected_options=["-a", "-b", "-c"], + is_required=False, + ), + MutexGroupFixture( + test_id="optional_long", + input_text="[--foo FOO | --bar BAR]", + expected_options=["--foo", "--bar"], + is_required=False, + ), + MutexGroupFixture( + test_id="required_long", + input_text="(--foo | --bar)", + expected_options=["--foo", "--bar"], + is_required=True, + ), + MutexGroupFixture( + test_id="required_with_metavar", + input_text="(--input FILE | --stdin)", + expected_options=["--input", "--stdin"], + is_required=True, + ), + MutexGroupFixture( + 
test_id="optional_output_formats", + input_text="[--json | --ndjson | --table]", + expected_options=["--json", "--ndjson", "--table"], + is_required=False, + ), +] + + +@pytest.mark.parametrize( + list(MutexGroupFixture._fields), + MUTEX_GROUP_FIXTURES, + ids=[f.test_id for f in MUTEX_GROUP_FIXTURES], +) +def test_mutex_groups( + test_id: str, + input_text: str, + expected_options: list[str], + is_required: bool, +) -> None: + """Test mutually exclusive group tokenization.""" + tokens = get_usage_tokens(input_text) + + # Check for proper brackets (required uses parens, optional uses brackets) + if is_required: + assert ("Token.Punctuation", "(") in tokens + assert ("Token.Punctuation", ")") in tokens + else: + assert ("Token.Punctuation", "[") in tokens + assert ("Token.Punctuation", "]") in tokens + + # Check pipe operators present + pipe_count = sum(1 for t, v in tokens if t == "Token.Operator" and v == "|") + assert pipe_count == len(expected_options) - 1 + + # Check options are present + for opt in expected_options: + if opt.startswith("--"): + assert ("Token.Name.Tag", opt) in tokens + else: + assert ("Token.Name.Attribute", opt) in tokens + + +# --- Nargs pattern fixtures --- + + +class NargsFixture(t.NamedTuple): + """Test fixture for nargs/variadic patterns.""" + + test_id: str + input_text: str + has_ellipsis: bool + has_metavar: str | None + + +NARGS_FIXTURES: list[NargsFixture] = [ + NargsFixture( + test_id="nargs_plus", + input_text="FILE ...", + has_ellipsis=True, + has_metavar="FILE", + ), + NargsFixture( + test_id="nargs_star", + input_text="[FILE ...]", + has_ellipsis=True, + has_metavar="FILE", + ), + NargsFixture( + test_id="nargs_question", + input_text="[--foo [FOO]]", + has_ellipsis=False, + has_metavar="FOO", + ), + NargsFixture( + test_id="nargs_plus_with_option", + input_text="[--bar X [X ...]]", + has_ellipsis=True, + has_metavar="X", + ), +] + + +@pytest.mark.parametrize( + list(NargsFixture._fields), + NARGS_FIXTURES, + ids=[f.test_id for f in NARGS_FIXTURES], +) +def test_nargs_patterns( + test_id: str, + input_text: str, + has_ellipsis: bool, + has_metavar: str | None, +) -> None: + """Test nargs/variadic pattern tokenization.""" + tokens = get_usage_tokens(input_text) + + # Check ellipsis + ellipsis_present = ("Token.Punctuation", "...") in tokens + assert ellipsis_present == has_ellipsis + + # Check metavar + if has_metavar: + assert ("Token.Name.Variable", has_metavar) in tokens + + +# --- Long option with value fixtures --- + + +class LongOptionValueFixture(t.NamedTuple): + """Test fixture for long options with = values.""" + + test_id: str + input_text: str + option: str + value: str + + +LONG_OPTION_VALUE_FIXTURES: list[LongOptionValueFixture] = [ + LongOptionValueFixture( + test_id="config_file", + input_text="--config=FILE", + option="--config", + value="FILE", + ), + LongOptionValueFixture( + test_id="log_level", + input_text="--log-level=DEBUG", + option="--log-level", + value="DEBUG", + ), + LongOptionValueFixture( + test_id="lowercase_value", + input_text="--output=path", + option="--output", + value="path", + ), +] + + +@pytest.mark.parametrize( + list(LongOptionValueFixture._fields), + LONG_OPTION_VALUE_FIXTURES, + ids=[f.test_id for f in LONG_OPTION_VALUE_FIXTURES], +) +def test_long_option_with_equals_value( + test_id: str, + input_text: str, + option: str, + value: str, +) -> None: + """Test long option with = value tokenization.""" + tokens = get_usage_tokens(input_text) + non_ws_tokens = [(t, v) for t, v in tokens if "Whitespace" not in t] + 
+ assert len(non_ws_tokens) >= 3 + assert non_ws_tokens[0] == ("Token.Name.Tag", option) + assert non_ws_tokens[1] == ("Token.Operator", "=") + assert non_ws_tokens[2][1] == value + + +# --- Short option with value fixtures --- + + +class ShortOptionValueFixture(t.NamedTuple): + """Test fixture for short options with space-separated values.""" + + test_id: str + input_text: str + option: str + value: str + + +SHORT_OPTION_VALUE_FIXTURES: list[ShortOptionValueFixture] = [ + ShortOptionValueFixture( + test_id="config_path", + input_text="-c config-path", + option="-c", + value="config-path", + ), + ShortOptionValueFixture( + test_id="directory", + input_text="-d DIRECTORY", + option="-d", + value="DIRECTORY", + ), + ShortOptionValueFixture( + test_id="simple_name", + input_text="-r name", + option="-r", + value="name", + ), +] + + +@pytest.mark.parametrize( + list(ShortOptionValueFixture._fields), + SHORT_OPTION_VALUE_FIXTURES, + ids=[f.test_id for f in SHORT_OPTION_VALUE_FIXTURES], +) +def test_short_option_with_value( + test_id: str, + input_text: str, + option: str, + value: str, +) -> None: + """Test short option followed by value tokenization.""" + tokens = get_usage_tokens(input_text) + non_ws_tokens = [(t, v) for t, v in tokens if "Whitespace" not in t] + + assert len(non_ws_tokens) >= 2 + assert non_ws_tokens[0] == ("Token.Name.Attribute", option) + assert non_ws_tokens[1][1] == value + + +# --- Full usage string fixtures --- + + +class UsageStringFixture(t.NamedTuple): + """Test fixture for full usage string tokenization.""" + + test_id: str + input_text: str + expected_contains: list[tuple[str, str]] + + +USAGE_STRING_FIXTURES: list[UsageStringFixture] = [ + UsageStringFixture( + test_id="simple_usage", + input_text="usage: cmd [-h]", + expected_contains=[ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "cmd"), + ("Token.Punctuation", "["), + ("Token.Name.Attribute", "-h"), + ("Token.Punctuation", "]"), + ], + ), + UsageStringFixture( + test_id="mutually_exclusive", + input_text="[--json | --ndjson | --table]", + expected_contains=[ + ("Token.Name.Tag", "--json"), + ("Token.Operator", "|"), + ("Token.Name.Tag", "--ndjson"), + ("Token.Operator", "|"), + ("Token.Name.Tag", "--table"), + ], + ), + UsageStringFixture( + test_id="subcommand", + input_text="usage: vcspull sync", + expected_contains=[ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "vcspull"), + ("Token.Name.Label", "sync"), + ], + ), + UsageStringFixture( + test_id="with_choices", + input_text="usage: cmd {a,b,c}", + expected_contains=[ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Constant", "a"), + ("Token.Name.Constant", "b"), + ("Token.Name.Constant", "c"), + ], + ), + UsageStringFixture( + test_id="complex_usage", + input_text="usage: prog [-h] [--verbose] FILE ...", + expected_contains=[ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "prog"), + ("Token.Name.Attribute", "-h"), + ("Token.Name.Tag", "--verbose"), + ("Token.Name.Variable", "FILE"), + ("Token.Punctuation", "..."), + ], + ), +] + + +@pytest.mark.parametrize( + list(UsageStringFixture._fields), + USAGE_STRING_FIXTURES, + ids=[f.test_id for f in USAGE_STRING_FIXTURES], +) +def test_usage_string( + test_id: str, + input_text: str, + expected_contains: list[tuple[str, str]], +) -> None: + """Test full usage string tokenization contains expected tokens.""" + tokens = get_usage_tokens(input_text) + for expected_type, expected_value in expected_contains: + assert (expected_type, expected_value) in tokens, ( + 
f"Expected ({expected_type}, {expected_value!r}) not found in tokens" + ) + + +# --- Section header fixtures --- + + +class SectionHeaderFixture(t.NamedTuple): + """Test fixture for section header recognition.""" + + test_id: str + input_text: str + expected_header: str + + +SECTION_HEADER_FIXTURES: list[SectionHeaderFixture] = [ + SectionHeaderFixture( + test_id="positional_arguments", + input_text="positional arguments:", + expected_header="positional arguments:", + ), + SectionHeaderFixture( + test_id="options", + input_text="options:", + expected_header="options:", + ), + SectionHeaderFixture( + test_id="optional_arguments", + input_text="optional arguments:", + expected_header="optional arguments:", + ), + SectionHeaderFixture( + test_id="custom_section", + input_text="advanced options:", + expected_header="advanced options:", + ), +] + + +@pytest.mark.parametrize( + list(SectionHeaderFixture._fields), + SECTION_HEADER_FIXTURES, + ids=[f.test_id for f in SECTION_HEADER_FIXTURES], +) +def test_section_headers( + test_id: str, + input_text: str, + expected_header: str, +) -> None: + """Test section header tokenization.""" + tokens = get_help_tokens(input_text) + # Section headers should be Generic.Subheading + # Strip newlines from token values (lexer may include trailing \n) + subheading_tokens = [ + v.strip() for t, v in tokens if t == "Token.Generic.Subheading" + ] + assert expected_header in subheading_tokens + + +# --- Full help output test --- + + +def test_full_help_output() -> None: + """Test full argparse -h output tokenization.""" + help_text = """\ +usage: vcspull sync [-h] [-c CONFIG] [-d DIRECTORY] + [--json | --ndjson | --table] + [repo-name] [path] + +positional arguments: + repo-name repository name filter + path path filter + +options: + -h, --help show this help message and exit + -c CONFIG, --config CONFIG + config file path + --json output as JSON +""" + tokens = get_help_tokens(help_text) + + # Check usage heading + assert ("Token.Generic.Heading", "usage:") in tokens + + # Check section headers + subheadings = [v for t, v in tokens if t == "Token.Generic.Subheading"] + assert "positional arguments:" in subheadings + assert "options:" in subheadings + + # Check options are recognized + assert ("Token.Name.Attribute", "-h") in tokens + assert ("Token.Name.Tag", "--help") in tokens + assert ("Token.Name.Tag", "--config") in tokens + assert ("Token.Name.Tag", "--json") in tokens + + # Check command/positional names + assert ("Token.Name.Label", "vcspull") in tokens + assert ("Token.Name.Label", "sync") in tokens + + +# --- Real vcspull usage output test --- + + +def test_vcspull_sync_usage() -> None: + """Test real vcspull sync usage output tokenization.""" + usage_text = """\ +usage: vcspull sync [-h] [-c CONFIG] [-d DIRECTORY] + [--json | --ndjson | --table] [--color {auto,always,never}] + [--no-progress] [--verbose] + [repo-name] [path]""" + + tokens = get_usage_tokens(usage_text) + + expected = [ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "vcspull"), + ("Token.Name.Label", "sync"), + ("Token.Name.Attribute", "-h"), + ("Token.Name.Attribute", "-c"), + ("Token.Name.Variable", "CONFIG"), + ("Token.Name.Attribute", "-d"), + ("Token.Name.Variable", "DIRECTORY"), + ("Token.Name.Tag", "--json"), + ("Token.Name.Tag", "--ndjson"), + ("Token.Name.Tag", "--table"), + ("Token.Name.Tag", "--color"), + ("Token.Name.Tag", "--no-progress"), + ("Token.Name.Tag", "--verbose"), + ("Token.Name.Label", "repo-name"), + ("Token.Name.Label", "path"), + ] + + for 
expected_type, expected_value in expected: + assert (expected_type, expected_value) in tokens, ( + f"Expected ({expected_type}, {expected_value!r}) not in tokens" + ) + + # Check choices are properly tokenized + assert ("Token.Name.Constant", "auto") in tokens + assert ("Token.Name.Constant", "always") in tokens + assert ("Token.Name.Constant", "never") in tokens + + +# --- tokenize_argparse helper function test --- + + +def test_tokenize_argparse_helper() -> None: + """Test the tokenize_argparse helper function.""" + result = tokenize_argparse("usage: cmd [-h]") + + assert result[0] == ("Token.Generic.Heading", "usage:") + assert ("Token.Name.Label", "cmd") in result + assert ("Token.Name.Attribute", "-h") in result + + +def test_tokenize_usage_helper() -> None: + """Test the tokenize_usage helper function.""" + result = tokenize_usage("usage: cmd [-h]") + + assert result[0] == ("Token.Generic.Heading", "usage:") + assert ("Token.Name.Label", "cmd") in result + assert ("Token.Name.Attribute", "-h") in result + + +# --- Lexer class selection tests --- + + +def test_argparse_lexer_usage_detection() -> None: + """Test ArgparseLexer handles usage lines correctly.""" + lexer = ArgparseLexer() + tokens = list(lexer.get_tokens("usage: cmd [-h]")) + token_types = [str(t) for t, v in tokens] + assert "Token.Generic.Heading" in token_types + + +def test_argparse_lexer_section_detection() -> None: + """Test ArgparseLexer handles section headers correctly.""" + lexer = ArgparseLexer() + tokens = list(lexer.get_tokens("positional arguments:")) + token_types = [str(t) for t, v in tokens] + assert "Token.Generic.Subheading" in token_types + + +def test_argparse_usage_lexer_standalone() -> None: + """Test ArgparseUsageLexer works standalone.""" + lexer = ArgparseUsageLexer() + tokens = list(lexer.get_tokens("usage: cmd [-h] --foo FILE")) + token_types = [str(t) for t, v in tokens] + + assert "Token.Generic.Heading" in token_types + assert "Token.Name.Label" in token_types # cmd + assert "Token.Name.Attribute" in token_types # -h + assert "Token.Name.Tag" in token_types # --foo + + +def test_argparse_help_lexer_multiline() -> None: + """Test ArgparseHelpLexer handles multiline help.""" + lexer = ArgparseHelpLexer() + help_text = """usage: cmd + +options: + -h help +""" + tokens = list(lexer.get_tokens(help_text)) + token_values = [v for t, v in tokens] + + assert "usage:" in token_values + assert "options:" in token_values or any( + "options:" in v for v in token_values if isinstance(v, str) + ) diff --git a/tests/docs/_ext/test_argparse_roles.py b/tests/docs/_ext/test_argparse_roles.py new file mode 100644 index 000000000..c31e12691 --- /dev/null +++ b/tests/docs/_ext/test_argparse_roles.py @@ -0,0 +1,439 @@ +"""Tests for argparse_roles docutils extension.""" + +from __future__ import annotations + +import typing as t + +import pytest +from argparse_roles import ( + cli_choice_role, + cli_command_role, + cli_default_role, + cli_metavar_role, + cli_option_role, + normalize_options, + register_roles, +) +from docutils import nodes + +# --- normalize_options tests --- + + +def test_normalize_options_none() -> None: + """Test normalize_options with None input.""" + assert normalize_options(None) == {} + + +def test_normalize_options_dict() -> None: + """Test normalize_options with dict input.""" + opts = {"class": "custom"} + assert normalize_options(opts) == {"class": "custom"} + + +def test_normalize_options_empty_dict() -> None: + """Test normalize_options with empty dict input.""" + assert 
normalize_options({}) == {} + + +# --- CLI Option Role Tests --- + + +class OptionRoleFixture(t.NamedTuple): + """Test fixture for CLI option role.""" + + test_id: str + text: str + expected_classes: list[str] + + +OPTION_ROLE_FIXTURES: list[OptionRoleFixture] = [ + OptionRoleFixture( + test_id="long_option", + text="--verbose", + expected_classes=["cli-option", "cli-option-long"], + ), + OptionRoleFixture( + test_id="long_option_with_dash", + text="--no-color", + expected_classes=["cli-option", "cli-option-long"], + ), + OptionRoleFixture( + test_id="short_option", + text="-h", + expected_classes=["cli-option", "cli-option-short"], + ), + OptionRoleFixture( + test_id="short_option_v", + text="-v", + expected_classes=["cli-option", "cli-option-short"], + ), + OptionRoleFixture( + test_id="no_dash_prefix", + text="option", + expected_classes=["cli-option"], + ), +] + + +@pytest.mark.parametrize( + list(OptionRoleFixture._fields), + OPTION_ROLE_FIXTURES, + ids=[f.test_id for f in OPTION_ROLE_FIXTURES], +) +def test_cli_option_role( + test_id: str, + text: str, + expected_classes: list[str], +) -> None: + """Test CLI option role generates correct node classes.""" + node_list, messages = cli_option_role( + "cli-option", + f":cli-option:`{text}`", + text, + 1, + None, + ) + + assert len(node_list) == 1 + assert len(messages) == 0 + + node = node_list[0] + assert isinstance(node, nodes.literal) + assert node.astext() == text + assert node["classes"] == expected_classes + + +def test_cli_option_role_with_options() -> None: + """Test CLI option role accepts options parameter.""" + node_list, _messages = cli_option_role( + "cli-option", + ":cli-option:`--test`", + "--test", + 1, + None, + options={"class": "extra"}, + ) + + assert len(node_list) == 1 + # Options are normalized but classes come from role logic + assert "cli-option" in node_list[0]["classes"] + + +# --- CLI Metavar Role Tests --- + + +class MetavarRoleFixture(t.NamedTuple): + """Test fixture for CLI metavar role.""" + + test_id: str + text: str + + +METAVAR_ROLE_FIXTURES: list[MetavarRoleFixture] = [ + MetavarRoleFixture(test_id="file", text="FILE"), + MetavarRoleFixture(test_id="path", text="PATH"), + MetavarRoleFixture(test_id="directory", text="DIRECTORY"), + MetavarRoleFixture(test_id="config", text="CONFIG"), + MetavarRoleFixture(test_id="lowercase", text="value"), +] + + +@pytest.mark.parametrize( + list(MetavarRoleFixture._fields), + METAVAR_ROLE_FIXTURES, + ids=[f.test_id for f in METAVAR_ROLE_FIXTURES], +) +def test_cli_metavar_role( + test_id: str, + text: str, +) -> None: + """Test CLI metavar role generates correct node.""" + node_list, messages = cli_metavar_role( + "cli-metavar", + f":cli-metavar:`{text}`", + text, + 1, + None, + ) + + assert len(node_list) == 1 + assert len(messages) == 0 + + node = node_list[0] + assert isinstance(node, nodes.literal) + assert node.astext() == text + assert node["classes"] == ["cli-metavar"] + + +# --- CLI Command Role Tests --- + + +class CommandRoleFixture(t.NamedTuple): + """Test fixture for CLI command role.""" + + test_id: str + text: str + + +COMMAND_ROLE_FIXTURES: list[CommandRoleFixture] = [ + CommandRoleFixture(test_id="sync", text="sync"), + CommandRoleFixture(test_id="add", text="add"), + CommandRoleFixture(test_id="vcspull", text="vcspull"), + CommandRoleFixture(test_id="list", text="list"), + CommandRoleFixture(test_id="with_dash", text="repo-add"), +] + + +@pytest.mark.parametrize( + list(CommandRoleFixture._fields), + COMMAND_ROLE_FIXTURES, + ids=[f.test_id for f in 
COMMAND_ROLE_FIXTURES], +) +def test_cli_command_role( + test_id: str, + text: str, +) -> None: + """Test CLI command role generates correct node.""" + node_list, messages = cli_command_role( + "cli-command", + f":cli-command:`{text}`", + text, + 1, + None, + ) + + assert len(node_list) == 1 + assert len(messages) == 0 + + node = node_list[0] + assert isinstance(node, nodes.literal) + assert node.astext() == text + assert node["classes"] == ["cli-command"] + + +# --- CLI Default Role Tests --- + + +class DefaultRoleFixture(t.NamedTuple): + """Test fixture for CLI default role.""" + + test_id: str + text: str + + +DEFAULT_ROLE_FIXTURES: list[DefaultRoleFixture] = [ + DefaultRoleFixture(test_id="none", text="None"), + DefaultRoleFixture(test_id="quoted_auto", text='"auto"'), + DefaultRoleFixture(test_id="number", text="0"), + DefaultRoleFixture(test_id="empty_string", text='""'), + DefaultRoleFixture(test_id="true", text="True"), + DefaultRoleFixture(test_id="false", text="False"), +] + + +@pytest.mark.parametrize( + list(DefaultRoleFixture._fields), + DEFAULT_ROLE_FIXTURES, + ids=[f.test_id for f in DEFAULT_ROLE_FIXTURES], +) +def test_cli_default_role( + test_id: str, + text: str, +) -> None: + """Test CLI default role generates correct node.""" + node_list, messages = cli_default_role( + "cli-default", + f":cli-default:`{text}`", + text, + 1, + None, + ) + + assert len(node_list) == 1 + assert len(messages) == 0 + + node = node_list[0] + assert isinstance(node, nodes.literal) + assert node.astext() == text + assert node["classes"] == ["cli-default"] + + +# --- CLI Choice Role Tests --- + + +class ChoiceRoleFixture(t.NamedTuple): + """Test fixture for CLI choice role.""" + + test_id: str + text: str + + +CHOICE_ROLE_FIXTURES: list[ChoiceRoleFixture] = [ + ChoiceRoleFixture(test_id="json", text="json"), + ChoiceRoleFixture(test_id="yaml", text="yaml"), + ChoiceRoleFixture(test_id="table", text="table"), + ChoiceRoleFixture(test_id="auto", text="auto"), + ChoiceRoleFixture(test_id="always", text="always"), + ChoiceRoleFixture(test_id="never", text="never"), +] + + +@pytest.mark.parametrize( + list(ChoiceRoleFixture._fields), + CHOICE_ROLE_FIXTURES, + ids=[f.test_id for f in CHOICE_ROLE_FIXTURES], +) +def test_cli_choice_role( + test_id: str, + text: str, +) -> None: + """Test CLI choice role generates correct node.""" + node_list, messages = cli_choice_role( + "cli-choice", + f":cli-choice:`{text}`", + text, + 1, + None, + ) + + assert len(node_list) == 1 + assert len(messages) == 0 + + node = node_list[0] + assert isinstance(node, nodes.literal) + assert node.astext() == text + assert node["classes"] == ["cli-choice"] + + +# --- Register Roles Test --- + + +def test_register_roles() -> None: + """Test register_roles doesn't raise errors.""" + # This should not raise any exceptions + register_roles() + + +# --- Role Return Type Tests --- + + +def test_all_roles_return_correct_types() -> None: + """Test all roles return proper tuple of (nodes, messages).""" + role_functions = [ + cli_option_role, + cli_metavar_role, + cli_command_role, + cli_default_role, + cli_choice_role, + ] + + for role_func in role_functions: + result = role_func("test", ":test:`value`", "value", 1, None) + + assert isinstance(result, tuple), f"{role_func.__name__} should return tuple" + assert len(result) == 2, f"{role_func.__name__} should return 2-tuple" + + node_list, messages = result + assert isinstance(node_list, list), ( + f"{role_func.__name__} first element should be list" + ) + assert isinstance(messages, 
list), ( + f"{role_func.__name__} second element should be list" + ) + assert len(node_list) == 1, f"{role_func.__name__} should return one node" + assert len(messages) == 0, ( + f"{role_func.__name__} should return no error messages" + ) + + +# --- Node Structure Tests --- + + +def test_cli_option_node_structure() -> None: + """Test CLI option node has expected structure.""" + node_list, _ = cli_option_role( + "cli-option", + ":cli-option:`--test`", + "--test", + 1, + None, + ) + + node = node_list[0] + + # Check node type + assert isinstance(node, nodes.literal) + + # Check rawsource is preserved + assert node.rawsource == ":cli-option:`--test`" + + # Check text content + assert len(node.children) == 1 + assert isinstance(node.children[0], nodes.Text) + assert str(node.children[0]) == "--test" + + +def test_roles_with_none_content_parameter() -> None: + """Test roles handle None content parameter correctly.""" + node_list, messages = cli_option_role( + "cli-option", + ":cli-option:`--test`", + "--test", + 1, + None, + options=None, + content=None, + ) + + assert len(node_list) == 1 + assert len(messages) == 0 + + +def test_roles_with_empty_content_parameter() -> None: + """Test roles handle empty content parameter correctly.""" + node_list, messages = cli_option_role( + "cli-option", + ":cli-option:`--test`", + "--test", + 1, + None, + options={}, + content=[], + ) + + assert len(node_list) == 1 + assert len(messages) == 0 + + +# --- Edge Case Tests --- + + +def test_cli_option_role_empty_text() -> None: + """Test CLI option role with empty text.""" + node_list, _messages = cli_option_role( + "cli-option", + ":cli-option:``", + "", + 1, + None, + ) + + assert len(node_list) == 1 + assert node_list[0].astext() == "" + # No dash prefix, so only base class + assert node_list[0]["classes"] == ["cli-option"] + + +def test_cli_option_role_special_characters() -> None: + """Test CLI option role with special characters in text.""" + node_list, _messages = cli_option_role( + "cli-option", + ":cli-option:`--foo-bar_baz`", + "--foo-bar_baz", + 1, + None, + ) + + assert len(node_list) == 1 + assert node_list[0].astext() == "--foo-bar_baz" + assert "cli-option-long" in node_list[0]["classes"] diff --git a/tests/docs/_ext/test_cli_usage_lexer.py b/tests/docs/_ext/test_cli_usage_lexer.py new file mode 100644 index 000000000..3c32ebac6 --- /dev/null +++ b/tests/docs/_ext/test_cli_usage_lexer.py @@ -0,0 +1,358 @@ +"""Tests for cli_usage_lexer Pygments extension.""" + +from __future__ import annotations + +import typing as t + +import pytest +from cli_usage_lexer import ( + CLIUsageLexer, + tokenize_usage, +) + +# --- Helper to extract token type names --- + + +def get_tokens(text: str) -> list[tuple[str, str]]: + """Get tokens as (type_name, value) tuples.""" + lexer = CLIUsageLexer() + return [ + (str(tok_type), tok_value) for tok_type, tok_value in lexer.get_tokens(text) + ] + + +# --- Token type fixtures --- + + +class TokenTypeFixture(t.NamedTuple): + """Test fixture for verifying specific token types.""" + + test_id: str + input_text: str + expected_token_type: str + expected_value: str + + +TOKEN_TYPE_FIXTURES: list[TokenTypeFixture] = [ + TokenTypeFixture( + test_id="usage_heading", + input_text="usage:", + expected_token_type="Token.Generic.Heading", + expected_value="usage:", + ), + TokenTypeFixture( + test_id="short_option", + input_text="-h", + expected_token_type="Token.Name.Attribute", + expected_value="-h", + ), + TokenTypeFixture( + test_id="long_option", + input_text="--verbose", + 
expected_token_type="Token.Name.Tag", + expected_value="--verbose", + ), + TokenTypeFixture( + test_id="long_option_with_dashes", + input_text="--no-color", + expected_token_type="Token.Name.Tag", + expected_value="--no-color", + ), + TokenTypeFixture( + test_id="uppercase_metavar", + input_text="COMMAND", + expected_token_type="Token.Name.Constant", + expected_value="COMMAND", + ), + TokenTypeFixture( + test_id="uppercase_metavar_with_underscore", + input_text="FILE_PATH", + expected_token_type="Token.Name.Constant", + expected_value="FILE_PATH", + ), + TokenTypeFixture( + test_id="positional_arg", + input_text="repo-name", + expected_token_type="Token.Name.Label", + expected_value="repo-name", + ), + TokenTypeFixture( + test_id="command_name", + input_text="vcspull", + expected_token_type="Token.Name.Label", + expected_value="vcspull", + ), + TokenTypeFixture( + test_id="open_bracket", + input_text="[", + expected_token_type="Token.Punctuation", + expected_value="[", + ), + TokenTypeFixture( + test_id="close_bracket", + input_text="]", + expected_token_type="Token.Punctuation", + expected_value="]", + ), + TokenTypeFixture( + test_id="pipe_operator", + input_text="|", + expected_token_type="Token.Operator", + expected_value="|", + ), +] + + +@pytest.mark.parametrize( + TokenTypeFixture._fields, + TOKEN_TYPE_FIXTURES, + ids=[f.test_id for f in TOKEN_TYPE_FIXTURES], +) +def test_token_type( + test_id: str, + input_text: str, + expected_token_type: str, + expected_value: str, +) -> None: + """Test individual token type detection.""" + tokens = get_tokens(input_text) + # Find the expected token (skip whitespace) + non_ws_tokens = [(t, v) for t, v in tokens if "Whitespace" not in t and v.strip()] + assert len(non_ws_tokens) >= 1, f"No non-whitespace tokens found for '{input_text}'" + token_type, token_value = non_ws_tokens[0] + assert token_type == expected_token_type, ( + f"Expected {expected_token_type}, got {token_type}" + ) + assert token_value == expected_value + + +# --- Short option with value fixtures --- + + +class ShortOptionValueFixture(t.NamedTuple): + """Test fixture for short options with values.""" + + test_id: str + input_text: str + option: str + value: str + + +SHORT_OPTION_VALUE_FIXTURES: list[ShortOptionValueFixture] = [ + ShortOptionValueFixture( + test_id="lowercase_value", + input_text="-c config-path", + option="-c", + value="config-path", + ), + ShortOptionValueFixture( + test_id="uppercase_value", + input_text="-d DIRECTORY", + option="-d", + value="DIRECTORY", + ), + ShortOptionValueFixture( + test_id="simple_value", + input_text="-r name", + option="-r", + value="name", + ), +] + + +@pytest.mark.parametrize( + ShortOptionValueFixture._fields, + SHORT_OPTION_VALUE_FIXTURES, + ids=[f.test_id for f in SHORT_OPTION_VALUE_FIXTURES], +) +def test_short_option_with_value( + test_id: str, + input_text: str, + option: str, + value: str, +) -> None: + """Test short option followed by value tokenization.""" + tokens = get_tokens(input_text) + non_ws_tokens = [(t, v) for t, v in tokens if "Whitespace" not in t] + + assert len(non_ws_tokens) >= 2 + assert non_ws_tokens[0] == ("Token.Name.Attribute", option) + # Value could be Name.Variable or Name.Constant depending on case + assert non_ws_tokens[1][1] == value + + +# --- Long option with value fixtures --- + + +class LongOptionValueFixture(t.NamedTuple): + """Test fixture for long options with = values.""" + + test_id: str + input_text: str + option: str + value: str + + +LONG_OPTION_VALUE_FIXTURES: 
list[LongOptionValueFixture] = [ + LongOptionValueFixture( + test_id="uppercase_value", + input_text="--config=FILE", + option="--config", + value="FILE", + ), + LongOptionValueFixture( + test_id="lowercase_value", + input_text="--output=path", + option="--output", + value="path", + ), +] + + +@pytest.mark.parametrize( + LongOptionValueFixture._fields, + LONG_OPTION_VALUE_FIXTURES, + ids=[f.test_id for f in LONG_OPTION_VALUE_FIXTURES], +) +def test_long_option_with_value( + test_id: str, + input_text: str, + option: str, + value: str, +) -> None: + """Test long option with = value tokenization.""" + tokens = get_tokens(input_text) + non_ws_tokens = [(t, v) for t, v in tokens if "Whitespace" not in t] + + assert len(non_ws_tokens) >= 3 + assert non_ws_tokens[0] == ("Token.Name.Tag", option) + assert non_ws_tokens[1] == ("Token.Operator", "=") + assert non_ws_tokens[2][1] == value + + +# --- Full usage string fixtures --- + + +class UsageStringFixture(t.NamedTuple): + """Test fixture for full usage string tokenization.""" + + test_id: str + input_text: str + expected_contains: list[tuple[str, str]] + + +USAGE_STRING_FIXTURES: list[UsageStringFixture] = [ + UsageStringFixture( + test_id="simple_usage", + input_text="usage: cmd [-h]", + expected_contains=[ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "cmd"), + ("Token.Punctuation", "["), + ("Token.Name.Attribute", "-h"), + ("Token.Punctuation", "]"), + ], + ), + UsageStringFixture( + test_id="mutually_exclusive", + input_text="[--json | --ndjson | --table]", + expected_contains=[ + ("Token.Name.Tag", "--json"), + ("Token.Operator", "|"), + ("Token.Name.Tag", "--ndjson"), + ("Token.Operator", "|"), + ("Token.Name.Tag", "--table"), + ], + ), + UsageStringFixture( + test_id="subcommand", + input_text="usage: vcspull sync", + expected_contains=[ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "vcspull"), + ("Token.Name.Label", "sync"), + ], + ), + UsageStringFixture( + test_id="positional_args", + input_text="[repo-name] [path]", + expected_contains=[ + ("Token.Punctuation", "["), + ("Token.Name.Label", "repo-name"), + ("Token.Punctuation", "]"), + ("Token.Punctuation", "["), + ("Token.Name.Label", "path"), + ("Token.Punctuation", "]"), + ], + ), +] + + +@pytest.mark.parametrize( + UsageStringFixture._fields, + USAGE_STRING_FIXTURES, + ids=[f.test_id for f in USAGE_STRING_FIXTURES], +) +def test_usage_string( + test_id: str, + input_text: str, + expected_contains: list[tuple[str, str]], +) -> None: + """Test full usage string tokenization contains expected tokens.""" + tokens = get_tokens(input_text) + for expected_type, expected_value in expected_contains: + assert (expected_type, expected_value) in tokens, ( + f"Expected ({expected_type}, {expected_value!r}) not found in tokens" + ) + + +# --- Real vcspull usage output test --- + + +def test_vcspull_sync_usage() -> None: + """Test real vcspull sync usage output tokenization.""" + usage_text = """\ +usage: vcspull sync [-h] [-c CONFIG] [-d DIRECTORY] + [--json | --ndjson | --table] [--color {auto,always,never}] + [--no-progress] [--verbose] + [repo-name] [path]""" + + tokens = get_tokens(usage_text) + + # Check key elements are present + # Note: DIRECTORY after -d is Name.Variable (option value), not Name.Constant + expected = [ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "vcspull"), + ("Token.Name.Label", "sync"), + ("Token.Name.Attribute", "-h"), + ("Token.Name.Attribute", "-c"), + ("Token.Name.Variable", "CONFIG"), # Option value, not standalone 
metavar + ("Token.Name.Attribute", "-d"), + ("Token.Name.Variable", "DIRECTORY"), # Option value, not standalone metavar + ("Token.Name.Tag", "--json"), + ("Token.Name.Tag", "--ndjson"), + ("Token.Name.Tag", "--table"), + ("Token.Name.Tag", "--color"), + ("Token.Name.Tag", "--no-progress"), + ("Token.Name.Tag", "--verbose"), + ("Token.Name.Label", "repo-name"), + ("Token.Name.Label", "path"), + ] + + for expected_type, expected_value in expected: + assert (expected_type, expected_value) in tokens, ( + f"Expected ({expected_type}, {expected_value!r}) not in tokens" + ) + + +# --- tokenize_usage helper function test --- + + +def test_tokenize_usage_helper() -> None: + """Test the tokenize_usage helper function.""" + result = tokenize_usage("usage: cmd [-h]") + + assert result[0] == ("Token.Generic.Heading", "usage:") + assert ("Token.Name.Label", "cmd") in result + assert ("Token.Name.Attribute", "-h") in result diff --git a/tests/docs/_ext/test_pretty_argparse.py b/tests/docs/_ext/test_pretty_argparse.py new file mode 100644 index 000000000..1d186205c --- /dev/null +++ b/tests/docs/_ext/test_pretty_argparse.py @@ -0,0 +1,967 @@ +"""Tests for pretty_argparse sphinx extension.""" + +from __future__ import annotations + +import typing as t + +import pytest +from docutils import nodes +from pretty_argparse import ( # type: ignore[import-not-found] + _is_examples_section, + _is_usage_block, + _reorder_nodes, + escape_rst_emphasis, + is_base_examples_term, + is_examples_term, + make_section_id, + make_section_title, + strip_ansi, + transform_definition_list, +) + +# --- strip_ansi tests --- + + +class StripAnsiFixture(t.NamedTuple): + """Test fixture for strip_ansi function.""" + + test_id: str + input_text: str + expected: str + + +STRIP_ANSI_FIXTURES: list[StripAnsiFixture] = [ + StripAnsiFixture( + test_id="plain_text", + input_text="hello", + expected="hello", + ), + StripAnsiFixture( + test_id="green_color", + input_text="\033[32mgreen\033[0m", + expected="green", + ), + StripAnsiFixture( + test_id="bold_blue", + input_text="\033[1;34mbold\033[0m", + expected="bold", + ), + StripAnsiFixture( + test_id="multiple_codes", + input_text="\033[1m\033[32mtest\033[0m", + expected="test", + ), + StripAnsiFixture( + test_id="empty_string", + input_text="", + expected="", + ), + StripAnsiFixture( + test_id="mixed_content", + input_text="pre\033[31mred\033[0mpost", + expected="preredpost", + ), + StripAnsiFixture( + test_id="reset_only", + input_text="\033[0m", + expected="", + ), + StripAnsiFixture( + test_id="sgr_params", + input_text="\033[38;5;196mred256\033[0m", + expected="red256", + ), +] + + +@pytest.mark.parametrize( + StripAnsiFixture._fields, + STRIP_ANSI_FIXTURES, + ids=[f.test_id for f in STRIP_ANSI_FIXTURES], +) +def test_strip_ansi(test_id: str, input_text: str, expected: str) -> None: + """Test ANSI escape code stripping.""" + assert strip_ansi(input_text) == expected + + +# --- escape_rst_emphasis tests --- + + +class EscapeRstEmphasisFixture(t.NamedTuple): + """Test fixture for escape_rst_emphasis function.""" + + test_id: str + input_text: str + expected: str + + +ESCAPE_RST_EMPHASIS_FIXTURES: list[EscapeRstEmphasisFixture] = [ + EscapeRstEmphasisFixture( + test_id="plain_text_unchanged", + input_text="plain text", + expected="plain text", + ), + EscapeRstEmphasisFixture( + test_id="glob_pattern_escaped", + input_text='vcspull list "django-*"', + expected='vcspull list "django-\\*"', + ), + EscapeRstEmphasisFixture( + test_id="multiple_glob_patterns", + input_text='vcspull sync 
"flask-*" "django-*"', + expected='vcspull sync "flask-\\*" "django-\\*"', + ), + EscapeRstEmphasisFixture( + test_id="asterisk_at_end", + input_text="pattern-*", + expected="pattern-\\*", + ), + EscapeRstEmphasisFixture( + test_id="already_escaped_unchanged", + input_text="already-\\* escaped", + expected="already-\\* escaped", + ), + EscapeRstEmphasisFixture( + test_id="valid_emphasis_unchanged", + input_text="*emphasis* is ok", + expected="*emphasis* is ok", + ), + EscapeRstEmphasisFixture( + test_id="strong_emphasis_unchanged", + input_text="**strong** text", + expected="**strong** text", + ), + EscapeRstEmphasisFixture( + test_id="space_before_asterisk_unchanged", + input_text="space * asterisk", + expected="space * asterisk", + ), + EscapeRstEmphasisFixture( + test_id="asterisk_after_dot_unchanged", + input_text="regex.*pattern", + expected="regex.*pattern", + ), + EscapeRstEmphasisFixture( + test_id="single_asterisk_unchanged", + input_text="vcspull sync '*'", + expected="vcspull sync '*'", + ), + EscapeRstEmphasisFixture( + test_id="empty_string", + input_text="", + expected="", + ), + EscapeRstEmphasisFixture( + test_id="underscore_asterisk_unchanged", + input_text="name_*pattern", + expected="name_*pattern", + ), + EscapeRstEmphasisFixture( + test_id="dash_asterisk_with_following_char", + input_text="repo-*-suffix", + expected="repo-\\*-suffix", + ), +] + + +@pytest.mark.parametrize( + EscapeRstEmphasisFixture._fields, + ESCAPE_RST_EMPHASIS_FIXTURES, + ids=[f.test_id for f in ESCAPE_RST_EMPHASIS_FIXTURES], +) +def test_escape_rst_emphasis(test_id: str, input_text: str, expected: str) -> None: + """Test RST emphasis escaping for argparse patterns.""" + assert escape_rst_emphasis(input_text) == expected + + +# --- is_examples_term tests --- + + +class IsExamplesTermFixture(t.NamedTuple): + """Test fixture for is_examples_term function.""" + + test_id: str + term_text: str + expected: bool + + +IS_EXAMPLES_TERM_FIXTURES: list[IsExamplesTermFixture] = [ + IsExamplesTermFixture( + test_id="base_examples_colon", + term_text="examples:", + expected=True, + ), + IsExamplesTermFixture( + test_id="base_examples_no_colon", + term_text="examples", + expected=True, + ), + IsExamplesTermFixture( + test_id="prefixed_machine_readable", + term_text="Machine-readable output examples:", + expected=True, + ), + IsExamplesTermFixture( + test_id="prefixed_field_scoped", + term_text="Field-scoped search examples:", + expected=True, + ), + IsExamplesTermFixture( + test_id="colon_pattern", + term_text="Machine-readable output: examples:", + expected=True, + ), + IsExamplesTermFixture( + test_id="usage_not_examples", + term_text="Usage:", + expected=False, + ), + IsExamplesTermFixture( + test_id="arguments_not_examples", + term_text="Named Arguments:", + expected=False, + ), + IsExamplesTermFixture( + test_id="case_insensitive_upper", + term_text="EXAMPLES:", + expected=True, + ), + IsExamplesTermFixture( + test_id="case_insensitive_mixed", + term_text="Examples:", + expected=True, + ), +] + + +@pytest.mark.parametrize( + IsExamplesTermFixture._fields, + IS_EXAMPLES_TERM_FIXTURES, + ids=[f.test_id for f in IS_EXAMPLES_TERM_FIXTURES], +) +def test_is_examples_term(test_id: str, term_text: str, expected: bool) -> None: + """Test examples term detection.""" + assert is_examples_term(term_text) == expected + + +# --- is_base_examples_term tests --- + + +class IsBaseExamplesTermFixture(t.NamedTuple): + """Test fixture for is_base_examples_term function.""" + + test_id: str + term_text: str + expected: bool + + 
+IS_BASE_EXAMPLES_TERM_FIXTURES: list[IsBaseExamplesTermFixture] = [ + IsBaseExamplesTermFixture( + test_id="base_with_colon", + term_text="examples:", + expected=True, + ), + IsBaseExamplesTermFixture( + test_id="base_no_colon", + term_text="examples", + expected=True, + ), + IsBaseExamplesTermFixture( + test_id="uppercase", + term_text="EXAMPLES", + expected=True, + ), + IsBaseExamplesTermFixture( + test_id="mixed_case", + term_text="Examples:", + expected=True, + ), + IsBaseExamplesTermFixture( + test_id="prefixed_not_base", + term_text="Field-scoped examples:", + expected=False, + ), + IsBaseExamplesTermFixture( + test_id="output_examples_not_base", + term_text="Machine-readable output examples:", + expected=False, + ), + IsBaseExamplesTermFixture( + test_id="colon_pattern_not_base", + term_text="Output: examples:", + expected=False, + ), +] + + +@pytest.mark.parametrize( + IsBaseExamplesTermFixture._fields, + IS_BASE_EXAMPLES_TERM_FIXTURES, + ids=[f.test_id for f in IS_BASE_EXAMPLES_TERM_FIXTURES], +) +def test_is_base_examples_term(test_id: str, term_text: str, expected: bool) -> None: + """Test base examples term detection.""" + assert is_base_examples_term(term_text) == expected + + +# --- make_section_id tests --- + + +class MakeSectionIdFixture(t.NamedTuple): + """Test fixture for make_section_id function.""" + + test_id: str + term_text: str + counter: int + is_subsection: bool + expected: str + + +MAKE_SECTION_ID_FIXTURES: list[MakeSectionIdFixture] = [ + MakeSectionIdFixture( + test_id="base_examples", + term_text="examples:", + counter=0, + is_subsection=False, + expected="examples", + ), + MakeSectionIdFixture( + test_id="prefixed_standard", + term_text="Machine-readable output examples:", + counter=0, + is_subsection=False, + expected="machine-readable-output-examples", + ), + MakeSectionIdFixture( + test_id="subsection_omits_suffix", + term_text="Field-scoped examples:", + counter=0, + is_subsection=True, + expected="field-scoped", + ), + MakeSectionIdFixture( + test_id="with_counter", + term_text="examples:", + counter=2, + is_subsection=False, + expected="examples-2", + ), + MakeSectionIdFixture( + test_id="counter_zero_no_suffix", + term_text="examples:", + counter=0, + is_subsection=False, + expected="examples", + ), + MakeSectionIdFixture( + test_id="colon_pattern", + term_text="Machine-readable output: examples:", + counter=0, + is_subsection=False, + expected="machine-readable-output-examples", + ), + MakeSectionIdFixture( + test_id="subsection_with_counter", + term_text="Field-scoped examples:", + counter=1, + is_subsection=True, + expected="field-scoped-1", + ), +] + + +@pytest.mark.parametrize( + MakeSectionIdFixture._fields, + MAKE_SECTION_ID_FIXTURES, + ids=[f.test_id for f in MAKE_SECTION_ID_FIXTURES], +) +def test_make_section_id( + test_id: str, + term_text: str, + counter: int, + is_subsection: bool, + expected: str, +) -> None: + """Test section ID generation.""" + assert make_section_id(term_text, counter, is_subsection=is_subsection) == expected + + +def test_make_section_id_with_page_prefix() -> None: + """Test section ID generation with page_prefix for cross-page uniqueness.""" + # Base "examples:" with page_prefix becomes "sync-examples" + assert make_section_id("examples:", page_prefix="sync") == "sync-examples" + assert make_section_id("examples:", page_prefix="add") == "add-examples" + + # Prefixed examples already unique - page_prefix not added + assert ( + make_section_id("Machine-readable output examples:", page_prefix="sync") + == 
"machine-readable-output-examples" + ) + + # Subsection with page_prefix + result = make_section_id( + "Field-scoped examples:", is_subsection=True, page_prefix="sync" + ) + assert result == "field-scoped" + + # Empty page_prefix behaves like before + assert make_section_id("examples:", page_prefix="") == "examples" + + +# --- make_section_title tests --- + + +class MakeSectionTitleFixture(t.NamedTuple): + """Test fixture for make_section_title function.""" + + test_id: str + term_text: str + is_subsection: bool + expected: str + + +MAKE_SECTION_TITLE_FIXTURES: list[MakeSectionTitleFixture] = [ + MakeSectionTitleFixture( + test_id="base_examples", + term_text="examples:", + is_subsection=False, + expected="Examples", + ), + MakeSectionTitleFixture( + test_id="prefixed_with_examples_suffix", + term_text="Machine-readable output examples:", + is_subsection=False, + expected="Machine-Readable Output Examples", + ), + MakeSectionTitleFixture( + test_id="subsection_omits_examples", + term_text="Field-scoped examples:", + is_subsection=True, + expected="Field-Scoped", + ), + MakeSectionTitleFixture( + test_id="colon_pattern", + term_text="Machine-readable output: examples:", + is_subsection=False, + expected="Machine-Readable Output Examples", + ), + MakeSectionTitleFixture( + test_id="subsection_colon_pattern", + term_text="Machine-readable output: examples:", + is_subsection=True, + expected="Machine-Readable Output", + ), + MakeSectionTitleFixture( + test_id="base_examples_no_colon", + term_text="examples", + is_subsection=False, + expected="Examples", + ), +] + + +@pytest.mark.parametrize( + MakeSectionTitleFixture._fields, + MAKE_SECTION_TITLE_FIXTURES, + ids=[f.test_id for f in MAKE_SECTION_TITLE_FIXTURES], +) +def test_make_section_title( + test_id: str, + term_text: str, + is_subsection: bool, + expected: str, +) -> None: + """Test section title generation.""" + assert make_section_title(term_text, is_subsection=is_subsection) == expected + + +# --- transform_definition_list integration tests --- + + +def _make_dl_item(term: str, definition: str) -> nodes.definition_list_item: + """Create a definition list item for testing. + + Parameters + ---------- + term : str + The definition term text. + definition : str + The definition content text. + + Returns + ------- + nodes.definition_list_item + A definition list item with term and definition. 
+ """ + item = nodes.definition_list_item() + term_node = nodes.term(text=term) + def_node = nodes.definition() + def_node += nodes.paragraph(text=definition) + item += term_node + item += def_node + return item + + +def test_transform_definition_list_single_examples() -> None: + """Single examples section creates one section node.""" + dl = nodes.definition_list() + dl += _make_dl_item("examples:", "vcspull ls") + + result = transform_definition_list(dl) + + assert len(result) == 1 + assert isinstance(result[0], nodes.section) + assert result[0]["ids"] == ["examples"] + + +def test_transform_definition_list_nested_examples() -> None: + """Base examples with category creates nested sections.""" + dl = nodes.definition_list() + dl += _make_dl_item("examples:", "vcspull ls") + dl += _make_dl_item("Machine-readable output examples:", "vcspull ls --json") + + result = transform_definition_list(dl) + + # Should have single parent section containing nested subsection + assert len(result) == 1 + parent = result[0] + assert isinstance(parent, nodes.section) + assert parent["ids"] == ["examples"] + + # Find nested subsection + subsections = [c for c in parent.children if isinstance(c, nodes.section)] + assert len(subsections) == 1 + assert subsections[0]["ids"] == ["machine-readable-output"] + + +def test_transform_definition_list_multiple_categories() -> None: + """Multiple example categories all nest under parent.""" + dl = nodes.definition_list() + dl += _make_dl_item("examples:", "vcspull ls") + dl += _make_dl_item("Field-scoped examples:", "vcspull ls --field name") + dl += _make_dl_item("Machine-readable output examples:", "vcspull ls --json") + + result = transform_definition_list(dl) + + assert len(result) == 1 + parent = result[0] + assert isinstance(parent, nodes.section) + + subsections = [c for c in parent.children if isinstance(c, nodes.section)] + assert len(subsections) == 2 + + +def test_transform_definition_list_preserves_non_examples() -> None: + """Non-example items preserved as definition list.""" + dl = nodes.definition_list() + dl += _make_dl_item("Usage:", "How to use this command") + dl += _make_dl_item("examples:", "vcspull ls") + + result = transform_definition_list(dl) + + # Should have both definition list (non-examples) and section (examples) + has_dl = any(isinstance(n, nodes.definition_list) for n in result) + has_section = any(isinstance(n, nodes.section) for n in result) + assert has_dl, "Non-example items should be preserved as definition list" + assert has_section, "Example items should become sections" + + +def test_transform_definition_list_no_examples() -> None: + """Definition list without examples returns empty list.""" + dl = nodes.definition_list() + dl += _make_dl_item("Usage:", "How to use") + dl += _make_dl_item("Options:", "Available options") + + result = transform_definition_list(dl) + + # All items are non-examples, should return definition list + assert len(result) == 1 + assert isinstance(result[0], nodes.definition_list) + + +def test_transform_definition_list_only_category_no_base() -> None: + """Single category example without base examples stays flat.""" + dl = nodes.definition_list() + dl += _make_dl_item("Machine-readable output examples:", "vcspull ls --json") + + result = transform_definition_list(dl) + + # Without base "examples:", no nesting - just single section + assert len(result) == 1 + assert isinstance(result[0], nodes.section) + # Should have full title since it's not nested + assert result[0]["ids"] == 
["machine-readable-output-examples"] + + +def test_transform_definition_list_code_blocks_created() -> None: + """Each command line becomes a separate code block.""" + dl = nodes.definition_list() + dl += _make_dl_item("examples:", "cmd1\ncmd2\ncmd3") + + result = transform_definition_list(dl) + + section = result[0] + code_blocks = [c for c in section.children if isinstance(c, nodes.literal_block)] + assert len(code_blocks) == 3 + assert code_blocks[0].astext() == "$ cmd1" + assert code_blocks[1].astext() == "$ cmd2" + assert code_blocks[2].astext() == "$ cmd3" + + +# --- _is_usage_block tests --- + + +class IsUsageBlockFixture(t.NamedTuple): + """Test fixture for _is_usage_block function.""" + + test_id: str + node_type: str + node_text: str + expected: bool + + +IS_USAGE_BLOCK_FIXTURES: list[IsUsageBlockFixture] = [ + IsUsageBlockFixture( + test_id="literal_block_usage_lowercase", + node_type="literal_block", + node_text="usage: cmd [-h]", + expected=True, + ), + IsUsageBlockFixture( + test_id="literal_block_usage_uppercase", + node_type="literal_block", + node_text="Usage: vcspull sync", + expected=True, + ), + IsUsageBlockFixture( + test_id="literal_block_usage_leading_space", + node_type="literal_block", + node_text=" usage: cmd", + expected=True, + ), + IsUsageBlockFixture( + test_id="literal_block_not_usage", + node_type="literal_block", + node_text="some other text", + expected=False, + ), + IsUsageBlockFixture( + test_id="literal_block_usage_in_middle", + node_type="literal_block", + node_text="see usage: for more", + expected=False, + ), + IsUsageBlockFixture( + test_id="paragraph_with_usage", + node_type="paragraph", + node_text="usage: cmd", + expected=False, + ), + IsUsageBlockFixture( + test_id="section_node", + node_type="section", + node_text="", + expected=False, + ), +] + + +def _make_test_node(node_type: str, node_text: str) -> nodes.Node: + """Create a test node of the specified type. + + Parameters + ---------- + node_type : str + Type of node to create ("literal_block", "paragraph", "section"). + node_text : str + Text content for the node. + + Returns + ------- + nodes.Node + The created node. 
+ """ + if node_type == "literal_block": + return nodes.literal_block(text=node_text) + if node_type == "paragraph": + return nodes.paragraph(text=node_text) + if node_type == "section": + return nodes.section() + msg = f"Unknown node type: {node_type}" + raise ValueError(msg) + + +@pytest.mark.parametrize( + IsUsageBlockFixture._fields, + IS_USAGE_BLOCK_FIXTURES, + ids=[f.test_id for f in IS_USAGE_BLOCK_FIXTURES], +) +def test_is_usage_block( + test_id: str, + node_type: str, + node_text: str, + expected: bool, +) -> None: + """Test usage block detection.""" + node = _make_test_node(node_type, node_text) + assert _is_usage_block(node) == expected + + +# --- _is_examples_section tests --- + + +class IsExamplesSectionFixture(t.NamedTuple): + """Test fixture for _is_examples_section function.""" + + test_id: str + node_type: str + section_ids: list[str] + expected: bool + + +IS_EXAMPLES_SECTION_FIXTURES: list[IsExamplesSectionFixture] = [ + IsExamplesSectionFixture( + test_id="section_with_examples_id", + node_type="section", + section_ids=["examples"], + expected=True, + ), + IsExamplesSectionFixture( + test_id="section_with_prefixed_examples", + node_type="section", + section_ids=["machine-readable-output-examples"], + expected=True, + ), + IsExamplesSectionFixture( + test_id="section_with_uppercase_examples", + node_type="section", + section_ids=["EXAMPLES"], + expected=True, + ), + IsExamplesSectionFixture( + test_id="section_without_examples", + node_type="section", + section_ids=["positional-arguments"], + expected=False, + ), + IsExamplesSectionFixture( + test_id="section_with_multiple_ids", + node_type="section", + section_ids=["main-id", "examples-alias"], + expected=True, + ), + IsExamplesSectionFixture( + test_id="section_empty_ids", + node_type="section", + section_ids=[], + expected=False, + ), + IsExamplesSectionFixture( + test_id="paragraph_node", + node_type="paragraph", + section_ids=[], + expected=False, + ), + IsExamplesSectionFixture( + test_id="literal_block_node", + node_type="literal_block", + section_ids=[], + expected=False, + ), +] + + +def _make_section_node(node_type: str, section_ids: list[str]) -> nodes.Node: + """Create a test node with optional section IDs. + + Parameters + ---------- + node_type : str + Type of node to create. + section_ids : list[str] + IDs to assign if creating a section. + + Returns + ------- + nodes.Node + The created node. + """ + if node_type == "section": + section = nodes.section() + section["ids"] = section_ids + return section + if node_type == "paragraph": + return nodes.paragraph() + if node_type == "literal_block": + return nodes.literal_block(text="examples") + msg = f"Unknown node type: {node_type}" + raise ValueError(msg) + + +@pytest.mark.parametrize( + IsExamplesSectionFixture._fields, + IS_EXAMPLES_SECTION_FIXTURES, + ids=[f.test_id for f in IS_EXAMPLES_SECTION_FIXTURES], +) +def test_is_examples_section( + test_id: str, + node_type: str, + section_ids: list[str], + expected: bool, +) -> None: + """Test examples section detection.""" + node = _make_section_node(node_type, section_ids) + assert _is_examples_section(node) == expected + + +# --- _reorder_nodes tests --- + + +def _make_usage_node(text: str = "usage: cmd [-h]") -> nodes.literal_block: + """Create a usage block node. + + Parameters + ---------- + text : str + Text content for the usage block. + + Returns + ------- + nodes.literal_block + A literal block node with usage text. 
+ """ + return nodes.literal_block(text=text) + + +def _make_examples_section(section_id: str = "examples") -> nodes.section: + """Create an examples section node. + + Parameters + ---------- + section_id : str + The ID for the section. + + Returns + ------- + nodes.section + A section node with the specified ID. + """ + section = nodes.section() + section["ids"] = [section_id] + return section + + +def test_reorder_nodes_usage_after_examples() -> None: + """Usage block after examples gets moved before examples.""" + desc = nodes.paragraph(text="Description") + examples = _make_examples_section() + usage = _make_usage_node() + + # Create a non-examples section + args_section = nodes.section() + args_section["ids"] = ["arguments"] + + result = _reorder_nodes([desc, examples, usage, args_section]) + + # Should be: desc, usage, examples, args + assert len(result) == 4 + assert isinstance(result[0], nodes.paragraph) + assert isinstance(result[1], nodes.literal_block) + assert isinstance(result[2], nodes.section) + assert result[2]["ids"] == ["examples"] + assert isinstance(result[3], nodes.section) + assert result[3]["ids"] == ["arguments"] + + +def test_reorder_nodes_no_examples() -> None: + """Without examples, original order is preserved.""" + desc = nodes.paragraph(text="Description") + usage = _make_usage_node() + args = nodes.section() + args["ids"] = ["arguments"] + + result = _reorder_nodes([desc, usage, args]) + + # Order unchanged: desc, usage, args + assert len(result) == 3 + assert isinstance(result[0], nodes.paragraph) + assert isinstance(result[1], nodes.literal_block) + assert isinstance(result[2], nodes.section) + + +def test_reorder_nodes_usage_already_before_examples() -> None: + """When usage is already before examples, order is preserved.""" + desc = nodes.paragraph(text="Description") + usage = _make_usage_node() + examples = _make_examples_section() + args = nodes.section() + args["ids"] = ["arguments"] + + result = _reorder_nodes([desc, usage, examples, args]) + + # Order should be: desc, usage, examples, args + assert len(result) == 4 + assert isinstance(result[0], nodes.paragraph) + assert isinstance(result[1], nodes.literal_block) + assert isinstance(result[2], nodes.section) + assert result[2]["ids"] == ["examples"] + + +def test_reorder_nodes_empty_list() -> None: + """Empty input returns empty output.""" + result = _reorder_nodes([]) + assert result == [] + + +def test_reorder_nodes_multiple_usage_blocks() -> None: + """Multiple usage blocks are all moved before examples.""" + desc = nodes.paragraph(text="Description") + examples = _make_examples_section() + usage1 = _make_usage_node("usage: cmd1 [-h]") + usage2 = _make_usage_node("usage: cmd2 [-v]") + + result = _reorder_nodes([desc, examples, usage1, usage2]) + + # Should be: desc, usage1, usage2, examples + assert len(result) == 4 + assert isinstance(result[0], nodes.paragraph) + assert isinstance(result[1], nodes.literal_block) + assert isinstance(result[2], nodes.literal_block) + assert isinstance(result[3], nodes.section) + + +def test_reorder_nodes_multiple_examples_sections() -> None: + """Multiple examples sections are grouped together.""" + desc = nodes.paragraph(text="Description") + examples1 = _make_examples_section("examples") + usage = _make_usage_node() + examples2 = _make_examples_section("machine-readable-output-examples") + args = nodes.section() + args["ids"] = ["arguments"] + + result = _reorder_nodes([desc, examples1, usage, examples2, args]) + + # Should be: desc, usage, examples1, examples2, 
args + assert len(result) == 5 + assert isinstance(result[0], nodes.paragraph) + assert isinstance(result[1], nodes.literal_block) + assert result[2]["ids"] == ["examples"] + assert result[3]["ids"] == ["machine-readable-output-examples"] + assert result[4]["ids"] == ["arguments"] + + +def test_reorder_nodes_preserves_non_examples_after() -> None: + """Non-examples nodes after examples stay at the end.""" + desc = nodes.paragraph(text="Description") + examples = _make_examples_section() + usage = _make_usage_node() + epilog = nodes.paragraph(text="Epilog") + + result = _reorder_nodes([desc, examples, usage, epilog]) + + # Should be: desc, usage, examples, epilog + assert len(result) == 4 + assert result[0].astext() == "Description" + assert isinstance(result[1], nodes.literal_block) + assert isinstance(result[2], nodes.section) + assert result[3].astext() == "Epilog" diff --git a/tests/docs/_ext/test_vcspull_console_lexer.py b/tests/docs/_ext/test_vcspull_console_lexer.py new file mode 100644 index 000000000..8ec101ebe --- /dev/null +++ b/tests/docs/_ext/test_vcspull_console_lexer.py @@ -0,0 +1,158 @@ +"""Tests for vcspull_console_lexer Pygments extension.""" + +from __future__ import annotations + +import typing as t + +import pytest +from pygments.token import Token +from vcspull_console_lexer import ( # type: ignore[import-not-found] + VcspullConsoleLexer, +) + +# --- Console session tests --- + + +class ConsoleSessionFixture(t.NamedTuple): + """Test fixture for console session patterns.""" + + test_id: str + input_text: str + expected_tokens: list[tuple[t.Any, str]] + + +CONSOLE_SESSION_FIXTURES: list[ConsoleSessionFixture] = [ + ConsoleSessionFixture( + test_id="command_with_list_output", + input_text="$ vcspull list\n• flask → ~/code/flask\n", + expected_tokens=[ + (Token.Generic.Prompt, "$ "), + (Token.Text, "vcspull"), # BashLexer tokenizes as Text + (Token.Comment, "•"), + (Token.Name.Function, "flask"), + (Token.Comment, "→"), + (Token.Name.Variable, "~/code/flask"), + ], + ), + ConsoleSessionFixture( + test_id="command_with_status_output", + input_text="$ vcspull status\n✓ flask: up to date\n", + expected_tokens=[ + (Token.Generic.Prompt, "$ "), + (Token.Text, "vcspull"), # BashLexer tokenizes as Text + (Token.Generic.Inserted, "✓"), + (Token.Name.Function, "flask"), + (Token.Punctuation, ":"), + (Token.Generic.Inserted, "up to date"), + ], + ), + ConsoleSessionFixture( + test_id="command_with_sync_output", + input_text="$ vcspull sync\n+ new-repo ~/code/new-repo\n", + expected_tokens=[ + (Token.Generic.Prompt, "$ "), + (Token.Text, "vcspull"), # BashLexer tokenizes as Text + (Token.Generic.Inserted, "+"), + (Token.Name.Function, "new-repo"), + (Token.Name.Variable, "~/code/new-repo"), + ], + ), + ConsoleSessionFixture( + test_id="tree_view_with_workspace_header", + input_text="$ vcspull list --tree\n~/code/\n • flask → ~/code/flask\n", + expected_tokens=[ + (Token.Generic.Prompt, "$ "), + (Token.Text, "vcspull"), # BashLexer tokenizes as Text + (Token.Generic.Subheading, "~/code/"), + (Token.Comment, "•"), + (Token.Name.Function, "flask"), + (Token.Comment, "→"), + (Token.Name.Variable, "~/code/flask"), + ], + ), +] + + +@pytest.mark.parametrize( + ConsoleSessionFixture._fields, + CONSOLE_SESSION_FIXTURES, + ids=[f.test_id for f in CONSOLE_SESSION_FIXTURES], +) +def test_console_session( + test_id: str, + input_text: str, + expected_tokens: list[tuple[t.Any, str]], +) -> None: + """Test console session tokenization.""" + lexer = VcspullConsoleLexer() + tokens = [(t, v) for t, v 
in lexer.get_tokens(input_text) if v.strip()] + for expected_token, expected_value in expected_tokens: + assert (expected_token, expected_value) in tokens, ( + f"Expected ({expected_token}, {expected_value!r}) not found in tokens" + ) + + +# --- Prompt handling tests --- + + +def test_prompt_detection() -> None: + """Test that shell prompts are detected and tokenized.""" + lexer = VcspullConsoleLexer() + text = "$ vcspull list\n• flask → ~/code/flask\n" + tokens = list(lexer.get_tokens(text)) + + # Check that prompt is detected + prompt_tokens = [(t, v) for t, v in tokens if t == Token.Generic.Prompt] + assert len(prompt_tokens) == 1 + assert prompt_tokens[0][1] == "$ " + + +def test_multiline_output() -> None: + """Test multiline vcspull output tokenization.""" + text = """$ vcspull list --tree +~/work/python/ + • flask → ~/work/python/flask + • requests → ~/work/python/requests +""" + lexer = VcspullConsoleLexer() + tokens = [(t, v) for t, v in lexer.get_tokens(text) if v.strip()] + + # Check key tokens + assert (Token.Generic.Prompt, "$ ") in tokens + assert (Token.Generic.Subheading, "~/work/python/") in tokens + assert (Token.Name.Function, "flask") in tokens + assert (Token.Name.Function, "requests") in tokens + + +def test_warning_and_error_output() -> None: + """Test warning and error symbols in output.""" + text = """$ vcspull status +✓ good-repo: up to date +⚠ dirty-repo: dirty +✗ missing-repo: missing +""" + lexer = VcspullConsoleLexer() + tokens = [(t, v) for t, v in lexer.get_tokens(text) if v.strip()] + + # Check success + assert (Token.Generic.Inserted, "✓") in tokens + assert (Token.Generic.Inserted, "up to date") in tokens + + # Check warning + assert (Token.Name.Exception, "⚠") in tokens + assert (Token.Name.Exception, "dirty") in tokens + + # Check error + assert (Token.Generic.Error, "✗") in tokens + assert (Token.Generic.Error, "missing") in tokens + + +def test_command_only_no_output() -> None: + """Test command without output.""" + text = "$ vcspull list django flask\n" + lexer = VcspullConsoleLexer() + tokens = [(t, v) for t, v in lexer.get_tokens(text) if v.strip()] + + # Should have prompt and command tokens + assert (Token.Generic.Prompt, "$ ") in tokens + assert (Token.Text, "vcspull") in tokens # BashLexer tokenizes as Text diff --git a/tests/docs/_ext/test_vcspull_output_lexer.py b/tests/docs/_ext/test_vcspull_output_lexer.py new file mode 100644 index 000000000..1b4f159d9 --- /dev/null +++ b/tests/docs/_ext/test_vcspull_output_lexer.py @@ -0,0 +1,400 @@ +"""Tests for vcspull_output_lexer Pygments extension.""" + +from __future__ import annotations + +import typing as t + +import pytest +from pygments.token import Token +from vcspull_output_lexer import ( # type: ignore[import-not-found] + VcspullOutputLexer, + tokenize_output, +) + +# --- List output tests --- + + +class ListOutputFixture(t.NamedTuple): + """Test fixture for list output patterns.""" + + test_id: str + input_text: str + expected_tokens: list[tuple[t.Any, str]] + + +LIST_OUTPUT_FIXTURES: list[ListOutputFixture] = [ + ListOutputFixture( + test_id="basic_list_item", + input_text="• flask → ~/code/flask", + expected_tokens=[ + (Token.Comment, "•"), + (Token.Name.Function, "flask"), + (Token.Comment, "→"), + (Token.Name.Variable, "~/code/flask"), + ], + ), + ListOutputFixture( + test_id="path_with_plus", + input_text="• GeographicLib → ~/study/c++/GeographicLib", + expected_tokens=[ + (Token.Comment, "•"), + (Token.Name.Function, "GeographicLib"), + (Token.Comment, "→"), + (Token.Name.Variable, 
"~/study/c++/GeographicLib"), + ], + ), + ListOutputFixture( + test_id="repo_with_dots", + input_text="• pytest-django → ~/code/pytest-django", + expected_tokens=[ + (Token.Comment, "•"), + (Token.Name.Function, "pytest-django"), + (Token.Comment, "→"), + (Token.Name.Variable, "~/code/pytest-django"), + ], + ), +] + + +@pytest.mark.parametrize( + ListOutputFixture._fields, + LIST_OUTPUT_FIXTURES, + ids=[f.test_id for f in LIST_OUTPUT_FIXTURES], +) +def test_list_output( + test_id: str, + input_text: str, + expected_tokens: list[tuple[t.Any, str]], +) -> None: + """Test list command output tokenization.""" + lexer = VcspullOutputLexer() + tokens = [(t, v) for t, v in lexer.get_tokens(input_text) if v.strip()] + assert tokens == expected_tokens + + +# --- Status output tests --- + + +class StatusOutputFixture(t.NamedTuple): + """Test fixture for status output patterns.""" + + test_id: str + input_text: str + expected_tokens: list[tuple[t.Any, str]] + + +STATUS_OUTPUT_FIXTURES: list[StatusOutputFixture] = [ + StatusOutputFixture( + test_id="success_up_to_date", + input_text="✓ flask: up to date", + expected_tokens=[ + (Token.Generic.Inserted, "✓"), + (Token.Name.Function, "flask"), + (Token.Punctuation, ":"), + (Token.Generic.Inserted, "up to date"), + ], + ), + StatusOutputFixture( + test_id="error_missing", + input_text="✗ missing-repo: missing", + expected_tokens=[ + (Token.Generic.Error, "✗"), + (Token.Name.Function, "missing-repo"), + (Token.Punctuation, ":"), + (Token.Generic.Error, "missing"), + ], + ), + StatusOutputFixture( + test_id="warning_dirty", + input_text="⚠ dirty-repo: dirty", + expected_tokens=[ + (Token.Name.Exception, "⚠"), + (Token.Name.Function, "dirty-repo"), + (Token.Punctuation, ":"), + (Token.Name.Exception, "dirty"), + ], + ), + StatusOutputFixture( + test_id="warning_behind", + input_text="⚠ behind-repo: behind by 5", + expected_tokens=[ + (Token.Name.Exception, "⚠"), + (Token.Name.Function, "behind-repo"), + (Token.Punctuation, ":"), + (Token.Name.Exception, "behind by 5"), + ], + ), +] + + +@pytest.mark.parametrize( + StatusOutputFixture._fields, + STATUS_OUTPUT_FIXTURES, + ids=[f.test_id for f in STATUS_OUTPUT_FIXTURES], +) +def test_status_output( + test_id: str, + input_text: str, + expected_tokens: list[tuple[t.Any, str]], +) -> None: + """Test status command output tokenization.""" + lexer = VcspullOutputLexer() + tokens = [(t, v) for t, v in lexer.get_tokens(input_text) if v.strip()] + assert tokens == expected_tokens + + +# --- Sync output tests --- + + +class SyncOutputFixture(t.NamedTuple): + """Test fixture for sync output patterns.""" + + test_id: str + input_text: str + expected_tokens: list[tuple[t.Any, str]] + + +SYNC_OUTPUT_FIXTURES: list[SyncOutputFixture] = [ + SyncOutputFixture( + test_id="clone_with_url", + input_text="+ new-repo ~/code/new-repo git+https://github.com/user/repo", + expected_tokens=[ + (Token.Generic.Inserted, "+"), + (Token.Name.Function, "new-repo"), + (Token.Name.Variable, "~/code/new-repo"), + (Token.Name.Tag, "git+https://github.com/user/repo"), + ], + ), + SyncOutputFixture( + test_id="update_repo", + input_text="~ old-repo ~/code/old-repo", + expected_tokens=[ + (Token.Name.Exception, "~"), + (Token.Name.Function, "old-repo"), + (Token.Name.Variable, "~/code/old-repo"), + ], + ), + SyncOutputFixture( + test_id="unchanged_repo", + input_text="✓ stable ~/code/stable", + expected_tokens=[ + (Token.Generic.Inserted, "✓"), + (Token.Name.Function, "stable"), + (Token.Name.Variable, "~/code/stable"), + ], + ), +] + + 
+@pytest.mark.parametrize( + SyncOutputFixture._fields, + SYNC_OUTPUT_FIXTURES, + ids=[f.test_id for f in SYNC_OUTPUT_FIXTURES], +) +def test_sync_output( + test_id: str, + input_text: str, + expected_tokens: list[tuple[t.Any, str]], +) -> None: + """Test sync command output tokenization.""" + lexer = VcspullOutputLexer() + tokens = [(t, v) for t, v in lexer.get_tokens(input_text) if v.strip()] + assert tokens == expected_tokens + + +# --- Summary output tests --- + + +class SummaryOutputFixture(t.NamedTuple): + """Test fixture for summary output patterns.""" + + test_id: str + input_text: str + expected_tokens: list[tuple[t.Any, str]] + + +SUMMARY_OUTPUT_FIXTURES: list[SummaryOutputFixture] = [ + SummaryOutputFixture( + test_id="basic_summary", + input_text="Summary: 10 repositories, 8 exist, 2 missing", + expected_tokens=[ + (Token.Generic.Heading, "Summary:"), + (Token.Literal.Number.Integer, "10"), + (Token.Name.Label, "repositories"), + (Token.Punctuation, ","), + (Token.Literal.Number.Integer, "8"), + (Token.Name.Label, "exist"), + (Token.Punctuation, ","), + (Token.Literal.Number.Integer, "2"), + (Token.Name.Label, "missing"), + ], + ), +] + + +@pytest.mark.parametrize( + SummaryOutputFixture._fields, + SUMMARY_OUTPUT_FIXTURES, + ids=[f.test_id for f in SUMMARY_OUTPUT_FIXTURES], +) +def test_summary_output( + test_id: str, + input_text: str, + expected_tokens: list[tuple[t.Any, str]], +) -> None: + """Test summary line tokenization.""" + lexer = VcspullOutputLexer() + tokens = [(t, v) for t, v in lexer.get_tokens(input_text) if v.strip()] + assert tokens == expected_tokens + + +# --- Workspace header tests --- + + +class WorkspaceHeaderFixture(t.NamedTuple): + """Test fixture for workspace header patterns.""" + + test_id: str + input_text: str + expected_tokens: list[tuple[t.Any, str]] + + +WORKSPACE_HEADER_FIXTURES: list[WorkspaceHeaderFixture] = [ + WorkspaceHeaderFixture( + test_id="home_relative_path", + input_text="~/work/python/", + expected_tokens=[ + (Token.Generic.Subheading, "~/work/python/"), + ], + ), + WorkspaceHeaderFixture( + test_id="absolute_path", + input_text="/home/user/code/", + expected_tokens=[ + (Token.Generic.Subheading, "/home/user/code/"), + ], + ), +] + + +@pytest.mark.parametrize( + WorkspaceHeaderFixture._fields, + WORKSPACE_HEADER_FIXTURES, + ids=[f.test_id for f in WORKSPACE_HEADER_FIXTURES], +) +def test_workspace_header( + test_id: str, + input_text: str, + expected_tokens: list[tuple[t.Any, str]], +) -> None: + """Test workspace header tokenization.""" + lexer = VcspullOutputLexer() + tokens = [(t, v) for t, v in lexer.get_tokens(input_text) if v.strip()] + assert tokens == expected_tokens + + +# --- Multiline tests --- + + +def test_multiline_list_output() -> None: + """Test multiline list output with workspace header.""" + text = """~/work/python/ + • flask → ~/work/python/flask + • requests → ~/work/python/requests""" + + lexer = VcspullOutputLexer() + tokens = [(t, v) for t, v in lexer.get_tokens(text) if v.strip()] + + # Check key tokens are present + assert (Token.Generic.Subheading, "~/work/python/") in tokens + assert (Token.Name.Function, "flask") in tokens + assert (Token.Name.Function, "requests") in tokens + assert (Token.Name.Variable, "~/work/python/flask") in tokens + assert (Token.Name.Variable, "~/work/python/requests") in tokens + + +def test_multiline_sync_output() -> None: + """Test multiline sync plan output.""" + text = """~/work/python/ ++ new-lib ~/work/python/new-lib git+https://github.com/user/new-lib +~ old-lib 
~/work/python/old-lib +✓ stable-lib ~/work/python/stable-lib""" + + lexer = VcspullOutputLexer() + tokens = [(t, v) for t, v in lexer.get_tokens(text) if v.strip()] + + # Check symbols + assert (Token.Generic.Inserted, "+") in tokens + assert (Token.Name.Exception, "~") in tokens + assert (Token.Generic.Inserted, "✓") in tokens + + # Check repo names + assert (Token.Name.Function, "new-lib") in tokens + assert (Token.Name.Function, "old-lib") in tokens + assert (Token.Name.Function, "stable-lib") in tokens + + +# --- tokenize_output helper tests --- + + +def test_tokenize_output_basic() -> None: + """Test the tokenize_output helper function.""" + result = tokenize_output("• flask → ~/code/flask") + assert result[0] == ("Token.Comment", "•") + assert ("Token.Name.Function", "flask") in result + assert ("Token.Comment", "→") in result + assert ("Token.Name.Variable", "~/code/flask") in result + + +def test_tokenize_output_empty() -> None: + """Test tokenize_output with empty string.""" + result = tokenize_output("") + # Should only have a trailing newline token + assert len(result) == 1 + assert result[0][0] == "Token.Text.Whitespace" + + +# --- URL and prompt tests --- + + +def test_url_in_parentheses() -> None: + """Test plain HTTPS URLs in parentheses are tokenized correctly.""" + text = " + pytest-docker (https://github.com/avast/pytest-docker)" + lexer = VcspullOutputLexer() + tokens = [(t, v) for t, v in lexer.get_tokens(text) if v.strip()] + + assert (Token.Generic.Inserted, "+") in tokens + assert (Token.Name.Function, "pytest-docker") in tokens + assert (Token.Punctuation, "(") in tokens + assert (Token.Name.Tag, "https://github.com/avast/pytest-docker") in tokens + assert (Token.Punctuation, ")") in tokens + + +def test_interactive_prompt() -> None: + """Test interactive prompt [y/N] patterns.""" + text = "? Import this repository? [y/N]: y" + lexer = VcspullOutputLexer() + tokens = [(t, v) for t, v in lexer.get_tokens(text) if v.strip()] + + assert (Token.Generic.Prompt, "?") in tokens + assert (Token.Comment, "[y/N]") in tokens + + +def test_vcspull_add_output() -> None: + """Test full vcspull add output with all patterns.""" + text = """Found new repository to import: + + pytest-docker (https://github.com/avast/pytest-docker) + • workspace: ~/study/python/ +? Import this repository? 
[y/N]: y""" + + lexer = VcspullOutputLexer() + tokens = [(t, v) for t, v in lexer.get_tokens(text) if v.strip()] + + # Check key tokens + assert (Token.Generic.Inserted, "+") in tokens + assert (Token.Name.Function, "pytest-docker") in tokens + assert (Token.Name.Tag, "https://github.com/avast/pytest-docker") in tokens + assert (Token.Comment, "•") in tokens + assert (Token.Generic.Heading, "workspace:") in tokens + assert (Token.Generic.Prompt, "?") in tokens + assert (Token.Comment, "[y/N]") in tokens diff --git a/uv.lock b/uv.lock index 43b26c55c..11d4e3c15 100644 --- a/uv.lock +++ b/uv.lock @@ -313,11 +313,27 @@ toml = [ name = "docutils" version = "0.21.2" source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.11'", +] sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444, upload-time = "2024-04-23T18:57:18.24Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408, upload-time = "2024-04-23T18:57:14.835Z" }, ] +[[package]] +name = "docutils" +version = "0.22.4" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", +] +sdist = { url = "https://files.pythonhosted.org/packages/ae/b6/03bb70946330e88ffec97aefd3ea75ba575cb2e762061e0e62a213befee8/docutils-0.22.4.tar.gz", hash = "sha256:4db53b1fde9abecbb74d91230d32ab626d94f6badfc575d6db9194a49df29968", size = 2291750, upload-time = "2025-12-18T19:00:26.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/10/5da547df7a391dcde17f59520a231527b8571e6f46fc8efb02ccb370ab12/docutils-0.22.4-py3-none-any.whl", hash = "sha256:d0013f540772d1420576855455d050a2180186c91c15779301ac2ccb3eeb68de", size = 633196, upload-time = "2025-12-18T19:00:18.077Z" }, +] + [[package]] name = "exceptiongroup" version = "1.3.1" @@ -338,7 +354,9 @@ dependencies = [ { name = "accessible-pygments" }, { name = "beautifulsoup4" }, { name = "pygments" }, - { name = "sphinx" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "sphinx", version = "9.0.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, + { name = "sphinx", version = "9.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, { name = "sphinx-basic-ng" }, ] sdist = { url = "https://files.pythonhosted.org/packages/ec/20/5f5ad4da6a5a27c80f2ed2ee9aee3f9e36c66e56e21c00fde467b2f8f88f/furo-2025.12.19.tar.gz", hash = "sha256:188d1f942037d8b37cd3985b955839fea62baa1730087dc29d157677c857e2a7", size = 1661473, upload-time = "2025-12-19T17:34:40.889Z" } @@ -351,7 +369,8 @@ name = "gp-libs" version = "0.0.17" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "docutils" }, + { name = "docutils", version = "0.21.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "docutils", version = "0.22.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "myst-parser", 
version = "4.0.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "myst-parser", version = "5.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, ] @@ -706,12 +725,12 @@ resolution-markers = [ "python_full_version < '3.11'", ] dependencies = [ - { name = "docutils", marker = "python_full_version < '3.11'" }, + { name = "docutils", version = "0.21.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "jinja2", marker = "python_full_version < '3.11'" }, { name = "markdown-it-py", version = "3.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "mdit-py-plugins", marker = "python_full_version < '3.11'" }, { name = "pyyaml", marker = "python_full_version < '3.11'" }, - { name = "sphinx", marker = "python_full_version < '3.11'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/66/a5/9626ba4f73555b3735ad86247a8077d4603aa8628537687c839ab08bfe44/myst_parser-4.0.1.tar.gz", hash = "sha256:5cfea715e4f3574138aecbf7d54132296bfd72bb614d31168f48c477a830a7c4", size = 93985, upload-time = "2025-02-12T10:53:03.833Z" } wheels = [ @@ -727,12 +746,13 @@ resolution-markers = [ "python_full_version == '3.11.*'", ] dependencies = [ - { name = "docutils", marker = "python_full_version >= '3.11'" }, + { name = "docutils", version = "0.22.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "jinja2", marker = "python_full_version >= '3.11'" }, { name = "markdown-it-py", version = "4.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "mdit-py-plugins", marker = "python_full_version >= '3.11'" }, { name = "pyyaml", marker = "python_full_version >= '3.11'" }, - { name = "sphinx", marker = "python_full_version >= '3.11'" }, + { name = "sphinx", version = "9.0.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, + { name = "sphinx", version = "9.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/33/fa/7b45eef11b7971f0beb29d27b7bfe0d747d063aa29e170d9edd004733c8a/myst_parser-5.0.0.tar.gz", hash = "sha256:f6f231452c56e8baa662cc352c548158f6a16fcbd6e3800fc594978002b94f3a", size = 98535, upload-time = "2026-01-15T09:08:18.036Z" } wheels = [ @@ -938,6 +958,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, ] +[[package]] +name = "roman-numerals" +version = "4.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/f9/41dc953bbeb056c17d5f7a519f50fdf010bd0553be2d630bc69d1e022703/roman_numerals-4.1.0.tar.gz", hash = "sha256:1af8b147eb1405d5839e78aeb93131690495fe9da5c91856cb33ad55a7f1e5b2", size = 9077, upload-time = "2025-12-17T18:25:34.381Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/04/54/6f679c435d28e0a568d8e8a7c0a93a09010818634c3c3907fc98d8983770/roman_numerals-4.1.0-py3-none-any.whl", hash = "sha256:647ba99caddc2cc1e55a51e4360689115551bf4476d90e8162cf8c345fe233c7", size = 7676, upload-time = "2025-12-17T18:25:33.098Z" }, +] + [[package]] name = "ruff" version = "0.14.13" @@ -986,23 +1015,26 @@ wheels = [ name = "sphinx" version = "8.1.3" source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.11'", +] dependencies = [ - { name = "alabaster" }, - { name = "babel" }, - { name = "colorama", marker = "sys_platform == 'win32'" }, - { name = "docutils" }, - { name = "imagesize" }, - { name = "jinja2" }, - { name = "packaging" }, - { name = "pygments" }, - { name = "requests" }, - { name = "snowballstemmer" }, - { name = "sphinxcontrib-applehelp" }, - { name = "sphinxcontrib-devhelp" }, - { name = "sphinxcontrib-htmlhelp" }, - { name = "sphinxcontrib-jsmath" }, - { name = "sphinxcontrib-qthelp" }, - { name = "sphinxcontrib-serializinghtml" }, + { name = "alabaster", marker = "python_full_version < '3.11'" }, + { name = "babel", marker = "python_full_version < '3.11'" }, + { name = "colorama", marker = "python_full_version < '3.11' and sys_platform == 'win32'" }, + { name = "docutils", version = "0.21.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "imagesize", marker = "python_full_version < '3.11'" }, + { name = "jinja2", marker = "python_full_version < '3.11'" }, + { name = "packaging", marker = "python_full_version < '3.11'" }, + { name = "pygments", marker = "python_full_version < '3.11'" }, + { name = "requests", marker = "python_full_version < '3.11'" }, + { name = "snowballstemmer", marker = "python_full_version < '3.11'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version < '3.11'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version < '3.11'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version < '3.11'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version < '3.11'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version < '3.11'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version < '3.11'" }, { name = "tomli", marker = "python_full_version < '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/be0b61178fe2cdcb67e2a92fc9ebb488e3c51c4f74a36a7824c0adf23425/sphinx-8.1.3.tar.gz", hash = "sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927", size = 8184611, upload-time = "2024-10-13T20:27:13.93Z" } @@ -1011,16 +1043,65 @@ wheels = [ ] [[package]] -name = "sphinx-argparse" -version = "0.5.2" +name = "sphinx" +version = "9.0.4" source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.11.*'", +] dependencies = [ - { name = "docutils" }, - { name = "sphinx" }, + { name = "alabaster", marker = "python_full_version == '3.11.*'" }, + { name = "babel", marker = "python_full_version == '3.11.*'" }, + { name = "colorama", marker = "python_full_version == '3.11.*' and sys_platform == 'win32'" }, + { name = "docutils", version = "0.22.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, + { name = "imagesize", marker = "python_full_version == '3.11.*'" }, + { name = "jinja2", marker = "python_full_version == '3.11.*'" }, + { name = "packaging", marker = "python_full_version == '3.11.*'" }, + { 
name = "pygments", marker = "python_full_version == '3.11.*'" }, + { name = "requests", marker = "python_full_version == '3.11.*'" }, + { name = "roman-numerals", marker = "python_full_version == '3.11.*'" }, + { name = "snowballstemmer", marker = "python_full_version == '3.11.*'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version == '3.11.*'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version == '3.11.*'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version == '3.11.*'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version == '3.11.*'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version == '3.11.*'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version == '3.11.*'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/50/a8c6ccc36d5eacdfd7913ddccd15a9cee03ecafc5ee2bc40e1f168d85022/sphinx-9.0.4.tar.gz", hash = "sha256:594ef59d042972abbc581d8baa577404abe4e6c3b04ef61bd7fc2acbd51f3fa3", size = 8710502, upload-time = "2025-12-04T07:45:27.343Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/3f/4bbd76424c393caead2e1eb89777f575dee5c8653e2d4b6afd7a564f5974/sphinx-9.0.4-py3-none-any.whl", hash = "sha256:5bebc595a5e943ea248b99c13814c1c5e10b3ece718976824ffa7959ff95fffb", size = 3917713, upload-time = "2025-12-04T07:45:24.944Z" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3b/21/a8c64e6633652111e6e4f89703182a53cbc3ed67233523e47472101358b6/sphinx_argparse-0.5.2.tar.gz", hash = "sha256:e5352f8fa894b6fb6fda0498ba28a9f8d435971ef4bbc1a6c9c6414e7644f032", size = 27838, upload-time = "2024-07-17T12:08:08.219Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e5/43/9f0e9bfb3ce02cbf7747aa2185c48a9d6e42ba95736a5e8f511a5054d976/sphinx_argparse-0.5.2-py3-none-any.whl", hash = "sha256:d771b906c36d26dee669dbdbb5605c558d9440247a5608b810f7fa6e26ab1fd3", size = 12547, upload-time = "2024-07-17T12:08:06.307Z" }, + +[[package]] +name = "sphinx" +version = "9.1.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.12'", +] +dependencies = [ + { name = "alabaster", marker = "python_full_version >= '3.12'" }, + { name = "babel", marker = "python_full_version >= '3.12'" }, + { name = "colorama", marker = "python_full_version >= '3.12' and sys_platform == 'win32'" }, + { name = "docutils", version = "0.22.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, + { name = "imagesize", marker = "python_full_version >= '3.12'" }, + { name = "jinja2", marker = "python_full_version >= '3.12'" }, + { name = "packaging", marker = "python_full_version >= '3.12'" }, + { name = "pygments", marker = "python_full_version >= '3.12'" }, + { name = "requests", marker = "python_full_version >= '3.12'" }, + { name = "roman-numerals", marker = "python_full_version >= '3.12'" }, + { name = "snowballstemmer", marker = "python_full_version >= '3.12'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version >= '3.12'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version >= '3.12'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version >= '3.12'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version >= '3.12'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version >= '3.12'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version >= '3.12'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/cd/bd/f08eb0f4eed5c83f1ba2a3bd18f7745a2b1525fad70660a1c00224ec468a/sphinx-9.1.0.tar.gz", hash = "sha256:7741722357dd75f8190766926071fed3bdc211c74dd2d7d4df5404da95930ddb", size = 8718324, upload-time = "2025-12-31T15:09:27.646Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/f7/b1884cb3188ab181fc81fa00c266699dab600f927a964df02ec3d5d1916a/sphinx-9.1.0-py3-none-any.whl", hash = "sha256:c84fdd4e782504495fe4f2c0b3413d6c2bf388589bb352d439b2a3bb99991978", size = 3921742, upload-time = "2025-12-31T15:09:25.561Z" }, ] [[package]] @@ -1032,7 +1113,7 @@ resolution-markers = [ ] dependencies = [ { name = "colorama", marker = "python_full_version < '3.11'" }, - { name = "sphinx", marker = "python_full_version < '3.11'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "starlette", marker = "python_full_version < '3.11'" }, { name = "uvicorn", marker = "python_full_version < '3.11'" }, { name = "watchfiles", marker = "python_full_version < '3.11'" }, @@ -1053,7 +1134,8 @@ resolution-markers = [ ] dependencies = [ { name = "colorama", marker = "python_full_version >= '3.11'" }, - { name = "sphinx", marker = "python_full_version >= '3.11'" }, + { name = "sphinx", version = "9.0.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, + { name = "sphinx", version = "9.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, { name = "starlette", marker = "python_full_version >= '3.11'" }, { name = "uvicorn", marker = "python_full_version >= '3.11'" }, { name = "watchfiles", marker = "python_full_version >= '3.11'" }, @@ -1068,20 +1150,55 @@ wheels = [ name = "sphinx-autodoc-typehints" version = "3.0.1" source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.11'", +] dependencies = [ - { name = "sphinx" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/26/f0/43c6a5ff3e7b08a8c3b32f81b859f1b518ccc31e45f22e2b41ced38be7b9/sphinx_autodoc_typehints-3.0.1.tar.gz", hash = "sha256:b9b40dd15dee54f6f810c924f863f9cf1c54f9f3265c495140ea01be7f44fa55", size = 36282, upload-time = "2025-01-16T18:25:30.958Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/3c/dc/dc46c5c7c566b7ec5e8f860f9c89533bf03c0e6aadc96fb9b337867e4460/sphinx_autodoc_typehints-3.0.1-py3-none-any.whl", hash = "sha256:4b64b676a14b5b79cefb6628a6dc8070e320d4963e8ff640a2f3e9390ae9045a", size = 20245, upload-time = "2025-01-16T18:25:27.394Z" }, ] +[[package]] +name = "sphinx-autodoc-typehints" +version = "3.6.1" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.11.*'", +] +dependencies = [ + { name = "sphinx", version = "9.0.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1d/f6/bdd93582b2aaad2cfe9eb5695a44883c8bc44572dd3c351a947acbb13789/sphinx_autodoc_typehints-3.6.1.tar.gz", hash = "sha256:fa0b686ae1b85965116c88260e5e4b82faec3687c2e94d6a10f9b36c3743e2fe", size = 37563, upload-time = "2026-01-02T15:23:46.543Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/dc/6a/c0360b115c81d449b3b73bf74b64ca773464d5c7b1b77bda87c5e874853b/sphinx_autodoc_typehints-3.6.1-py3-none-any.whl", hash = "sha256:dd818ba31d4c97f219a8c0fcacef280424f84a3589cedcb73003ad99c7da41ca", size = 20869, upload-time = "2026-01-02T15:23:45.194Z" }, +] + +[[package]] +name = "sphinx-autodoc-typehints" +version = "3.6.2" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.12'", +] +dependencies = [ + { name = "sphinx", version = "9.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/97/51/6603ed3786a2d52366c66f49bc8afb31ae5c0e33d4a156afcb38d2bac62c/sphinx_autodoc_typehints-3.6.2.tar.gz", hash = "sha256:3d37709a21b7b765ad6e20a04ecefcb229b9eb0007cb24f6ebaa8a4576ea7f06", size = 37574, upload-time = "2026-01-02T21:25:28.216Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/6a/877e8a6ea52fc86d88ce110ebcfe4f8474ff590d8a8d322909673af3da7b/sphinx_autodoc_typehints-3.6.2-py3-none-any.whl", hash = "sha256:9e70bee1f487b087c83ba0f4949604a4630bee396e263a324aae1dc4268d2c0f", size = 20853, upload-time = "2026-01-02T21:25:26.853Z" }, +] + [[package]] name = "sphinx-basic-ng" version = "1.0.0b2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "sphinx" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "sphinx", version = "9.0.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, + { name = "sphinx", version = "9.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/98/0b/a866924ded68efec7a1759587a4e478aec7559d8165fac8b2ad1c0e774d6/sphinx_basic_ng-1.0.0b2.tar.gz", hash = "sha256:9ec55a47c90c8c002b5960c57492ec3021f5193cb26cebc2dc4ea226848651c9", size = 20736, upload-time = "2023-07-08T18:40:54.166Z" } wheels = [ @@ -1093,7 +1210,9 @@ name = "sphinx-copybutton" version = "0.5.2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "sphinx" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "sphinx", version = "9.0.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, + { name = "sphinx", version = "9.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/fc/2b/a964715e7f5295f77509e59309959f4125122d648f86b4fe7d70ca1d882c/sphinx-copybutton-0.5.2.tar.gz", hash = "sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd", size = 23039, upload-time = "2023-04-14T08:10:22.998Z" } wheels = [ @@ -1105,7 +1224,9 @@ name = "sphinx-inline-tabs" version = "2025.12.21.14" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "sphinx" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "sphinx", version = "9.0.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, + { name = "sphinx", version = "9.1.0", source = { registry = "https://pypi.org/simple" }, marker = 
"python_full_version >= '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/76/6a/f39bde46a79b80a9983233d99b773bd24b468bdd9c1e87acb46ff69af441/sphinx_inline_tabs-2025.12.21.14.tar.gz", hash = "sha256:c71a75800326e613fb4e410eed92a0934214741326aca9897c18018b9f968cb6", size = 45572, upload-time = "2025-12-21T13:30:51.071Z" } wheels = [ @@ -1171,7 +1292,9 @@ name = "sphinxext-opengraph" version = "0.13.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "sphinx" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "sphinx", version = "9.0.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, + { name = "sphinx", version = "9.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/f6/c0/eb6838e3bae624ce6c8b90b245d17e84252863150e95efdb88f92c8aa3fb/sphinxext_opengraph-0.13.0.tar.gz", hash = "sha256:103335d08567ad8468faf1425f575e3b698e9621f9323949a6c8b96d9793e80b", size = 1026875, upload-time = "2025-08-29T12:20:31.066Z" } wheels = [ @@ -1183,7 +1306,9 @@ name = "sphinxext-rediraffe" version = "0.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "sphinx" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "sphinx", version = "9.0.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, + { name = "sphinx", version = "9.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/e3/a9/ab13d156049eea633f992424f3e92cb40e3f1b606bb6d01d40a27457d38a/sphinxext_rediraffe-0.3.0.tar.gz", hash = "sha256:f319b3ccb7c3c3b6f63ffa6fd3eeb171b6d272df55075a9e84364394f391f507", size = 22114, upload-time = "2025-09-28T15:31:53.641Z" } wheels = [ @@ -1278,6 +1403,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/95/3a/44ccbbfef6235aeea84c74041dc6dfee6c17ff3ddba782a0250e41687ec7/types_colorama-0.4.15.20250801-py3-none-any.whl", hash = "sha256:b6e89bd3b250fdad13a8b6a465c933f4a5afe485ea2e2f104d739be50b13eea9", size = 10743, upload-time = "2025-08-01T03:48:21.774Z" }, ] +[[package]] +name = "types-docutils" +version = "0.22.3.20251115" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/d7/576ec24bf61a280f571e1f22284793adc321610b9bcfba1bf468cf7b334f/types_docutils-0.22.3.20251115.tar.gz", hash = "sha256:0f79ea6a7bd4d12d56c9f824a0090ffae0ea4204203eb0006392906850913e16", size = 56828, upload-time = "2025-11-15T02:59:57.371Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9c/01/61ac9eb38f1f978b47443dc6fd2e0a3b0f647c2da741ddad30771f1b2b6f/types_docutils-0.22.3.20251115-py3-none-any.whl", hash = "sha256:c6e53715b65395d00a75a3a8a74e352c669bc63959e65a207dffaa22f4a2ad6e", size = 91951, upload-time = "2025-11-15T02:59:56.413Z" }, +] + +[[package]] +name = "types-pygments" +version = "2.19.0.20251121" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "types-docutils" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/90/3b/cd650700ce9e26b56bd1a6aa4af397bbbc1784e22a03971cb633cdb0b601/types_pygments-2.19.0.20251121.tar.gz", hash = 
"sha256:eef114fde2ef6265365522045eac0f8354978a566852f69e75c531f0553822b1", size = 18590, upload-time = "2025-11-21T03:03:46.623Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/8a/9244b21f1d60dcc62e261435d76b02f1853b4771663d7ec7d287e47a9ba9/types_pygments-2.19.0.20251121-py3-none-any.whl", hash = "sha256:cb3bfde34eb75b984c98fb733ce4f795213bd3378f855c32e75b49318371bb25", size = 25674, upload-time = "2025-11-21T03:03:45.72Z" }, +] + [[package]] name = "types-pyyaml" version = "6.0.12.20250915" @@ -1372,17 +1518,22 @@ dev = [ { name = "pytest-rerunfailures" }, { name = "pytest-watcher" }, { name = "ruff" }, - { name = "sphinx" }, - { name = "sphinx-argparse" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "sphinx", version = "9.0.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, + { name = "sphinx", version = "9.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, { name = "sphinx-autobuild", version = "2024.10.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "sphinx-autobuild", version = "2025.8.25", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, - { name = "sphinx-autodoc-typehints" }, + { name = "sphinx-autodoc-typehints", version = "3.0.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "sphinx-autodoc-typehints", version = "3.6.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, + { name = "sphinx-autodoc-typehints", version = "3.6.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, { name = "sphinx-copybutton" }, { name = "sphinx-inline-tabs" }, { name = "sphinxext-opengraph" }, { name = "sphinxext-rediraffe" }, { name = "syrupy" }, { name = "types-colorama" }, + { name = "types-docutils" }, + { name = "types-pygments" }, { name = "types-pyyaml" }, { name = "types-requests" }, ] @@ -1392,11 +1543,14 @@ docs = [ { name = "linkify-it-py" }, { name = "myst-parser", version = "4.0.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "myst-parser", version = "5.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, - { name = "sphinx" }, - { name = "sphinx-argparse" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "sphinx", version = "9.0.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, + { name = "sphinx", version = "9.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, { name = "sphinx-autobuild", version = "2024.10.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "sphinx-autobuild", version = "2025.8.25", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, - { name = "sphinx-autodoc-typehints" }, + { name = "sphinx-autodoc-typehints", version = "3.0.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "sphinx-autodoc-typehints", version = "3.6.1", source = { registry = 
"https://pypi.org/simple" }, marker = "python_full_version == '3.11.*'" }, + { name = "sphinx-autodoc-typehints", version = "3.6.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, { name = "sphinx-copybutton" }, { name = "sphinx-inline-tabs" }, { name = "sphinxext-opengraph" }, @@ -1417,6 +1571,8 @@ testing = [ ] typings = [ { name = "types-colorama" }, + { name = "types-docutils" }, + { name = "types-pygments" }, { name = "types-pyyaml" }, { name = "types-requests" }, ] @@ -1449,8 +1605,7 @@ dev = [ { name = "pytest-rerunfailures" }, { name = "pytest-watcher" }, { name = "ruff" }, - { name = "sphinx", specifier = "<9" }, - { name = "sphinx-argparse" }, + { name = "sphinx", specifier = ">=8" }, { name = "sphinx-autobuild" }, { name = "sphinx-autodoc-typehints" }, { name = "sphinx-copybutton" }, @@ -1459,6 +1614,8 @@ dev = [ { name = "sphinxext-rediraffe" }, { name = "syrupy" }, { name = "types-colorama" }, + { name = "types-docutils" }, + { name = "types-pygments" }, { name = "types-pyyaml" }, { name = "types-requests" }, ] @@ -1467,8 +1624,7 @@ docs = [ { name = "gp-libs" }, { name = "linkify-it-py" }, { name = "myst-parser" }, - { name = "sphinx", specifier = "<9" }, - { name = "sphinx-argparse" }, + { name = "sphinx", specifier = ">=8" }, { name = "sphinx-autobuild" }, { name = "sphinx-autodoc-typehints" }, { name = "sphinx-copybutton" }, @@ -1491,6 +1647,8 @@ testing = [ ] typings = [ { name = "types-colorama" }, + { name = "types-docutils" }, + { name = "types-pygments" }, { name = "types-pyyaml" }, { name = "types-requests" }, ]