From 19915ae4940f9070a48461209dcfd2371371fa93 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 10 Jan 2026 18:50:37 -0600 Subject: [PATCH 1/4] py(deps[dev]): Add types-Pygments for mypy Add Pygments type stubs to dev and lint dependency groups. Also add sphinxarg.* to mypy ignore_missing_imports. --- pyproject.toml | 3 +++ uv.lock | 18 +++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 67d7c8ee41..b8851d88cd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -85,6 +85,7 @@ dev = [ "mypy", "types-colorama", "types-docutils", + "types-Pygments", "types-PyYAML", ] @@ -121,6 +122,7 @@ lint = [ "mypy", "types-colorama", "types-docutils", + "types-Pygments", "types-PyYAML", ] @@ -170,6 +172,7 @@ module = [ "ptpython.*", "prompt_toolkit.*", "bpython", + "sphinxarg.*", ] ignore_missing_imports = true diff --git a/uv.lock b/uv.lock index e9ad39608f..b079e1f44b 100644 --- a/uv.lock +++ b/uv.lock @@ -323,7 +323,7 @@ name = "exceptiongroup" version = "1.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } wheels = [ @@ -1367,6 +1367,7 @@ dev = [ { name = "sphinxext-rediraffe" }, { name = "types-colorama" }, { name = "types-docutils" }, + { name = "types-pygments" }, { name = "types-pyyaml" }, ] docs = [ @@ -1393,6 +1394,7 @@ lint = [ { name = "ruff" }, { name = "types-colorama" }, { name = "types-docutils" }, + { name = "types-pygments" }, { name = "types-pyyaml" }, ] testing = [ @@ -1442,6 +1444,7 @@ dev = [ { name = "sphinxext-rediraffe" }, { 
name = "types-colorama" }, { name = "types-docutils" }, + { name = "types-pygments" }, { name = "types-pyyaml" }, ] docs = [ @@ -1465,6 +1468,7 @@ lint = [ { name = "ruff" }, { name = "types-colorama" }, { name = "types-docutils" }, + { name = "types-pygments" }, { name = "types-pyyaml" }, ] testing = [ @@ -1542,6 +1546,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9c/01/61ac9eb38f1f978b47443dc6fd2e0a3b0f647c2da741ddad30771f1b2b6f/types_docutils-0.22.3.20251115-py3-none-any.whl", hash = "sha256:c6e53715b65395d00a75a3a8a74e352c669bc63959e65a207dffaa22f4a2ad6e", size = 91951, upload-time = "2025-11-15T02:59:56.413Z" }, ] +[[package]] +name = "types-pygments" +version = "2.19.0.20251121" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "types-docutils" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/90/3b/cd650700ce9e26b56bd1a6aa4af397bbbc1784e22a03971cb633cdb0b601/types_pygments-2.19.0.20251121.tar.gz", hash = "sha256:eef114fde2ef6265365522045eac0f8354978a566852f69e75c531f0553822b1", size = 18590, upload-time = "2025-11-21T03:03:46.623Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/8a/9244b21f1d60dcc62e261435d76b02f1853b4771663d7ec7d287e47a9ba9/types_pygments-2.19.0.20251121-py3-none-any.whl", hash = "sha256:cb3bfde34eb75b984c98fb733ce4f795213bd3378f855c32e75b49318371bb25", size = 25674, upload-time = "2025-11-21T03:03:45.72Z" }, +] + [[package]] name = "types-pyyaml" version = "6.0.12.20250915" From b3dd9ccacbfca98d15dec5fb1df42761b4fd38db Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 10 Jan 2026 18:50:57 -0600 Subject: [PATCH 2/4] docs(_ext): Add pretty_argparse extension with CLI usage highlighting A comprehensive Sphinx extension that enhances sphinx-argparse output: Features: - Strip ANSI escape codes from argparse help text (FORCE_COLOR support) - Transform "examples:" definition lists into proper doc sections - Nest category-specific examples under parent 
Examples section - Custom Pygments lexer (cli-usage) for usage block syntax highlighting - Reorder sections so usage appears before examples CLI usage lexer token types: - Generic.Heading: "usage:" prefix - Name.Tag: long options (--verbose, --no-color) - Name.Attribute: short options (-h, -S) - Name.Variable: option values (socket-path, COMMAND) - Name.Constant: uppercase metavars (FILE, PATH) - Name.Label: positional args and command names - Punctuation: brackets [], parentheses () - Operator: pipe | and equals = Files added: - docs/_ext/pretty_argparse.py: Main extension (680 lines) - docs/_ext/cli_usage_lexer.py: Pygments lexer (115 lines) - tests/docs/_ext/test_pretty_argparse.py: 66 tests (854 lines) - tests/docs/_ext/test_cli_usage_lexer.py: 22 tests (357 lines) --- docs/_ext/cli_usage_lexer.py | 115 ++++ docs/_ext/pretty_argparse.py | 680 +++++++++++++++++++ docs/conf.py | 1 + pyproject.toml | 1 + tests/docs/__init__.py | 1 + tests/docs/_ext/__init__.py | 1 + tests/docs/_ext/conftest.py | 11 + tests/docs/_ext/test_cli_usage_lexer.py | 357 ++++ tests/docs/_ext/test_pretty_argparse.py | 854 ++++++++++++++++++++++++ 9 files changed, 2021 insertions(+) create mode 100644 docs/_ext/cli_usage_lexer.py create mode 100644 docs/_ext/pretty_argparse.py create mode 100644 tests/docs/__init__.py create mode 100644 tests/docs/_ext/__init__.py create mode 100644 tests/docs/_ext/conftest.py create mode 100644 tests/docs/_ext/test_cli_usage_lexer.py create mode 100644 tests/docs/_ext/test_pretty_argparse.py diff --git a/docs/_ext/cli_usage_lexer.py b/docs/_ext/cli_usage_lexer.py new file mode 100644 index 0000000000..40170e3178 --- /dev/null +++ b/docs/_ext/cli_usage_lexer.py @@ -0,0 +1,115 @@ +"""Pygments lexer for CLI usage/help output. + +This module provides a custom Pygments lexer for highlighting command-line +usage text typically generated by argparse, getopt, or similar libraries. 
+""" + +from __future__ import annotations + +from pygments.lexer import RegexLexer, bygroups, include +from pygments.token import Generic, Name, Operator, Punctuation, Text, Whitespace + + +class CLIUsageLexer(RegexLexer): + """Lexer for CLI usage/help text (argparse, etc.). + + Highlights usage patterns including options, arguments, and meta-variables. + + Examples + -------- + >>> from pygments.token import Token + >>> lexer = CLIUsageLexer() + >>> tokens = list(lexer.get_tokens("usage: cmd [-h]")) + >>> tokens[0] + (Token.Generic.Heading, 'usage:') + >>> tokens[2] + (Token.Name.Label, 'cmd') + """ + + name = "CLI Usage" + aliases = ["cli-usage", "usage"] # noqa: RUF012 + filenames: list[str] = [] # noqa: RUF012 + mimetypes = ["text/x-cli-usage"] # noqa: RUF012 + + tokens = { # noqa: RUF012 + "root": [ + # "usage:" at start of line + (r"^(usage:)(\s+)", bygroups(Generic.Heading, Whitespace)), # type: ignore[no-untyped-call] + # Continuation lines (leading whitespace for wrapped usage) + (r"^(\s+)(?=\S)", Whitespace), + include("inline"), + ], + "inline": [ + # Whitespace + (r"\s+", Whitespace), + # Long options with = value (e.g., --log-level=VALUE) + ( + r"(--[a-zA-Z0-9][-a-zA-Z0-9]*)(=)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9]*)", + bygroups(Name.Tag, Operator, Name.Variable), # type: ignore[no-untyped-call] + ), + # Long options standalone + (r"--[a-zA-Z0-9][-a-zA-Z0-9]*", Name.Tag), + # Short options with space-separated value (e.g., -S socket-path) + ( + r"(-[a-zA-Z0-9])(\s+)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9]*)", + bygroups(Name.Attribute, Whitespace, Name.Variable), # type: ignore[no-untyped-call] + ), + # Short options standalone + (r"-[a-zA-Z0-9]", Name.Attribute), + # UPPERCASE meta-variables (COMMAND, FILE, PATH) + (r"\b[A-Z][A-Z0-9_]+\b", Name.Constant), + # Opening bracket - enter optional state + (r"\[", Punctuation, "optional"), + # Closing bracket (fallback for unmatched) + (r"\]", Punctuation), + # Choice separator (pipe) + (r"\|", Operator), + # Parentheses 
for grouping + (r"[()]", Punctuation), + # Positional/command names (lowercase with dashes) + (r"\b[a-z][-a-z0-9]*\b", Name.Label), + # Catch-all for any other text + (r"[^\s\[\]|()]+", Text), + ], + "optional": [ + # Nested optional bracket + (r"\[", Punctuation, "#push"), + # End optional + (r"\]", Punctuation, "#pop"), + # Contents use inline rules + include("inline"), + ], + } + + +def tokenize_usage(text: str) -> list[tuple[str, str]]: + """Tokenize usage text and return list of (token_type, value) tuples. + + Parameters + ---------- + text : str + CLI usage text to tokenize. + + Returns + ------- + list[tuple[str, str]] + List of (token_type_name, text_value) tuples. + + Examples + -------- + >>> result = tokenize_usage("usage: cmd [-h]") + >>> result[0] + ('Token.Generic.Heading', 'usage:') + >>> result[2] + ('Token.Name.Label', 'cmd') + >>> result[4] + ('Token.Punctuation', '[') + >>> result[5] + ('Token.Name.Attribute', '-h') + >>> result[6] + ('Token.Punctuation', ']') + """ + lexer = CLIUsageLexer() + return [ + (str(tok_type), tok_value) for tok_type, tok_value in lexer.get_tokens(text) + ] diff --git a/docs/_ext/pretty_argparse.py b/docs/_ext/pretty_argparse.py new file mode 100644 index 0000000000..a7c0357cb9 --- /dev/null +++ b/docs/_ext/pretty_argparse.py @@ -0,0 +1,680 @@ +"""Enhanced sphinx-argparse output formatting. + +This extension wraps sphinx-argparse's directive to: +1. Remove ANSI escape codes that may be present when FORCE_COLOR is set +2. Convert "examples:" definition lists into proper documentation sections +3. Nest category-specific examples under a parent Examples section +4. Apply cli-usage syntax highlighting to usage blocks +5. 
Reorder sections so usage appears before examples +""" + +from __future__ import annotations + +import re +import typing as t + +from docutils import nodes +from sphinxarg.ext import ArgParseDirective + +if t.TYPE_CHECKING: + from sphinx.application import Sphinx + +_ANSI_RE = re.compile(r"\033\[[;?0-9]*[a-zA-Z]") + + +def strip_ansi(text: str) -> str: + r"""Remove ANSI escape codes from text. + + Parameters + ---------- + text : str + Text potentially containing ANSI codes. + + Returns + ------- + str + Text with ANSI codes removed. + + Examples + -------- + >>> strip_ansi("plain text") + 'plain text' + >>> strip_ansi("\033[32mgreen\033[0m") + 'green' + >>> strip_ansi("\033[1;34mbold blue\033[0m") + 'bold blue' + """ + return _ANSI_RE.sub("", text) + + +def is_examples_term(term_text: str) -> bool: + """Check if a definition term is an examples header. + + Parameters + ---------- + term_text : str + The text content of a definition term. + + Returns + ------- + bool + True if this is an examples header. + + Examples + -------- + >>> is_examples_term("examples:") + True + >>> is_examples_term("Machine-readable output examples:") + True + >>> is_examples_term("Usage:") + False + """ + return term_text.lower().rstrip(":").endswith("examples") + + +def is_base_examples_term(term_text: str) -> bool: + """Check if a definition term is a base "examples:" header (no prefix). + + Parameters + ---------- + term_text : str + The text content of a definition term. + + Returns + ------- + bool + True if this is just "examples:" with no category prefix. + + Examples + -------- + >>> is_base_examples_term("examples:") + True + >>> is_base_examples_term("Examples") + True + >>> is_base_examples_term("Field-scoped examples:") + False + """ + return term_text.lower().rstrip(":").strip() == "examples" + + +def make_section_id( + term_text: str, counter: int = 0, *, is_subsection: bool = False +) -> str: + """Generate a section ID from an examples term. 
+ + Parameters + ---------- + term_text : str + The examples term text (e.g., "Machine-readable output: examples:") + counter : int + Counter for uniqueness if multiple examples sections exist. + is_subsection : bool + If True, omit "-examples" suffix for cleaner nested IDs. + + Returns + ------- + str + A normalized section ID. + + Examples + -------- + >>> make_section_id("examples:") + 'examples' + >>> make_section_id("Machine-readable output examples:") + 'machine-readable-output-examples' + >>> make_section_id("Field-scoped examples:", is_subsection=True) + 'field-scoped' + >>> make_section_id("examples:", counter=1) + 'examples-1' + """ + # Extract prefix before "examples" (e.g., "Machine-readable output") + lower_text = term_text.lower().rstrip(":") + if "examples" in lower_text: + prefix = lower_text.rsplit("examples", 1)[0].strip() + # Remove trailing colon from prefix (handles ": examples" pattern) + prefix = prefix.rstrip(":").strip() + if prefix: + normalized_prefix = prefix.replace(" ", "-") + # Subsections don't need "-examples" suffix + if is_subsection: + section_id = normalized_prefix + else: + section_id = f"{normalized_prefix}-examples" + else: + section_id = "examples" + else: + section_id = "examples" + + # Add counter suffix for uniqueness + if counter > 0: + section_id = f"{section_id}-{counter}" + + return section_id + + +def make_section_title(term_text: str, *, is_subsection: bool = False) -> str: + """Generate a section title from an examples term. + + Parameters + ---------- + term_text : str + The examples term text (e.g., "Machine-readable output: examples:") + is_subsection : bool + If True, omit "Examples" suffix for cleaner nested titles. + + Returns + ------- + str + A proper title (e.g., "Machine-readable Output Examples" or just + "Machine-Readable Output" if is_subsection=True). 
+ + Examples + -------- + >>> make_section_title("examples:") + 'Examples' + >>> make_section_title("Machine-readable output examples:") + 'Machine-Readable Output Examples' + >>> make_section_title("Field-scoped examples:", is_subsection=True) + 'Field-Scoped' + """ + # Remove trailing colon and normalize + text = term_text.rstrip(":").strip() + # Handle base "examples:" case + if text.lower() == "examples": + return "Examples" + + # Extract the prefix (category name) before "examples" + lower = text.lower() + if lower.endswith(": examples"): + prefix = text[: -len(": examples")] + elif lower.endswith(" examples"): + prefix = text[: -len(" examples")] + else: + prefix = text + + # Title case the prefix + titled_prefix = prefix.title() + + # For subsections, just use the prefix (cleaner nested titles) + if is_subsection: + return titled_prefix + + # For top-level sections, append "Examples" + return f"{titled_prefix} Examples" + + +def _create_example_section( + term_text: str, + def_node: nodes.definition, + *, + is_subsection: bool = False, +) -> nodes.section: + """Create a section node for an examples item. + + Parameters + ---------- + term_text : str + The examples term text. + def_node : nodes.definition + The definition node containing example commands. + is_subsection : bool + If True, create a subsection with simpler title/id. + + Returns + ------- + nodes.section + A section node with title and code blocks. 
+ """ + section_id = make_section_id(term_text, is_subsection=is_subsection) + section_title = make_section_title(term_text, is_subsection=is_subsection) + + section = nodes.section() + section["ids"] = [section_id] + section["names"] = [nodes.fully_normalize_name(section_title)] + + title = nodes.title(text=section_title) + section += title + + # Extract commands from definition and create separate code blocks + def_text = strip_ansi(def_node.astext()) + for line in def_text.split("\n"): + line = line.strip() + if line: + code_block = nodes.literal_block( + text=f"$ {line}", + classes=["highlight-console"], + ) + code_block["language"] = "console" + section += code_block + + return section + + +def transform_definition_list(dl_node: nodes.definition_list) -> list[nodes.Node]: + """Transform a definition list, converting examples items to code blocks. + + If there's a base "examples:" item followed by category-specific examples + (e.g., "Field-scoped: examples:"), the categories are nested under the + parent Examples section for cleaner ToC structure. + + Parameters + ---------- + dl_node : nodes.definition_list + A definition list node. + + Returns + ------- + list[nodes.Node] + Transformed nodes - code blocks for examples, original for others. 
+ """ + # First pass: collect examples and non-examples items separately + example_items: list[tuple[str, nodes.definition]] = [] # (term_text, def_node) + non_example_items: list[nodes.Node] = [] + base_examples_index: int | None = None + + for item in dl_node.children: + if not isinstance(item, nodes.definition_list_item): + continue + + # Get the term and definition + term_node = None + def_node = None + for child in item.children: + if isinstance(child, nodes.term): + term_node = child + elif isinstance(child, nodes.definition): + def_node = child + + if term_node is None or def_node is None: + non_example_items.append(item) + continue + + term_text = strip_ansi(term_node.astext()) + + if is_examples_term(term_text): + if is_base_examples_term(term_text): + base_examples_index = len(example_items) + example_items.append((term_text, def_node)) + else: + non_example_items.append(item) + + # Build result nodes + result_nodes: list[nodes.Node] = [] + + # Flush non-example items first (if any appeared before examples) + if non_example_items: + new_dl = nodes.definition_list() + new_dl.extend(non_example_items) + result_nodes.append(new_dl) + + # Determine nesting strategy + # Nest if: there's a base "examples:" AND at least one other example category + should_nest = base_examples_index is not None and len(example_items) > 1 + + if should_nest and base_examples_index is not None: + # Create parent "Examples" section + base_term, base_def = example_items[base_examples_index] + parent_section = _create_example_section( + base_term, base_def, is_subsection=False + ) + + # Add other examples as nested subsections + for i, (term_text, def_node) in enumerate(example_items): + if i == base_examples_index: + continue # Skip the base (already used as parent) + subsection = _create_example_section( + term_text, def_node, is_subsection=True + ) + parent_section += subsection + + result_nodes.append(parent_section) + else: + # No nesting - create flat sections (backwards 
compatible) + for term_text, def_node in example_items: + section = _create_example_section(term_text, def_node, is_subsection=False) + result_nodes.append(section) + + return result_nodes + + +def process_node(node: nodes.Node) -> nodes.Node | list[nodes.Node]: + """Process a node: strip ANSI codes and transform examples. + + Parameters + ---------- + node : nodes.Node + A docutils node to process. + + Returns + ------- + nodes.Node | list[nodes.Node] + The processed node(s). + """ + # Handle text nodes - strip ANSI + if isinstance(node, nodes.Text): + cleaned = strip_ansi(node.astext()) + if cleaned != node.astext(): + return nodes.Text(cleaned) + return node + + # Handle definition lists - transform examples + if isinstance(node, nodes.definition_list): + # Check if any items are examples + has_examples = False + for item in node.children: + if isinstance(item, nodes.definition_list_item): + for child in item.children: + if isinstance(child, nodes.term) and is_examples_term( + strip_ansi(child.astext()) + ): + has_examples = True + break + if has_examples: + break + + if has_examples: + return transform_definition_list(node) + + # Handle literal_block nodes - strip ANSI and apply usage highlighting + if isinstance(node, nodes.literal_block): + text = strip_ansi(node.astext()) + needs_update = text != node.astext() + + # Check if this is a usage block (starts with "usage:") + is_usage_block = text.lstrip().lower().startswith("usage:") + + if needs_update or is_usage_block: + new_block = nodes.literal_block(text=text) + # Preserve attributes + for attr in ("language", "classes"): + if attr in node: + new_block[attr] = node[attr] + # Apply cli-usage language to usage blocks + if is_usage_block: + new_block["language"] = "cli-usage" + return new_block + return node + + # Handle paragraph nodes - strip ANSI and lift sections out + if isinstance(node, nodes.paragraph): + # Process children and check if any become sections + processed_children: list[nodes.Node] = [] + 
changed = False + has_sections = False + + for child in node.children: + if isinstance(child, nodes.Text): + cleaned = strip_ansi(child.astext()) + if cleaned != child.astext(): + processed_children.append(nodes.Text(cleaned)) + changed = True + else: + processed_children.append(child) + else: + result = process_node(child) + if isinstance(result, list): + processed_children.extend(result) + changed = True + # Check if any results are sections + if any(isinstance(r, nodes.section) for r in result): + has_sections = True + elif result is not child: + processed_children.append(result) + changed = True + if isinstance(result, nodes.section): + has_sections = True + else: + processed_children.append(child) + + if not changed: + return node + + # If no sections, return a normal paragraph + if not has_sections: + new_para = nodes.paragraph() + new_para.extend(processed_children) + return new_para + + # Sections found - lift them out of the paragraph + # Return a list: [para_before, section1, section2, ..., para_after] + result_nodes: list[nodes.Node] = [] + current_para_children: list[nodes.Node] = [] + + for child in processed_children: + if isinstance(child, nodes.section): + # Flush current paragraph content + if current_para_children: + para = nodes.paragraph() + para.extend(current_para_children) + result_nodes.append(para) + current_para_children = [] + # Add section as a sibling + result_nodes.append(child) + else: + current_para_children.append(child) + + # Flush remaining paragraph content + if current_para_children: + para = nodes.paragraph() + para.extend(current_para_children) + result_nodes.append(para) + + return result_nodes + + # Recursively process children for other node types + if hasattr(node, "children"): + new_children: list[nodes.Node] = [] + children_changed = False + for child in node.children: + result = process_node(child) + if isinstance(result, list): + new_children.extend(result) + children_changed = True + elif result is not child: + 
new_children.append(result) + children_changed = True + else: + new_children.append(child) + if children_changed: + node.children = new_children + + return node + + +def _is_usage_block(node: nodes.Node) -> bool: + """Check if a node is a usage literal block. + + Parameters + ---------- + node : nodes.Node + A docutils node to check. + + Returns + ------- + bool + True if this is a usage block (literal_block starting with "usage:"). + + Examples + -------- + >>> from docutils import nodes + >>> _is_usage_block(nodes.literal_block(text="usage: cmd [-h]")) + True + >>> _is_usage_block(nodes.literal_block(text="Usage: tmuxp load")) + True + >>> _is_usage_block(nodes.literal_block(text=" usage: cmd")) + True + >>> _is_usage_block(nodes.literal_block(text="some other text")) + False + >>> _is_usage_block(nodes.paragraph(text="usage: cmd")) + False + >>> _is_usage_block(nodes.section()) + False + """ + if not isinstance(node, nodes.literal_block): + return False + text = node.astext() + return text.lstrip().lower().startswith("usage:") + + +def _is_examples_section(node: nodes.Node) -> bool: + """Check if a node is an examples section. + + Parameters + ---------- + node : nodes.Node + A docutils node to check. + + Returns + ------- + bool + True if this is an examples section (section with "examples" in its ID). 
+ + Examples + -------- + >>> from docutils import nodes + >>> section = nodes.section() + >>> section["ids"] = ["examples"] + >>> _is_examples_section(section) + True + >>> section2 = nodes.section() + >>> section2["ids"] = ["machine-readable-output-examples"] + >>> _is_examples_section(section2) + True + >>> section3 = nodes.section() + >>> section3["ids"] = ["positional-arguments"] + >>> _is_examples_section(section3) + False + >>> _is_examples_section(nodes.paragraph()) + False + >>> _is_examples_section(nodes.literal_block(text="examples")) + False + """ + if not isinstance(node, nodes.section): + return False + ids: list[str] = node.get("ids", []) + return any("examples" in id_str.lower() for id_str in ids) + + +def _reorder_nodes(processed: list[nodes.Node]) -> list[nodes.Node]: + """Reorder nodes so usage blocks appear before examples sections. + + This ensures the CLI usage synopsis appears above examples in the + documentation, making it easier to understand command syntax before + seeing example invocations. + + Parameters + ---------- + processed : list[nodes.Node] + List of processed docutils nodes. + + Returns + ------- + list[nodes.Node] + Reordered nodes with usage before examples. 
+ + Examples + -------- + >>> from docutils import nodes + + Create test nodes: + + >>> desc = nodes.paragraph(text="Description") + >>> examples = nodes.section() + >>> examples["ids"] = ["examples"] + >>> usage = nodes.literal_block(text="usage: cmd [-h]") + >>> args = nodes.section() + >>> args["ids"] = ["arguments"] + + When usage appears after examples, it gets moved before: + + >>> result = _reorder_nodes([desc, examples, usage, args]) + >>> [type(n).__name__ for n in result] + ['paragraph', 'literal_block', 'section', 'section'] + + When no examples exist, order is unchanged: + + >>> result = _reorder_nodes([desc, usage, args]) + >>> [type(n).__name__ for n in result] + ['paragraph', 'literal_block', 'section'] + + When usage already before examples, order is preserved: + + >>> result = _reorder_nodes([desc, usage, examples, args]) + >>> [type(n).__name__ for n in result] + ['paragraph', 'literal_block', 'section', 'section'] + + Empty list returns empty: + + >>> _reorder_nodes([]) + [] + """ + # First pass: check if there are any examples sections + has_examples = any(_is_examples_section(node) for node in processed) + if not has_examples: + # No examples, preserve original order + return processed + + usage_blocks: list[nodes.Node] = [] + examples_sections: list[nodes.Node] = [] + other_before_examples: list[nodes.Node] = [] + other_after_examples: list[nodes.Node] = [] + + seen_examples = False + for node in processed: + if _is_usage_block(node): + usage_blocks.append(node) + elif _is_examples_section(node): + examples_sections.append(node) + seen_examples = True + elif not seen_examples: + other_before_examples.append(node) + else: + other_after_examples.append(node) + + # Order: before_examples → usage → examples → after_examples + return ( + other_before_examples + usage_blocks + examples_sections + other_after_examples + ) + + +class CleanArgParseDirective(ArgParseDirective): # type: ignore[misc] + """ArgParse directive that strips ANSI codes and 
formats examples.""" + + def run(self) -> list[nodes.Node]: + """Run the directive, clean output, format examples, and reorder.""" + result = super().run() + + processed: list[nodes.Node] = [] + for node in result: + processed_node = process_node(node) + if isinstance(processed_node, list): + processed.extend(processed_node) + else: + processed.append(processed_node) + + # Reorder: usage blocks before examples sections + return _reorder_nodes(processed) + + +def setup(app: Sphinx) -> dict[str, t.Any]: + """Register the clean argparse directive and CLI usage lexer. + + Parameters + ---------- + app : Sphinx + The Sphinx application object. + + Returns + ------- + dict + Extension metadata. + """ + # Override the default argparse directive + app.add_directive("argparse", CleanArgParseDirective, override=True) + + # Register CLI usage lexer for usage block highlighting + from cli_usage_lexer import CLIUsageLexer + + app.add_lexer("cli-usage", CLIUsageLexer) + + return {"version": "2.0", "parallel_read_safe": True} diff --git a/docs/conf.py b/docs/conf.py index bbc5217346..cbd0d84299 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -37,6 +37,7 @@ "sphinx.ext.linkcode", "aafig", "sphinxarg.ext", # sphinx-argparse + "pretty_argparse", # Enhanced sphinx-argparse: strip ANSI, format examples "sphinx_inline_tabs", "sphinx_copybutton", "sphinxext.opengraph", diff --git a/pyproject.toml b/pyproject.toml index b8851d88cd..d67d34d23f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -173,6 +173,7 @@ module = [ "prompt_toolkit.*", "bpython", "sphinxarg.*", + "cli_usage_lexer", ] ignore_missing_imports = true diff --git a/tests/docs/__init__.py b/tests/docs/__init__.py new file mode 100644 index 0000000000..5a82d0c1f7 --- /dev/null +++ b/tests/docs/__init__.py @@ -0,0 +1 @@ +"""Tests for documentation extensions.""" diff --git a/tests/docs/_ext/__init__.py b/tests/docs/_ext/__init__.py new file mode 100644 index 0000000000..deec786b4f --- /dev/null +++ 
b/tests/docs/_ext/__init__.py @@ -0,0 +1 @@ +"""Tests for docs/_ext/ Sphinx extensions.""" diff --git a/tests/docs/_ext/conftest.py b/tests/docs/_ext/conftest.py new file mode 100644 index 0000000000..bb2cf99b57 --- /dev/null +++ b/tests/docs/_ext/conftest.py @@ -0,0 +1,11 @@ +"""Fixtures and configuration for docs extension tests.""" + +from __future__ import annotations + +import sys +from pathlib import Path + +# Add docs/_ext to path so we can import the extension module +docs_ext_path = Path(__file__).parent.parent.parent.parent / "docs" / "_ext" +if str(docs_ext_path) not in sys.path: + sys.path.insert(0, str(docs_ext_path)) diff --git a/tests/docs/_ext/test_cli_usage_lexer.py b/tests/docs/_ext/test_cli_usage_lexer.py new file mode 100644 index 0000000000..88ff40fb82 --- /dev/null +++ b/tests/docs/_ext/test_cli_usage_lexer.py @@ -0,0 +1,357 @@ +"""Tests for cli_usage_lexer Pygments extension.""" + +from __future__ import annotations + +import typing as t + +import pytest +from cli_usage_lexer import ( + CLIUsageLexer, + tokenize_usage, +) + +# --- Helper to extract token type names --- + + +def get_tokens(text: str) -> list[tuple[str, str]]: + """Get tokens as (type_name, value) tuples.""" + lexer = CLIUsageLexer() + return [ + (str(tok_type), tok_value) for tok_type, tok_value in lexer.get_tokens(text) + ] + + +# --- Token type fixtures --- + + +class TokenTypeFixture(t.NamedTuple): + """Test fixture for verifying specific token types.""" + + test_id: str + input_text: str + expected_token_type: str + expected_value: str + + +TOKEN_TYPE_FIXTURES: list[TokenTypeFixture] = [ + TokenTypeFixture( + test_id="usage_heading", + input_text="usage:", + expected_token_type="Token.Generic.Heading", + expected_value="usage:", + ), + TokenTypeFixture( + test_id="short_option", + input_text="-h", + expected_token_type="Token.Name.Attribute", + expected_value="-h", + ), + TokenTypeFixture( + test_id="long_option", + input_text="--verbose", + 
expected_token_type="Token.Name.Tag", + expected_value="--verbose", + ), + TokenTypeFixture( + test_id="long_option_with_dashes", + input_text="--no-color", + expected_token_type="Token.Name.Tag", + expected_value="--no-color", + ), + TokenTypeFixture( + test_id="uppercase_metavar", + input_text="COMMAND", + expected_token_type="Token.Name.Constant", + expected_value="COMMAND", + ), + TokenTypeFixture( + test_id="uppercase_metavar_with_underscore", + input_text="FILE_PATH", + expected_token_type="Token.Name.Constant", + expected_value="FILE_PATH", + ), + TokenTypeFixture( + test_id="positional_arg", + input_text="session-name", + expected_token_type="Token.Name.Label", + expected_value="session-name", + ), + TokenTypeFixture( + test_id="command_name", + input_text="tmuxp", + expected_token_type="Token.Name.Label", + expected_value="tmuxp", + ), + TokenTypeFixture( + test_id="open_bracket", + input_text="[", + expected_token_type="Token.Punctuation", + expected_value="[", + ), + TokenTypeFixture( + test_id="close_bracket", + input_text="]", + expected_token_type="Token.Punctuation", + expected_value="]", + ), + TokenTypeFixture( + test_id="pipe_operator", + input_text="|", + expected_token_type="Token.Operator", + expected_value="|", + ), +] + + +@pytest.mark.parametrize( + TokenTypeFixture._fields, + TOKEN_TYPE_FIXTURES, + ids=[f.test_id for f in TOKEN_TYPE_FIXTURES], +) +def test_token_type( + test_id: str, + input_text: str, + expected_token_type: str, + expected_value: str, +) -> None: + """Test individual token type detection.""" + tokens = get_tokens(input_text) + # Find the expected token (skip whitespace) + non_ws_tokens = [(t, v) for t, v in tokens if "Whitespace" not in t and v.strip()] + assert len(non_ws_tokens) >= 1, f"No non-whitespace tokens found for '{input_text}'" + token_type, token_value = non_ws_tokens[0] + assert token_type == expected_token_type, ( + f"Expected {expected_token_type}, got {token_type}" + ) + assert token_value == expected_value 
+ + +# --- Short option with value fixtures --- + + +class ShortOptionValueFixture(t.NamedTuple): + """Test fixture for short options with values.""" + + test_id: str + input_text: str + option: str + value: str + + +SHORT_OPTION_VALUE_FIXTURES: list[ShortOptionValueFixture] = [ + ShortOptionValueFixture( + test_id="lowercase_value", + input_text="-S socket-path", + option="-S", + value="socket-path", + ), + ShortOptionValueFixture( + test_id="uppercase_value", + input_text="-c COMMAND", + option="-c", + value="COMMAND", + ), + ShortOptionValueFixture( + test_id="simple_value", + input_text="-L name", + option="-L", + value="name", + ), +] + + +@pytest.mark.parametrize( + ShortOptionValueFixture._fields, + SHORT_OPTION_VALUE_FIXTURES, + ids=[f.test_id for f in SHORT_OPTION_VALUE_FIXTURES], +) +def test_short_option_with_value( + test_id: str, + input_text: str, + option: str, + value: str, +) -> None: + """Test short option followed by value tokenization.""" + tokens = get_tokens(input_text) + non_ws_tokens = [(t, v) for t, v in tokens if "Whitespace" not in t] + + assert len(non_ws_tokens) >= 2 + assert non_ws_tokens[0] == ("Token.Name.Attribute", option) + # Value could be Name.Variable or Name.Constant depending on case + assert non_ws_tokens[1][1] == value + + +# --- Long option with value fixtures --- + + +class LongOptionValueFixture(t.NamedTuple): + """Test fixture for long options with = values.""" + + test_id: str + input_text: str + option: str + value: str + + +LONG_OPTION_VALUE_FIXTURES: list[LongOptionValueFixture] = [ + LongOptionValueFixture( + test_id="uppercase_value", + input_text="--config=FILE", + option="--config", + value="FILE", + ), + LongOptionValueFixture( + test_id="lowercase_value", + input_text="--output=path", + option="--output", + value="path", + ), +] + + +@pytest.mark.parametrize( + LongOptionValueFixture._fields, + LONG_OPTION_VALUE_FIXTURES, + ids=[f.test_id for f in LONG_OPTION_VALUE_FIXTURES], +) +def 
test_long_option_with_value( + test_id: str, + input_text: str, + option: str, + value: str, +) -> None: + """Test long option with = value tokenization.""" + tokens = get_tokens(input_text) + non_ws_tokens = [(t, v) for t, v in tokens if "Whitespace" not in t] + + assert len(non_ws_tokens) >= 3 + assert non_ws_tokens[0] == ("Token.Name.Tag", option) + assert non_ws_tokens[1] == ("Token.Operator", "=") + assert non_ws_tokens[2][1] == value + + +# --- Full usage string fixtures --- + + +class UsageStringFixture(t.NamedTuple): + """Test fixture for full usage string tokenization.""" + + test_id: str + input_text: str + expected_contains: list[tuple[str, str]] + + +USAGE_STRING_FIXTURES: list[UsageStringFixture] = [ + UsageStringFixture( + test_id="simple_usage", + input_text="usage: cmd [-h]", + expected_contains=[ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "cmd"), + ("Token.Punctuation", "["), + ("Token.Name.Attribute", "-h"), + ("Token.Punctuation", "]"), + ], + ), + UsageStringFixture( + test_id="mutually_exclusive", + input_text="[--best | --pdb | --code]", + expected_contains=[ + ("Token.Name.Tag", "--best"), + ("Token.Operator", "|"), + ("Token.Name.Tag", "--pdb"), + ("Token.Operator", "|"), + ("Token.Name.Tag", "--code"), + ], + ), + UsageStringFixture( + test_id="subcommand", + input_text="usage: tmuxp shell", + expected_contains=[ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "tmuxp"), + ("Token.Name.Label", "shell"), + ], + ), + UsageStringFixture( + test_id="positional_args", + input_text="[session-name] [window-name]", + expected_contains=[ + ("Token.Punctuation", "["), + ("Token.Name.Label", "session-name"), + ("Token.Punctuation", "]"), + ("Token.Punctuation", "["), + ("Token.Name.Label", "window-name"), + ("Token.Punctuation", "]"), + ], + ), +] + + +@pytest.mark.parametrize( + UsageStringFixture._fields, + USAGE_STRING_FIXTURES, + ids=[f.test_id for f in USAGE_STRING_FIXTURES], +) +def test_usage_string( + test_id: 
str, + input_text: str, + expected_contains: list[tuple[str, str]], +) -> None: + """Test full usage string tokenization contains expected tokens.""" + tokens = get_tokens(input_text) + for expected_type, expected_value in expected_contains: + assert (expected_type, expected_value) in tokens, ( + f"Expected ({expected_type}, {expected_value!r}) not found in tokens" + ) + + +# --- Real tmuxp usage output test --- + + +def test_tmuxp_shell_usage() -> None: + """Test real tmuxp shell usage output tokenization.""" + usage_text = """\ +usage: tmuxp shell [-h] [-S socket-path] [-L socket-name] [-c COMMAND] + [--best | --pdb | --code | --ptipython | --ptpython | + --ipython | --bpython] [--use-pythonrc] [--no-startup] + [--use-vi-mode] [--no-vi-mode] + [session-name] [window-name]""" + + tokens = get_tokens(usage_text) + + # Check key elements are present + # Note: COMMAND after -c is Name.Variable (option value), not Name.Constant + expected = [ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "tmuxp"), + ("Token.Name.Label", "shell"), + ("Token.Name.Attribute", "-h"), + ("Token.Name.Attribute", "-S"), + ("Token.Name.Variable", "socket-path"), + ("Token.Name.Attribute", "-c"), + ("Token.Name.Variable", "COMMAND"), # Option value, not standalone metavar + ("Token.Name.Tag", "--best"), + ("Token.Name.Tag", "--pdb"), + ("Token.Name.Tag", "--use-pythonrc"), + ("Token.Name.Tag", "--no-vi-mode"), + ("Token.Name.Label", "session-name"), + ("Token.Name.Label", "window-name"), + ] + + for expected_type, expected_value in expected: + assert (expected_type, expected_value) in tokens, ( + f"Expected ({expected_type}, {expected_value!r}) not in tokens" + ) + + +# --- tokenize_usage helper function test --- + + +def test_tokenize_usage_helper() -> None: + """Test the tokenize_usage helper function.""" + result = tokenize_usage("usage: cmd [-h]") + + assert result[0] == ("Token.Generic.Heading", "usage:") + assert ("Token.Name.Label", "cmd") in result + assert 
("Token.Name.Attribute", "-h") in result diff --git a/tests/docs/_ext/test_pretty_argparse.py b/tests/docs/_ext/test_pretty_argparse.py new file mode 100644 index 0000000000..f74e0af4c8 --- /dev/null +++ b/tests/docs/_ext/test_pretty_argparse.py @@ -0,0 +1,854 @@ +"""Tests for pretty_argparse sphinx extension.""" + +from __future__ import annotations + +import typing as t + +import pytest +from docutils import nodes +from pretty_argparse import ( # type: ignore[import-not-found] + _is_examples_section, + _is_usage_block, + _reorder_nodes, + is_base_examples_term, + is_examples_term, + make_section_id, + make_section_title, + strip_ansi, + transform_definition_list, +) + +# --- strip_ansi tests --- + + +class StripAnsiFixture(t.NamedTuple): + """Test fixture for strip_ansi function.""" + + test_id: str + input_text: str + expected: str + + +STRIP_ANSI_FIXTURES: list[StripAnsiFixture] = [ + StripAnsiFixture( + test_id="plain_text", + input_text="hello", + expected="hello", + ), + StripAnsiFixture( + test_id="green_color", + input_text="\033[32mgreen\033[0m", + expected="green", + ), + StripAnsiFixture( + test_id="bold_blue", + input_text="\033[1;34mbold\033[0m", + expected="bold", + ), + StripAnsiFixture( + test_id="multiple_codes", + input_text="\033[1m\033[32mtest\033[0m", + expected="test", + ), + StripAnsiFixture( + test_id="empty_string", + input_text="", + expected="", + ), + StripAnsiFixture( + test_id="mixed_content", + input_text="pre\033[31mred\033[0mpost", + expected="preredpost", + ), + StripAnsiFixture( + test_id="reset_only", + input_text="\033[0m", + expected="", + ), + StripAnsiFixture( + test_id="sgr_params", + input_text="\033[38;5;196mred256\033[0m", + expected="red256", + ), +] + + +@pytest.mark.parametrize( + StripAnsiFixture._fields, + STRIP_ANSI_FIXTURES, + ids=[f.test_id for f in STRIP_ANSI_FIXTURES], +) +def test_strip_ansi(test_id: str, input_text: str, expected: str) -> None: + """Test ANSI escape code stripping.""" + assert 
strip_ansi(input_text) == expected + + +# --- is_examples_term tests --- + + +class IsExamplesTermFixture(t.NamedTuple): + """Test fixture for is_examples_term function.""" + + test_id: str + term_text: str + expected: bool + + +IS_EXAMPLES_TERM_FIXTURES: list[IsExamplesTermFixture] = [ + IsExamplesTermFixture( + test_id="base_examples_colon", + term_text="examples:", + expected=True, + ), + IsExamplesTermFixture( + test_id="base_examples_no_colon", + term_text="examples", + expected=True, + ), + IsExamplesTermFixture( + test_id="prefixed_machine_readable", + term_text="Machine-readable output examples:", + expected=True, + ), + IsExamplesTermFixture( + test_id="prefixed_field_scoped", + term_text="Field-scoped search examples:", + expected=True, + ), + IsExamplesTermFixture( + test_id="colon_pattern", + term_text="Machine-readable output: examples:", + expected=True, + ), + IsExamplesTermFixture( + test_id="usage_not_examples", + term_text="Usage:", + expected=False, + ), + IsExamplesTermFixture( + test_id="arguments_not_examples", + term_text="Named Arguments:", + expected=False, + ), + IsExamplesTermFixture( + test_id="case_insensitive_upper", + term_text="EXAMPLES:", + expected=True, + ), + IsExamplesTermFixture( + test_id="case_insensitive_mixed", + term_text="Examples:", + expected=True, + ), +] + + +@pytest.mark.parametrize( + IsExamplesTermFixture._fields, + IS_EXAMPLES_TERM_FIXTURES, + ids=[f.test_id for f in IS_EXAMPLES_TERM_FIXTURES], +) +def test_is_examples_term(test_id: str, term_text: str, expected: bool) -> None: + """Test examples term detection.""" + assert is_examples_term(term_text) == expected + + +# --- is_base_examples_term tests --- + + +class IsBaseExamplesTermFixture(t.NamedTuple): + """Test fixture for is_base_examples_term function.""" + + test_id: str + term_text: str + expected: bool + + +IS_BASE_EXAMPLES_TERM_FIXTURES: list[IsBaseExamplesTermFixture] = [ + IsBaseExamplesTermFixture( + test_id="base_with_colon", + 
term_text="examples:", + expected=True, + ), + IsBaseExamplesTermFixture( + test_id="base_no_colon", + term_text="examples", + expected=True, + ), + IsBaseExamplesTermFixture( + test_id="uppercase", + term_text="EXAMPLES", + expected=True, + ), + IsBaseExamplesTermFixture( + test_id="mixed_case", + term_text="Examples:", + expected=True, + ), + IsBaseExamplesTermFixture( + test_id="prefixed_not_base", + term_text="Field-scoped examples:", + expected=False, + ), + IsBaseExamplesTermFixture( + test_id="output_examples_not_base", + term_text="Machine-readable output examples:", + expected=False, + ), + IsBaseExamplesTermFixture( + test_id="colon_pattern_not_base", + term_text="Output: examples:", + expected=False, + ), +] + + +@pytest.mark.parametrize( + IsBaseExamplesTermFixture._fields, + IS_BASE_EXAMPLES_TERM_FIXTURES, + ids=[f.test_id for f in IS_BASE_EXAMPLES_TERM_FIXTURES], +) +def test_is_base_examples_term(test_id: str, term_text: str, expected: bool) -> None: + """Test base examples term detection.""" + assert is_base_examples_term(term_text) == expected + + +# --- make_section_id tests --- + + +class MakeSectionIdFixture(t.NamedTuple): + """Test fixture for make_section_id function.""" + + test_id: str + term_text: str + counter: int + is_subsection: bool + expected: str + + +MAKE_SECTION_ID_FIXTURES: list[MakeSectionIdFixture] = [ + MakeSectionIdFixture( + test_id="base_examples", + term_text="examples:", + counter=0, + is_subsection=False, + expected="examples", + ), + MakeSectionIdFixture( + test_id="prefixed_standard", + term_text="Machine-readable output examples:", + counter=0, + is_subsection=False, + expected="machine-readable-output-examples", + ), + MakeSectionIdFixture( + test_id="subsection_omits_suffix", + term_text="Field-scoped examples:", + counter=0, + is_subsection=True, + expected="field-scoped", + ), + MakeSectionIdFixture( + test_id="with_counter", + term_text="examples:", + counter=2, + is_subsection=False, + expected="examples-2", + ), 
+ MakeSectionIdFixture( + test_id="counter_zero_no_suffix", + term_text="examples:", + counter=0, + is_subsection=False, + expected="examples", + ), + MakeSectionIdFixture( + test_id="colon_pattern", + term_text="Machine-readable output: examples:", + counter=0, + is_subsection=False, + expected="machine-readable-output-examples", + ), + MakeSectionIdFixture( + test_id="subsection_with_counter", + term_text="Field-scoped examples:", + counter=1, + is_subsection=True, + expected="field-scoped-1", + ), +] + + +@pytest.mark.parametrize( + MakeSectionIdFixture._fields, + MAKE_SECTION_ID_FIXTURES, + ids=[f.test_id for f in MAKE_SECTION_ID_FIXTURES], +) +def test_make_section_id( + test_id: str, + term_text: str, + counter: int, + is_subsection: bool, + expected: str, +) -> None: + """Test section ID generation.""" + assert make_section_id(term_text, counter, is_subsection=is_subsection) == expected + + +# --- make_section_title tests --- + + +class MakeSectionTitleFixture(t.NamedTuple): + """Test fixture for make_section_title function.""" + + test_id: str + term_text: str + is_subsection: bool + expected: str + + +MAKE_SECTION_TITLE_FIXTURES: list[MakeSectionTitleFixture] = [ + MakeSectionTitleFixture( + test_id="base_examples", + term_text="examples:", + is_subsection=False, + expected="Examples", + ), + MakeSectionTitleFixture( + test_id="prefixed_with_examples_suffix", + term_text="Machine-readable output examples:", + is_subsection=False, + expected="Machine-Readable Output Examples", + ), + MakeSectionTitleFixture( + test_id="subsection_omits_examples", + term_text="Field-scoped examples:", + is_subsection=True, + expected="Field-Scoped", + ), + MakeSectionTitleFixture( + test_id="colon_pattern", + term_text="Machine-readable output: examples:", + is_subsection=False, + expected="Machine-Readable Output Examples", + ), + MakeSectionTitleFixture( + test_id="subsection_colon_pattern", + term_text="Machine-readable output: examples:", + is_subsection=True, + 
expected="Machine-Readable Output", + ), + MakeSectionTitleFixture( + test_id="base_examples_no_colon", + term_text="examples", + is_subsection=False, + expected="Examples", + ), +] + + +@pytest.mark.parametrize( + MakeSectionTitleFixture._fields, + MAKE_SECTION_TITLE_FIXTURES, + ids=[f.test_id for f in MAKE_SECTION_TITLE_FIXTURES], +) +def test_make_section_title( + test_id: str, + term_text: str, + is_subsection: bool, + expected: str, +) -> None: + """Test section title generation.""" + assert make_section_title(term_text, is_subsection=is_subsection) == expected + + +# --- transform_definition_list integration tests --- + + +def _make_dl_item(term: str, definition: str) -> nodes.definition_list_item: + """Create a definition list item for testing. + + Parameters + ---------- + term : str + The definition term text. + definition : str + The definition content text. + + Returns + ------- + nodes.definition_list_item + A definition list item with term and definition. + """ + item = nodes.definition_list_item() + term_node = nodes.term(text=term) + def_node = nodes.definition() + def_node += nodes.paragraph(text=definition) + item += term_node + item += def_node + return item + + +def test_transform_definition_list_single_examples() -> None: + """Single examples section creates one section node.""" + dl = nodes.definition_list() + dl += _make_dl_item("examples:", "tmuxp ls") + + result = transform_definition_list(dl) + + assert len(result) == 1 + assert isinstance(result[0], nodes.section) + assert result[0]["ids"] == ["examples"] + + +def test_transform_definition_list_nested_examples() -> None: + """Base examples with category creates nested sections.""" + dl = nodes.definition_list() + dl += _make_dl_item("examples:", "tmuxp ls") + dl += _make_dl_item("Machine-readable output examples:", "tmuxp ls --json") + + result = transform_definition_list(dl) + + # Should have single parent section containing nested subsection + assert len(result) == 1 + parent = result[0] 
+ assert isinstance(parent, nodes.section) + assert parent["ids"] == ["examples"] + + # Find nested subsection + subsections = [c for c in parent.children if isinstance(c, nodes.section)] + assert len(subsections) == 1 + assert subsections[0]["ids"] == ["machine-readable-output"] + + +def test_transform_definition_list_multiple_categories() -> None: + """Multiple example categories all nest under parent.""" + dl = nodes.definition_list() + dl += _make_dl_item("examples:", "tmuxp ls") + dl += _make_dl_item("Field-scoped examples:", "tmuxp ls --field name") + dl += _make_dl_item("Machine-readable output examples:", "tmuxp ls --json") + + result = transform_definition_list(dl) + + assert len(result) == 1 + parent = result[0] + assert isinstance(parent, nodes.section) + + subsections = [c for c in parent.children if isinstance(c, nodes.section)] + assert len(subsections) == 2 + + +def test_transform_definition_list_preserves_non_examples() -> None: + """Non-example items preserved as definition list.""" + dl = nodes.definition_list() + dl += _make_dl_item("Usage:", "How to use this command") + dl += _make_dl_item("examples:", "tmuxp ls") + + result = transform_definition_list(dl) + + # Should have both definition list (non-examples) and section (examples) + has_dl = any(isinstance(n, nodes.definition_list) for n in result) + has_section = any(isinstance(n, nodes.section) for n in result) + assert has_dl, "Non-example items should be preserved as definition list" + assert has_section, "Example items should become sections" + + +def test_transform_definition_list_no_examples() -> None: + """Definition list without examples returns empty list.""" + dl = nodes.definition_list() + dl += _make_dl_item("Usage:", "How to use") + dl += _make_dl_item("Options:", "Available options") + + result = transform_definition_list(dl) + + # All items are non-examples, should return definition list + assert len(result) == 1 + assert isinstance(result[0], nodes.definition_list) + + +def 
test_transform_definition_list_only_category_no_base() -> None: + """Single category example without base examples stays flat.""" + dl = nodes.definition_list() + dl += _make_dl_item("Machine-readable output examples:", "tmuxp ls --json") + + result = transform_definition_list(dl) + + # Without base "examples:", no nesting - just single section + assert len(result) == 1 + assert isinstance(result[0], nodes.section) + # Should have full title since it's not nested + assert result[0]["ids"] == ["machine-readable-output-examples"] + + +def test_transform_definition_list_code_blocks_created() -> None: + """Each command line becomes a separate code block.""" + dl = nodes.definition_list() + dl += _make_dl_item("examples:", "cmd1\ncmd2\ncmd3") + + result = transform_definition_list(dl) + + section = result[0] + code_blocks = [c for c in section.children if isinstance(c, nodes.literal_block)] + assert len(code_blocks) == 3 + assert code_blocks[0].astext() == "$ cmd1" + assert code_blocks[1].astext() == "$ cmd2" + assert code_blocks[2].astext() == "$ cmd3" + + +# --- _is_usage_block tests --- + + +class IsUsageBlockFixture(t.NamedTuple): + """Test fixture for _is_usage_block function.""" + + test_id: str + node_type: str + node_text: str + expected: bool + + +IS_USAGE_BLOCK_FIXTURES: list[IsUsageBlockFixture] = [ + IsUsageBlockFixture( + test_id="literal_block_usage_lowercase", + node_type="literal_block", + node_text="usage: cmd [-h]", + expected=True, + ), + IsUsageBlockFixture( + test_id="literal_block_usage_uppercase", + node_type="literal_block", + node_text="Usage: tmuxp load", + expected=True, + ), + IsUsageBlockFixture( + test_id="literal_block_usage_leading_space", + node_type="literal_block", + node_text=" usage: cmd", + expected=True, + ), + IsUsageBlockFixture( + test_id="literal_block_not_usage", + node_type="literal_block", + node_text="some other text", + expected=False, + ), + IsUsageBlockFixture( + test_id="literal_block_usage_in_middle", + 
node_type="literal_block", + node_text="see usage: for more", + expected=False, + ), + IsUsageBlockFixture( + test_id="paragraph_with_usage", + node_type="paragraph", + node_text="usage: cmd", + expected=False, + ), + IsUsageBlockFixture( + test_id="section_node", + node_type="section", + node_text="", + expected=False, + ), +] + + +def _make_test_node(node_type: str, node_text: str) -> nodes.Node: + """Create a test node of the specified type. + + Parameters + ---------- + node_type : str + Type of node to create ("literal_block", "paragraph", "section"). + node_text : str + Text content for the node. + + Returns + ------- + nodes.Node + The created node. + """ + if node_type == "literal_block": + return nodes.literal_block(text=node_text) + if node_type == "paragraph": + return nodes.paragraph(text=node_text) + if node_type == "section": + return nodes.section() + msg = f"Unknown node type: {node_type}" + raise ValueError(msg) + + +@pytest.mark.parametrize( + IsUsageBlockFixture._fields, + IS_USAGE_BLOCK_FIXTURES, + ids=[f.test_id for f in IS_USAGE_BLOCK_FIXTURES], +) +def test_is_usage_block( + test_id: str, + node_type: str, + node_text: str, + expected: bool, +) -> None: + """Test usage block detection.""" + node = _make_test_node(node_type, node_text) + assert _is_usage_block(node) == expected + + +# --- _is_examples_section tests --- + + +class IsExamplesSectionFixture(t.NamedTuple): + """Test fixture for _is_examples_section function.""" + + test_id: str + node_type: str + section_ids: list[str] + expected: bool + + +IS_EXAMPLES_SECTION_FIXTURES: list[IsExamplesSectionFixture] = [ + IsExamplesSectionFixture( + test_id="section_with_examples_id", + node_type="section", + section_ids=["examples"], + expected=True, + ), + IsExamplesSectionFixture( + test_id="section_with_prefixed_examples", + node_type="section", + section_ids=["machine-readable-output-examples"], + expected=True, + ), + IsExamplesSectionFixture( + test_id="section_with_uppercase_examples", + 
node_type="section", + section_ids=["EXAMPLES"], + expected=True, + ), + IsExamplesSectionFixture( + test_id="section_without_examples", + node_type="section", + section_ids=["positional-arguments"], + expected=False, + ), + IsExamplesSectionFixture( + test_id="section_with_multiple_ids", + node_type="section", + section_ids=["main-id", "examples-alias"], + expected=True, + ), + IsExamplesSectionFixture( + test_id="section_empty_ids", + node_type="section", + section_ids=[], + expected=False, + ), + IsExamplesSectionFixture( + test_id="paragraph_node", + node_type="paragraph", + section_ids=[], + expected=False, + ), + IsExamplesSectionFixture( + test_id="literal_block_node", + node_type="literal_block", + section_ids=[], + expected=False, + ), +] + + +def _make_section_node(node_type: str, section_ids: list[str]) -> nodes.Node: + """Create a test node with optional section IDs. + + Parameters + ---------- + node_type : str + Type of node to create. + section_ids : list[str] + IDs to assign if creating a section. + + Returns + ------- + nodes.Node + The created node. + """ + if node_type == "section": + section = nodes.section() + section["ids"] = section_ids + return section + if node_type == "paragraph": + return nodes.paragraph() + if node_type == "literal_block": + return nodes.literal_block(text="examples") + msg = f"Unknown node type: {node_type}" + raise ValueError(msg) + + +@pytest.mark.parametrize( + IsExamplesSectionFixture._fields, + IS_EXAMPLES_SECTION_FIXTURES, + ids=[f.test_id for f in IS_EXAMPLES_SECTION_FIXTURES], +) +def test_is_examples_section( + test_id: str, + node_type: str, + section_ids: list[str], + expected: bool, +) -> None: + """Test examples section detection.""" + node = _make_section_node(node_type, section_ids) + assert _is_examples_section(node) == expected + + +# --- _reorder_nodes tests --- + + +def _make_usage_node(text: str = "usage: cmd [-h]") -> nodes.literal_block: + """Create a usage block node. 
+ + Parameters + ---------- + text : str + Text content for the usage block. + + Returns + ------- + nodes.literal_block + A literal block node with usage text. + """ + return nodes.literal_block(text=text) + + +def _make_examples_section(section_id: str = "examples") -> nodes.section: + """Create an examples section node. + + Parameters + ---------- + section_id : str + The ID for the section. + + Returns + ------- + nodes.section + A section node with the specified ID. + """ + section = nodes.section() + section["ids"] = [section_id] + return section + + +def test_reorder_nodes_usage_after_examples() -> None: + """Usage block after examples gets moved before examples.""" + desc = nodes.paragraph(text="Description") + examples = _make_examples_section() + usage = _make_usage_node() + + # Create a non-examples section + args_section = nodes.section() + args_section["ids"] = ["arguments"] + + result = _reorder_nodes([desc, examples, usage, args_section]) + + # Should be: desc, usage, examples, args + assert len(result) == 4 + assert isinstance(result[0], nodes.paragraph) + assert isinstance(result[1], nodes.literal_block) + assert isinstance(result[2], nodes.section) + assert result[2]["ids"] == ["examples"] + assert isinstance(result[3], nodes.section) + assert result[3]["ids"] == ["arguments"] + + +def test_reorder_nodes_no_examples() -> None: + """Without examples, original order is preserved.""" + desc = nodes.paragraph(text="Description") + usage = _make_usage_node() + args = nodes.section() + args["ids"] = ["arguments"] + + result = _reorder_nodes([desc, usage, args]) + + # Order unchanged: desc, usage, args + assert len(result) == 3 + assert isinstance(result[0], nodes.paragraph) + assert isinstance(result[1], nodes.literal_block) + assert isinstance(result[2], nodes.section) + + +def test_reorder_nodes_usage_already_before_examples() -> None: + """When usage is already before examples, order is preserved.""" + desc = nodes.paragraph(text="Description") + 
usage = _make_usage_node() + examples = _make_examples_section() + args = nodes.section() + args["ids"] = ["arguments"] + + result = _reorder_nodes([desc, usage, examples, args]) + + # Order should be: desc, usage, examples, args + assert len(result) == 4 + assert isinstance(result[0], nodes.paragraph) + assert isinstance(result[1], nodes.literal_block) + assert isinstance(result[2], nodes.section) + assert result[2]["ids"] == ["examples"] + + +def test_reorder_nodes_empty_list() -> None: + """Empty input returns empty output.""" + result = _reorder_nodes([]) + assert result == [] + + +def test_reorder_nodes_multiple_usage_blocks() -> None: + """Multiple usage blocks are all moved before examples.""" + desc = nodes.paragraph(text="Description") + examples = _make_examples_section() + usage1 = _make_usage_node("usage: cmd1 [-h]") + usage2 = _make_usage_node("usage: cmd2 [-v]") + + result = _reorder_nodes([desc, examples, usage1, usage2]) + + # Should be: desc, usage1, usage2, examples + assert len(result) == 4 + assert isinstance(result[0], nodes.paragraph) + assert isinstance(result[1], nodes.literal_block) + assert isinstance(result[2], nodes.literal_block) + assert isinstance(result[3], nodes.section) + + +def test_reorder_nodes_multiple_examples_sections() -> None: + """Multiple examples sections are grouped together.""" + desc = nodes.paragraph(text="Description") + examples1 = _make_examples_section("examples") + usage = _make_usage_node() + examples2 = _make_examples_section("machine-readable-output-examples") + args = nodes.section() + args["ids"] = ["arguments"] + + result = _reorder_nodes([desc, examples1, usage, examples2, args]) + + # Should be: desc, usage, examples1, examples2, args + assert len(result) == 5 + assert isinstance(result[0], nodes.paragraph) + assert isinstance(result[1], nodes.literal_block) + assert result[2]["ids"] == ["examples"] + assert result[3]["ids"] == ["machine-readable-output-examples"] + assert result[4]["ids"] == 
["arguments"] + + +def test_reorder_nodes_preserves_non_examples_after() -> None: + """Non-examples nodes after examples stay at the end.""" + desc = nodes.paragraph(text="Description") + examples = _make_examples_section() + usage = _make_usage_node() + epilog = nodes.paragraph(text="Epilog") + + result = _reorder_nodes([desc, examples, usage, epilog]) + + # Should be: desc, usage, examples, epilog + assert len(result) == 4 + assert result[0].astext() == "Description" + assert isinstance(result[1], nodes.literal_block) + assert isinstance(result[2], nodes.section) + assert result[3].astext() == "Epilog" From 9ec2fd4c55ae5e08e34e48e5e30b38ab9b26e140 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 10 Jan 2026 18:56:29 -0600 Subject: [PATCH 3/4] CHANGES: Add pretty_argparse extension entry (#1007) --- CHANGES | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGES b/CHANGES index e78555bd46..4064aceb6e 100644 --- a/CHANGES +++ b/CHANGES @@ -40,6 +40,15 @@ $ pipx install --suffix=@next 'tmuxp' --pip-args '\--pre' --force ### Documentation +#### pretty_argparse Sphinx extension (#1007) + +New Sphinx extension that enhances sphinx-argparse CLI documentation: + +- Strip ANSI escape codes from help text (FORCE_COLOR support) +- Transform examples into proper documentation sections +- Custom Pygments lexer (`cli-usage`) for usage block syntax highlighting +- Reorder sections so usage appears before examples + - Migrate docs deployment to AWS OIDC authentication and AWS CLI ## tmuxp 1.62.0 (2025-12-14) From c24f4b0aa836e16a9364e2f2cc2fa0e253af2a60 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 10 Jan 2026 19:02:26 -0600 Subject: [PATCH 4/4] py(coverage): Exclude docs/_ext from coverage Sphinx extension files (pretty_argparse.py, cli_usage_lexer.py) are documentation infrastructure, not core application code. Exclude them from coverage measurement similar to tests/, _vendor/, etc. 
--- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index d67d34d23f..0d4211004d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -141,6 +141,7 @@ omit = [ "*/_*", "pkg/*", "*/log.py", + "docs/_ext/*", ] [tool.coverage.report]