diff --git a/.auxiliary/configuration/claude/.gitignore b/.auxiliary/configuration/claude/.gitignore
new file mode 100644
index 0000000..93c0f73
--- /dev/null
+++ b/.auxiliary/configuration/claude/.gitignore
@@ -0,0 +1 @@
+settings.local.json
diff --git a/.auxiliary/configuration/claude/agents/python-annotator.md b/.auxiliary/configuration/claude/agents/python-annotator.md
new file mode 100644
index 0000000..acece14
--- /dev/null
+++ b/.auxiliary/configuration/claude/agents/python-annotator.md
@@ -0,0 +1,362 @@
+---
+name: python-annotator
+description: |
+ Use this agent when you need to address type checking issues from tools like Pyright, create type annotations
+ following project standards, generate type stubs for third-party packages, or analyze and resolve issues masked
+ by type: ignore comments or __.typx.cast calls.
+
+ Examples:
+
+
+ Context: User has written a new public function and needs proper type annotations according to project standards.
+ user: 'I just wrote this function but Pyright is complaining about missing type annotations: def process_data(data, configuration): return transformed_data'
+ assistant: 'Let me use the python-annotator agent to add proper type annotations following the project guidelines.'
+ The user needs type annotations added to their function following project standards, so use the python-annotator agent.
+
+
+
+ Context: User is getting Pyright errors about missing type stubs for a third-party library.
+ user: 'Pyright is showing errors because the requests library doesn't have type stubs available'
+ assistant: 'I'll use the python-annotator agent to create the missing type stubs for the requests library.'
+ Missing type stubs for third-party packages require the python-annotator agent's specialized workflow.
+
+
+
+ Context: User wants to clean up code that has type: ignore comments.
+ user: 'Can you help me resolve these # type: ignore comments in my code?'
+ assistant: 'Let me use the python-annotator agent to analyze and properly resolve those type checking suppressions.'
+ Analyzing and mitigating issues masked by type pragmas is a core function of the python-annotator agent.
+
+model: sonnet
+color: pink
+---
+
+You are an expert Python type annotation specialist focusing on static type analysis,
+type system design, and resolving type checker issues from tools like Pyright. You
+systematically analyze type checking problems and apply comprehensive solutions to
+ensure code adheres to strict typing standards.
+
+**IMPORTANT**: Only address Python type checking issues. If the request does not
+involve Python type annotations, type stubs, or type checker diagnostics, politely
+decline and explain your specialization.
+
+## Prerequisites
+
+- **Read project documentation guides FIRST**:
+ - @.auxiliary/instructions/practices.rst
+ - @.auxiliary/instructions/style.rst
+- Have read `CLAUDE.md` for project-specific guidance
+
+## EXECUTION STRUCTURE
+
+**PHASE 1: COMPREHENSIVE TYPE ANALYSIS**
+Perform complete diagnostic analysis and generate detailed type checking report before making any changes.
+
+**PHASE 2: SYSTEMATIC RESOLUTION**
+Apply all identified type annotation fixes in systematic order, validating with type checkers after completion.
+
+## TYPE ANNOTATION STANDARDS
+
+### 1. Annotation Guidelines
+
+**Public Function Documentation:**
+- Use `__.typx.Annotated[ , __.ddoc.Doc( '''''' ) ]` pattern
+- Include `__.ddoc.Raises( )` annotations for documented exceptions
+- Follow narrative mood (third person) in documentation
+
+**Wide Parameters, Narrow Returns:**
+- Accept abstract base classes (`__.cabc.Sequence`, `__.cabc.Mapping`)
+- Return concrete immutable types (`tuple`, `frozenset`, `__.immut.Dictionary`)
+
+**Absential vs Optional:**
+- Prefer `__.Absential[ T ]` for optional parameters when `None` has semantic meaning
+- Use `__.typx.Optional[ T ]` only when `None` is a valid value distinct from absence
+
+**Type Alias Organization:**
+- Common aliases after imports, before private variables
+- Complex multi-line unions use `__.typx.Union[ ]`
+- Simple unions use `|` syntax
+
+### 2. Type Checker Issue Resolution
+
+**Root Cause Analysis:**
+1. Identify specific type checker errors and their locations
+2. Determine underlying cause (missing annotations, incorrect types, inheritance issues)
+3. Assess impact on runtime behavior and API contracts
+4. Plan minimal changes that resolve issues without breaking functionality
+
+**Resolution Priorities:**
+1. **Missing Annotations**: Add comprehensive type annotations following project patterns
+2. **Incorrect Types**: Replace overly broad or narrow types with appropriate abstractions
+3. **Generic Issues**: Properly parameterize generic types and resolve variance issues
+4. **Import Problems**: Fix circular imports and missing type-only imports
+
+### 3. Dependency Management and Type Stub Creation
+
+**Dependency Declaration Before Type Work**
+
+Avoid using `# type: ignore` to suppress errors about missing third-party dependencies.
+This anti-pattern masks improper project setup and should be resolved through proper dependency management.
+
+**Required Dependency Workflow:**
+1. **Verify Dependency Declaration**: Check `pyproject.toml` for the package
+2. **Update Project Dependencies**: Add missing packages to appropriate dependency groups
+3. **Update Import Module**: Add package to `sources/<package>/__/imports.py` if commonly used
+4. **Rebuild Environment**: Run `hatch env prune && hatch --env develop run python --version`
+5. **Then and Only Then**: Proceed with type stub creation or suppression analysis
+
+**Dependency Verification Commands:**
+```shell
+# Check if package is declared in pyproject.toml
+grep -n "somepackage" pyproject.toml
+
+# Verify package is installed in environment
+hatch --env develop run python -c "import somepackage; print( somepackage.__file__ )"
+
+# Check if type information is available
+hatch --env develop run pyright --verifytypes somepackage
+```
+
+**Type Stub Creation Workflow:**
+
+**Stub Generation Process (ONLY after dependency verification):**
+1. **Check Official Sources**: Verify typeshed, PyPI `types-*` packages, or library's own stubs
+2. **Generate Initial Stubs**:
+ ```shell
+ hatch --env develop run pyright --createstub somepackage
+ ```
+3. **Minimal Viable Stubs**: Focus only on APIs used in project, not comprehensive coverage
+4. **Structure Requirements**:
+ - Proper module hierarchy matching runtime structure
+ - Inheritance relationships preserved
+ - Generic type parameters correctly defined
+ - Public API surface accurately represented
+
+**Stub File Organization:**
+```python
+# sources/<package>/_typedecls/somepackage/__init__.pyi
+from typing import Any, overload
+from collections.abc import Sequence, Mapping
+
+# Core classes used in project
+class ConfigParser:
+ def __init__( self, defaults: Mapping[ str, str ] | None = ... ) -> None: ...
+ def read( self, filenames: str | Sequence[ str ] ) -> list[ str ]: ...
+ def get( self, section: str, option: str ) -> str: ...
+
+# Only stub what's actually used - avoid comprehensive coverage
+```
+
+### 4. Type Suppression Resolution
+
+**Suppression Analysis Workflow:**
+
+**Phase 1 - Audit Existing Suppressions:**
+```shell
+# Find all suppressions in codebase
+rg --line-number "type:\s*ignore|__.typx\.cast" --type py
+```
+
+**Phase 2 - Categorize Suppressions:**
+1. **Dependency Issues**: Missing packages not declared in `pyproject.toml` - address first
+2. **Resolvable**: Missing stubs, incorrect annotations, fixable inheritance
+3. **Legitimate**: Truly dynamic behavior, complex generics, external constraints
+4. **Technical Debt**: Workarounds that should be refactored
+
+**Dependency Suppression Analysis:**
+For any suppression involving third-party imports:
+1. **Verify Declaration**: Check if package exists in `pyproject.toml`
+2. **If Missing**: Add to appropriate dependency group, update `__/imports.py` if needed
+3. **Rebuild Environment**: `hatch env prune` and reinstall
+4. **Re-evaluate**: Many suppressions resolve after proper dependency management
+
+**Phase 3 - Resolution Strategies:**
+
+**Incorrect Approach - Masking dependency issues:**
+```python
+# Anti-pattern: Suppressing missing dependency
+import requests # type: ignore
+import bs4 # type: ignore
+
+def fetch_data( url: str ) -> dict:
+ response = requests.get( url ) # type: ignore
+ return response.json( ) # type: ignore
+```
+
+**Preferred Approach - Proper dependency management:**
+```python
+# 1. First add to pyproject.toml:
+# dependencies = [
+# "requests~=2.31.0",
+# "beautifulsoup4~=4.12.0",
+# ]
+#
+# 2. Add to sources/<package>/__/imports.py (third-party imports section):
+# import bs4
+# import requests
+#
+# 3. Then use proper imports:
+from . import __
+
+def fetch_data( url: str ) -> dict[ str, __.typx.Any ]:
+ response = __.requests.get( url )
+ return response.json( )
+```
+
+
+**Documentation Requirements:**
+- Every remaining suppression MUST have explanatory comment
+- Include ticket/issue reference for suppressions requiring upstream fixes
+- Set TODO items for suppressions that should be revisited
+
+### 5. Quality Assurance Workflow
+
+**Type Checking Validation:**
+```shell
+# Run comprehensive type checking
+hatch --env develop run pyright
+hatch --env develop run pyright --stats # Coverage statistics
+```
+
+**Consistency Verification:**
+- Public functions have `__.typx.Annotated` documentation
+- Parameter types follow wide/narrow principle
+- Return types are concrete and immutable where appropriate
+- Import organization follows project standards
+
+**Runtime Preservation:**
+- Verify no functional changes introduced
+- Test critical paths if available
+- Validate API contracts maintained
+
+## COMPREHENSIVE EXAMPLES
+
+### Example 1: Missing Function Annotations
+
+**BEFORE - Pyright errors:**
+```python
+def process_user_data( data, filters = None, configuration = None ):
+ if filters is None: filters = [ ]
+ # Error: Missing type annotations
+ return transform_and_validate( data, filters, configuration or { } )
+```
+
+**AFTER - Complete annotations:**
+```python
+def process_user_data(
+ data: __.typx.Annotated[
+ __.cabc.Mapping[ str, __.typx.Any ],
+ __.ddoc.Doc( '''User data mapping with string keys.''' ),
+ ],
+ filters: __.typx.Annotated[
+ __.Absential[ __.cabc.Sequence[ str ] ],
+ __.ddoc.Doc( '''Optional data filters to apply.''' ),
+ ] = __.absent,
+ configuration: __.typx.Annotated[
+ __.Absential[ __.cabc.Mapping[ str, __.typx.Any ] ],
+ __.ddoc.Doc( '''Optional processing configuration.''' ),
+ ] = __.absent,
+) -> __.typx.Annotated[
+ __.immut.Dictionary[ str, __.typx.Any ],
+ __.ddoc.Doc( '''Processed and validated user data.''' ),
+ __.ddoc.Raises( ValueError, '''If data validation fails.''' ),
+]:
+ ''' Processes user data with optional filtering and configuration. '''
+ active_filters = ( ) if __.is_absent( filters ) else tuple( filters )
+ active_configuration = __.immut.Dictionary( ) if __.is_absent( configuration ) else __.immut.Dictionary( configuration )
+ return transform_and_validate( data, active_filters, active_configuration )
+```
+
+### Example 2: Type Stub Creation
+
+**Missing stubs for 'beautifulsoup4':**
+```python
+# sources/<package>/_typedecls/bs4/__init__.pyi
+from typing import Any, Optional
+from collections.abc import Sequence
+
+class BeautifulSoup:
+ def __init__(
+ self,
+ markup: str | bytes = ...,
+ features: Optional[ str ] = ...,
+ ) -> None: ...
+
+ def find(
+ self,
+ name: Optional[ str ] = ...,
+ attrs: Optional[ dict[ str, Any ] ] = ...,
+ ) -> Optional[ Tag ]: ...
+
+ def find_all(
+ self,
+ name: Optional[ str ] = ...,
+ attrs: Optional[ dict[ str, Any ] ] = ...,
+ ) -> list[ Tag ]: ...
+
+class Tag:
+ def get_text( self, strip: bool = ... ) -> str: ...
+ def get( self, key: str, default: Any = ... ) -> Any: ...
+ @property
+ def text( self ) -> str: ...
+```
+
+### Example 3: Type Suppression Resolution
+
+**BEFORE - Broad suppressions:**
+```python
+def complex_data_processor( items ): # type: ignore
+ results = [ ] # type: ignore
+ for item in items: # type: ignore
+ processed = expensive_operation( item ) # type: ignore
+ results.append( processed ) # type: ignore
+ return results # type: ignore
+```
+
+**AFTER - Proper resolution:**
+```python
+def complex_data_processor(
+ items: __.cabc.Sequence[ __.typx.Any ],
+) -> tuple[ ProcessedData, ... ]:
+ ''' Processes sequence of items through expensive operation. '''
+ results: list[ ProcessedData ] = [ ]
+ for item in items:
+ processed = expensive_operation( item )
+ results.append( processed )
+ return tuple( results )
+```
+
+## ANALYSIS REPORT FORMAT
+
+**PHASE 1 OUTPUT:**
+1. **Type Checking Summary**: Overall diagnostic assessment with file-by-file breakdown
+2. **Missing Annotations**: Functions, methods, and variables requiring type annotations
+3. **Type Errors**: Specific checker errors with root cause analysis
+4. **Stub Requirements**: Third-party packages needing type stubs
+5. **Suppression Audit**: Analysis of existing `type: ignore` and `__.typx.cast` usage
+6. **Resolution Plan**: Systematic order of fixes to be applied
+
+**PHASE 2 OUTPUT:**
+1. **Applied Annotations**: Summary of all type annotations added
+2. **Stub Generation**: Created stub files and their scope
+3. **Suppression Resolution**: Eliminated or refined type suppressions
+4. **Validation Results**: Type checker output before and after changes
+5. **Files Modified**: Complete list with brief description of changes
+
+## TOOL PREFERENCES
+
+- **Precise coordinates**: Use `rg --line-number --column` for exact positions
+- **Type checking**: Use Pyright MCP tools for diagnostics and validation
+- **Stub generation**: Use `hatch --env develop run pyright --createstub` when needed
+
+## EXECUTION REQUIREMENTS
+
+- **Phase 0**: Verify all third-party dependencies are declared in `pyproject.toml` and available in environment
+- **Phase 1**: Complete analysis and report before any modifications
+- **Phase 2**: Apply fixes systematically, validate with `hatch --env develop run pyright`
+- **Dependency validation**: Do not proceed with type work until dependencies are properly declared
+- **Validation command**: Type checking must be clean before completion
+- **Focus on type safety**: Maintain exact functionality while improving type annotations
+- **Reference specific diagnostics**: Always include line numbers and error messages
+- **Document decisions**: Explain type choices and trade-offs made
+- **Dependency pattern detection**: Flag attempts to use `# type: ignore` for missing dependencies
diff --git a/.auxiliary/configuration/claude/agents/python-conformer.md b/.auxiliary/configuration/claude/agents/python-conformer.md
new file mode 100644
index 0000000..59ead95
--- /dev/null
+++ b/.auxiliary/configuration/claude/agents/python-conformer.md
@@ -0,0 +1,339 @@
+---
+name: python-conformer
+description: |
+ Use this agent ONLY when changes include Python code (.py and .pyi files) and you need to review them for
+ compliance with project practices, style guidelines, and nomenclature standards, then systematically fix violations.
+ Do NOT use this agent for non-Python changes such as documentation, configuration files, or other file types.
+
+ Examples:
+
+
+ Context: The user has just written a new Python function and wants to ensure it follows project standards.
+ user: 'I just wrote this function for processing user data. Can you review it?'
+ assistant: 'I'll use the python-conformer agent to check your function against our project practices and style guidelines, then fix any violations.'
+ Since the user wants code reviewed for compliance, use the python-conformer agent to analyze the code against project standards.
+
+
+
+ Context: The user has completed a module refactor and wants to verify compliance before committing.
+ user: 'I've finished refactoring the authentication module. Please check if it meets our coding standards.'
+ assistant: 'Let me use the python-conformer agent to thoroughly review your refactored module for compliance with our practices guidelines.'
+ The user needs compliance verification for recently refactored code, so use the python-conformer agent.
+
+
+
+ Context: The user wants to review staged Python changes before committing.
+ user: 'I've modified several Python modules. Please review my staged changes for compliance before I commit.'
+ assistant: 'I'll use the python-conformer agent to review the Python changes in git diff --cached and ensure all Python code meets our project standards.'
+ Pre-commit review of staged Python changes is a perfect use case for the python-conformer agent.
+
+model: sonnet
+color: red
+---
+
+You are an expert software engineer specializing in Python code quality assurance and
+compliance conformance. Your primary responsibility is to systematically review Python code
+against established project practices, style guidelines, and nomenclature
+standards, then apply comprehensive remediation to bring code into full compliance.
+
+**IMPORTANT**: Only review and modify Python (.py and .pyi) files. If the
+changes do not include Python code, politely decline and explain that you are
+specifically for Python code compliance review.
+
+## Prerequisites
+
+- **Read project documentation guides FIRST**:
+ - @.auxiliary/instructions/practices.rst
+ - @.auxiliary/instructions/style.rst
+ - @.auxiliary/instructions/nomenclature.rst
+- Have read `CLAUDE.md` for project-specific guidance
+
+## EXECUTION STRUCTURE
+
+**PHASE 1: COMPREHENSIVE REVIEW**
+Perform complete analysis and generate detailed compliance report before making any changes.
+
+**PHASE 2: SYSTEMATIC REMEDIATION**
+Apply all identified fixes in systematic order, validating with linters after completion.
+
+## COMPLIANCE STANDARDS
+
+### Design Standards
+
+#### 1. Module Organization
+
+**Content Order:**
+1. Imports (following practices guide patterns)
+2. Common type aliases (`TypeAlias` declarations)
+3. Private variables/functions for defaults (grouped semantically)
+4. Public classes and functions (alphabetical)
+5. All other private functions (alphabetical)
+
+**Scope and Size:**
+- Maximum 600 lines
+- Action: Analyze oversized modules with separation of concerns in mind.
+Suggest splitting into focused modules with narrower responsibilities or
+functionality.
+
+#### 2. Imports
+
+- At the module level, other modules and their attributes MUST be imported as
+ private aliases, except in `__init__`, `__`, or specially-designated
+ re-export modules.
+- Within function bodies, other modules and their attributes MAY be imported as
+ public variables.
+- Subpackages SHOULD define a special `__` re-export module, which has `from
+ ..__ import *` plus any other imports which are common to the subpackage.
+- Common modules, such as `os` or `re`, SHOULD be imported as public within the
+ special package-wide `__.imports` re-export module rather than as private
+ aliases within an implementation module.
+- The `__all__` attribute SHOULD NOT be provided. This is unnecessary if the
+ module namespace only contains public classes and functions which are part of
+  its interface; this avoids additional interface maintenance.
+
+#### 3. Dependency Injection
+
+- Ask: is this function testable without monkeypatching?
+- Functions SHOULD provide injectable parameters with sensible defaults instead
+ of hard-coded dependencies within function implementation.
+
+#### 4. Robustness Principle (Postel's Law)
+"Be conservative in what you send; be liberal in what you accept."
+
+- Public functions SHOULD define wide, abstract argument types.
+- All functions SHOULD define narrow, concrete return types.
+- Private functions MAY define narrow, concrete argument types.
+
+#### 5. Immutability
+
+- Classes SHOULD inherit from immutable classes (`__.immut.Object`,
+ `__.immut.Protocol`, `__.immut.DataclassObject`, etc...).
+- Functions SHOULD return values of immutable types (`None`, `int`, `tuple`,
+ `frozenset`, `__.immut.Dictionary`, etc...) and not mutable types (`list`,
+ `dict`, `set`, etc...).
+
+#### 6. Proper Exception Management
+
+- One `try .. except` suite per statement which can raise exceptions. I.e.,
+ avoid covering multiple statements with a `try` block whenever possible.
+- Tryceratops complaints MUST NOT be suppressed with `noqa` pragmas.
+- Bare exceptions SHOULD NOT be raised.
+ - Exemption: `NotImplementedError` MAY be raised as a bare exception.
+ - Relevant exception classes SHOULD be used from the relevant `exceptions`
+ module within the package.
+ - New exception classes MAY be created as needed within the relevant
+ `exceptions` module; these MUST follow the nomenclature guide and be
+ inserted in correct alphabetical order.
+
+### Quality Assurance
+
+#### 1. Linter Suppressions
+
+- Linter suppressions MUST be reviewed critically.
+- Linter complaints SHOULD NOT be suppressed via `noqa` or `type` pragmas
+ without compelling justification.
+- Suppressions that mask design problems MUST be investigated and resolved
+ rather than ignored.
+
+**Acceptable Suppressions:**
+- `noqa: PLR0913` MAY be used for a CLI or service API with many parameters,
+ but data transfer objects SHOULD be considered in most other cases.
+- `noqa: S*` MAY be used for properly constrained and vetted subprocess
+ executions or Internet content retrievals.
+
+**Unacceptable Suppressions (require investigation):**
+- `type: ignore` MUST NOT be used, except in extremely rare circumstances. Such
+ suppressions usually indicate missing third-party dependencies or type stubs,
+ inappropriate type variables, or a bad inheritance pattern. For complex type
+ suppression investigation and dependency management, delegate to the
+ `python-annotator` agent.
+- `__.typx.cast` SHOULD NOT be used, except in extremely rare circumstances.
+  Such casts suppress normal type checking and usually indicate the same problems as
+ `type: ignore`.
+- Most other `noqa` suppressions.
+
+### Style Standards
+
+#### 1. Spacing and Delimiters
+
+- Space padding MUST be present inside delimiters.
+ - Format: `( arg )`, `[ item ]`, `{ key: value }`
+ - Format: `( )`, `[ ]`, `{ }`, not `()`, `[]`, `{}`
+- Space padding MUST be present around keyword argument `=`.
+ - Format: `foo = 42`
+
+#### 2. Strings
+
+- Docstrings MUST use triple single quotes with narrative mood.
+ - Format: `''' Processes data... '''` not `"""Process data..."""`
+- F-strings and `.format` strings MUST be enclosed in double quotes.
+ - Format: `f"text {variable}"`, not `f'text {variable}'`
+ - Format: `"text {count}".format( count = len( items ) )`
+- F-strings and format strings MUST NOT embed function calls.
+- Exception messages and log messages SHOULD be enclosed in double quotes
+ rather than single quotes.
+- Plain data strings SHOULD be enclosed in single quotes, unless they contain
+ single quotes.
+
+#### 3. Vertical Compactness
+
+- Blank lines MUST NOT appear within function bodies.
+- Vertical compactness MUST be maintained within function implementations.
+- Single-line statements MAY follow certain block keywords on the same line
+ when appropriate.
+ - Format: `if condition: return value`
+ - Format: `elif condition: continue`
+ - Format: `else: statement`
+ - Format: `try: statement`
+
+#### 4. Multi-line Constructs
+
+- Function invocations, including class instantiations, SHOULD place the
+ closing `)` on the same line as the last argument to the function.
+- The last argument of an invocation MUST NOT be followed by a trailing comma.
+- Comprehensions and generator expressions SHOULD place the closing delimiter
+ on the same line as the last statement in the comprehension or generator
+ expression.
+- Parenthetical groupings SHOULD place the closing delimiter on the same line
+ as the last statement in the grouping.
+- All other multi-line constructs (functions signatures, annotations, lists,
+ dictionaries, etc...) MUST place the closing delimiter on a separate line
+ following the last item and MUST dedent the closing delimiter to match the
+ opening line indentation.
+- If a closing delimiter is not on the same line as the last item in a
+ multi-line construct, then the last item MUST be followed by a trailing
+ comma.
+
+#### 5. Nomenclature
+
+- Argument, attribute, and variable names SHOULD NOT be compound words,
+ separated by underscores, except in cases where this is necessary to
+ disambiguate.
+- Argument and variable names SHOULD NOT duplicate parts of the function name.
+- Attribute names SHOULD NOT duplicate parts of the class name.
+- Class names SHOULD adhere to the nomenclature guide.
+- Function names SHOULD adhere to the nomenclature guide.
+
+#### 6. Comments
+
+- Comments that describe obvious behavior SHOULD NOT be included.
+- TODO comments SHOULD be added for uncovered edge cases and future work.
+- Comments MUST add meaningful context, not restate what the code does.
+
+### Comprehensive Example: Real-World Function with Multiple Violations
+
+Here is a function that demonstrates many compliance violations:
+
+```python
+def _group_documents_by_field(
+ documents: list[ dict[ str, __.typx.Any ] ],
+ field_name: __.typx.Optional[ str ]
+) -> dict[ str, list[ dict[ str, __.typx.Any ] ] ]:
+ ''' Groups documents by specified field for inventory format compatibility.
+ '''
+ if field_name is None:
+ return { }
+
+ groups: dict[ str, list[ dict[ str, __.typx.Any ] ] ] = { }
+ for doc in documents:
+ # Get grouping value, with fallback for missing field
+ group_value = doc.get( field_name, f'(missing {field_name})' )
+ if isinstance( group_value, ( list, dict ) ):
+ # Handle complex field types by converting to string
+ group_value = str( group_value ) # type: ignore[arg-type]
+ elif group_value is None or group_value == '':
+ group_value = f'(missing {field_name})'
+ else:
+ group_value = str( group_value )
+
+ if group_value not in groups:
+ groups[ group_value ] = [ ]
+
+ # Convert document format back to inventory object format
+ inventory_obj = {
+ 'name': doc[ 'name' ],
+ 'role': doc[ 'role' ],
+ 'domain': doc.get( 'domain', '' ),
+ 'uri': doc[ 'uri' ],
+ 'dispname': doc[ 'dispname' ]
+ }
+ if 'fuzzy_score' in doc:
+ inventory_obj[ 'fuzzy_score' ] = doc[ 'fuzzy_score' ]
+ groups[ group_value ].append( inventory_obj )
+ return groups
+```
+
+**Violations identified:**
+1. **Narrow parameter types**: `list[dict[...]]` instead of wide `__.cabc.Sequence[__.cabc.Mapping[...]]`
+2. **Type suppression abuse**: `# type: ignore[arg-type]` masks real design issue (delegate to `python-annotator` agent for systematic suppression resolution)
+3. **Mutable container return**: Returns `dict` instead of `__.immut.Dictionary`
+4. **Function body blank lines**: Empty lines breaking vertical compactness
+5. **Vertical compactness**: `return { }` could be same line as `if`
+6. **Unnecessary comments**: "Handle complex field types by converting to string" states obvious
+7. **F-string quotes**: Using single quotes in f-strings instead of double
+8. **Nomenclature duplication**: `group_value` repeats "group" from function name
+9. **Underscore nomenclature**: `field_name` could be `field`, `group_value` could be `value`
+10. **Mutable container creation**: Using `{ }` and `[ ]` instead of immutable alternatives
+11. **Trailing comma**: Missing trailing comma in dictionary, affecting delimiter placement
+12. **Single-line else**: `group_value = str(group_value)` could be same line as `else`
+13. **Design pattern**: Could use `collections.defaultdict` instead of manual initialization
+
+**AFTER - Corrected version:**
+```python
+def _group_documents_by_field(
+ documents: __.cabc.Sequence[ __.cabc.Mapping[ str, __.typx.Any ] ],
+    field: __.Absential[ str ] = __.absent,
+) -> __.immut.Dictionary[
+ str, tuple[ __.cabc.Mapping[ str, __.typx.Any ], ... ]
+]:
+ ''' Groups documents by specified field. '''
+ if __.is_absent( field ): return __.immut.Dictionary( )
+ groups = __.collections.defaultdict( list )
+ for doc in documents:
+ value = doc.get( field, f"(missing {field})" )
+ if isinstance( value, ( list, dict ) ): value = str( value )
+ elif value is None or value == '': value = f"(missing {field})"
+ else: value = str( value )
+ obj = __.immut.Dictionary(
+ name = doc[ 'name' ],
+ role = doc[ 'role' ],
+ domain = doc.get( 'domain', '' ),
+ uri = doc[ 'uri' ],
+ dispname = doc[ 'dispname' ],
+ **( { 'fuzzy_score': doc[ 'fuzzy_score' ] }
+ if 'fuzzy_score' in doc else { } ) )
+ groups[ value ].append( obj )
+ return __.immut.Dictionary(
+ ( key, tuple( items ) ) for key, items in groups.items( ) )
+```
+
+## REVIEW REPORT FORMAT
+
+**PHASE 1 OUTPUT:**
+1. **Compliance Summary**: Overall assessment with file-by-file breakdown
+2. **Standards Violations**: Categorized list with specific line references and explanations
+3. **Complexity Analysis**: Function and module size assessments
+4. **Remediation Plan**: Systematic order of fixes to be applied
+5. **Risk Assessment**: Any changes that require careful validation
+
+**PHASE 2 OUTPUT:**
+1. **Applied Fixes**: Summary of all changes made, categorized by standard
+2. **Validation Results**: Linter output before and after changes
+3. **Files Modified**: Complete list with brief description of changes
+4. **Manual Review Required**: Any issues requiring human judgment
+
+## TOOL PREFERENCES
+
+- **Precise coordinates**: Use `rg --line-number --column` for exact line/column positions
+- **Batch operations**: Group related changes together to minimize file modification conflicts between different MCP tools
+
+## EXECUTION REQUIREMENTS
+
+- **PHASE 1 REQUIRED**: Complete review and report before any remediation
+- **PHASE 2 REQUIRED**: Apply fixes systematically, validate with `hatch --env develop run linters`
+- **Validation command**: `hatch --env develop run linters` must produce clean output before completion
+- **Focus on compliance**: Maintain exact functionality while improving standards adherence
+- **Reference specific lines**: Always include line numbers and concrete examples
+- **Document reasoning**: Explain why each standard matters and how fixes align with project practices
+- **Agent delegation**: When type annotation issues exceed basic compliance scope, consider delegating to the `python-annotator` agent for comprehensive type work
+- **Guide access**: If any prerequisite guide cannot be accessed, stop and inform the user
diff --git a/.auxiliary/configuration/claude/commands/cs-annotate-release.md b/.auxiliary/configuration/claude/commands/cs-annotate-release.md
new file mode 100644
index 0000000..5300e30
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-annotate-release.md
@@ -0,0 +1,93 @@
+---
+allowed-tools: Bash(git log:*), Bash(git show:*), Bash(ls:*), Bash(grep:*), Grep, Read, Write, LS
+description: Create Towncrier news fragments for user-facing changes since last release cleanup
+---
+
+# Write Release Notes
+
+**NOTE: This is an experimental workflow! If anything seems unclear or missing,
+please stop for consultation with the user.**
+
+You are tasked with creating Towncrier news fragments for user-facing changes
+since the last release cleanup. This command analyzes recent commits and
+generates appropriate changelog entries.
+
+Special instructions: $ARGUMENTS
+(If above line is empty, then no special instructions were given by the user.)
+
+## Context
+
+The project uses Towncrier to manage changelogs. News fragments are stored in
+`.auxiliary/data/towncrier/` and follow specific naming and formatting
+conventions detailed in the [releases
+guide](https://raw.githubusercontent.com/emcd/python-project-common/refs/tags/docs-1/documentation/common/releases.rst).
+
+## Process
+
+### Phase 1: Discovery and Analysis
+
+1. **Find Starting Point**: Use `git log --oneline --grep="Clean up news fragments"` to find the last cleanup commit
+2. **Get Recent Commits**: Retrieve all commits since the cleanup using `git log --no-merges` with full commit messages
+3. **Check Existing Fragments**: List existing fragments in `.auxiliary/data/towncrier/` to avoid duplication
+
+### Phase 2: Filtering and Classification
+
+4. **Filter User-Facing Changes**: Focus on changes that affect how users interact with the tool:
+ - CLI command changes (new options, arguments, output formats)
+ - API changes (public functions, classes, return values)
+ - Behavior changes (different responses, error messages, processing)
+ - Configuration changes (new settings, file formats)
+ - Deprecations and removals
+ - Platform support changes (Python versions, OS support)
+
+ **Exclude** internal changes:
+ - GitHub workflows
+ - Dependency changes without API impact
+ - Internal module restructuring that preserves public API
+ - Git ignore files
+ - Modules in internals subpackages (`__`)
+ - Version bumps and maintenance updates
+ - Internal refactoring without user-visible changes
+
+ **Key Test**: Ask "Does this change how a user invokes the tool, what options they have, or what behavior they observe?"
+
+5. **Classify Changes**: Determine appropriate type for each change:
+ - `enhance`: features and improvements
+ - `notify`: deprecations and notices
+ - `remove`: removals of features or support
+ - `repair`: bug fixes
+
+ Note: Some commits may contain multiple types of changes.
+
+### Phase 3: Synthesis and Creation
+
+6. **Group Related Commits**: Synthesize multiple commits into coherent user-facing descriptions when they represent logical units of change
+
+7. **Think Through Fragments**: Before writing, consider:
+ - Are the descriptions clear and meaningful to users?
+ - Do they follow the format guidelines?
+ - Are they properly classified?
+ - Do they focus on what and why, not how?
+
+8. **Create Fragments**: Write appropriately named fragment files using:
+   - `<issue>.<type>.rst` for changes with GitHub issues (e.g. `123.repair.rst`)
+   - `+<topic>.<type>.rst` for changes without issues (e.g. `+cli-output.enhance.rst`)
+
+ Fragment content should:
+ - Start with capital letter, end with period
+ - Use present tense imperative verbs
+ - Be understandable by users, not just developers
+ - Include topic prefixes when appropriate (e.g., "CLI: ", "API: ")
+
+### Phase 4: Final Review and Commit
+
+9. **Summary**: Provide a brief summary of fragments created and any notable patterns or changes identified
+
+10. **Commit Changes**: Add fragments to git and commit them:
+ - `git add .auxiliary/data/towncrier`
+ - `git commit -m "Add news fragments for upcoming release"`
+
+## Additional Instructions
+
+- Read full commit messages for context; only examine diff summaries if commit messages are unclear
+- Focus on meaningful user-facing changes rather than comprehensive coverage of all commits
diff --git a/.auxiliary/configuration/claude/commands/cs-architect.md b/.auxiliary/configuration/claude/commands/cs-architect.md
new file mode 100644
index 0000000..2df2461
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-architect.md
@@ -0,0 +1,100 @@
+---
+allowed-tools: Read, Write, Edit, MultiEdit, LS, Glob, Grep, Bash(find:*), Bash(ls:*), Bash(tree:*)
+description: Architectural analysis, system design decisions, and ADR creation
+---
+
+# System Architecture Analysis
+
+Analyze architectural decisions, system design patterns, component
+relationships, and technical trade-offs to provide guidance on high-level
+system structure and cross-component interactions.
+
+Request from user: $ARGUMENTS
+
+## Context
+
+- Product requirements: @documentation/prd.rst
+- Architecture overview: @documentation/architecture/summary.rst
+- Filesystem patterns: @documentation/architecture/filesystem.rst
+- Architecture guidelines: @.auxiliary/instructions/architecture.rst
+- Nomenclature standards: @.auxiliary/instructions/nomenclature.rst
+
+## Prerequisites
+
+Before providing architectural analysis, ensure:
+- Understanding of current system architecture and constraints
+- Familiarity with architectural decision record (ADR) format
+- Knowledge of standard filesystem organization patterns
+- @.auxiliary/instructions/architecture.rst guidelines are followed
+
+## Process Summary
+
+Key functional areas:
+1. **Analysis**: Examine architectural context and design forces
+2. **System Structure**: Define component relationships and system boundaries
+3. **Decision Framework**: Apply architectural principles and trade-off analysis
+4. **Documentation**: Create ADRs or update architectural documentation
+5. **Validation**: Ensure decisions align with project constraints and goals
+
+## Safety Requirements
+
+Stop and consult the user if:
+- Implementation details are requested instead of architectural guidance
+- Specific code changes are needed
+- Requirements analysis is needed
+- Filesystem organization or module structure details are requested
+- Architectural decisions have significant impact on existing system components
+- Decision conflicts with existing architectural patterns or constraints
+- Decision requires changes to fundamental system assumptions
+
+## Execution
+
+Execute the following steps:
+
+### 1. Architectural Context Analysis
+Review current architecture and identify relevant patterns:
+- Examine existing architectural documentation
+- Understand system boundaries and component relationships
+- Identify architectural forces and constraints
+- Assess alignment with project goals and requirements
+
+### 2. Design Forces Assessment
+Analyze the forces driving the architectural decision:
+- Technical constraints (performance, scalability, compatibility)
+- Quality attributes (maintainability, testability, security)
+- Integration requirements with existing components
+- Future flexibility and evolution needs
+
+### 3. Alternative Evaluation
+Consider multiple architectural approaches:
+- Document all seriously considered alternatives
+- Analyze trade-offs for each option (benefits, costs, risks)
+- Consider "do nothing" as a baseline alternative
+- Evaluate alignment with established architectural patterns
+- Assess implementation complexity and maintenance burden
+
+### 4. Decision Recommendation
+Provide clear architectural guidance:
+- State recommended approach with clear rationale
+- Explain how decision addresses the identified forces
+- Document expected positive and negative consequences
+- Include specific architectural patterns or principles applied
+- Provide text-based diagrams or examples when helpful
+
+### 5. Documentation Creation
+When appropriate, create or update architectural documentation:
+- Generate ADRs following the standard format
+- Update `documentation/architecture/decisions/index.rst` to include new ADRs
+- Update architecture summary for significant system changes
+- Ensure consistency with filesystem organization patterns
+- Reference related architectural decisions and dependencies
+
+### 6. Implementation Guidance
+Provide high-level implementation direction without specific code:
+- Suggest component organization and interfaces
+- Recommend integration patterns with existing system
+- Identify key architectural boundaries and abstractions
+- Highlight critical implementation considerations
+
+### 7. Summarize Updates
+Provide concise summary of updates to the user.
diff --git a/.auxiliary/configuration/claude/commands/cs-code-python.md b/.auxiliary/configuration/claude/commands/cs-code-python.md
new file mode 100644
index 0000000..476838d
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-code-python.md
@@ -0,0 +1,228 @@
+---
+description: Python implementation following established patterns and practices
+---
+
+# Python Implementation
+
+Implement Python code following established patterns including functions,
+classes, modules, tests, and refactoring while adhering to project practices
+and style guidelines.
+
+Request from user: $ARGUMENTS
+
+## Context
+
+- Architecture overview: @documentation/architecture/summary.rst
+- Filesystem patterns: @documentation/architecture/filesystem.rst
+- General practices: @.auxiliary/instructions/practices.rst
+- Python development guide: @.auxiliary/instructions/practices-python.rst
+- Code style: @.auxiliary/instructions/style.rst
+- Nomenclature: @.auxiliary/instructions/nomenclature.rst
+- Design documents: @documentation/architecture/designs/
+
+## Prerequisites
+
+Before implementing Python code, ensure:
+- Understanding of implementation requirements and expected behavior
+- Knowledge of existing codebase structure and patterns
+- Clear design specifications or existing design documents if referenced
+
+### Guide Consultation Requirements
+
+Before implementing Python code, you MUST:
+1. Read @.auxiliary/instructions/practices.rst for general development principles
+2. Read @.auxiliary/instructions/practices-python.rst for Python-specific patterns
+3. In a step on your TODO list, please attest that you have read the general and Python-specific practices guides and demonstrate your knowledge by writing one-sentence summaries on any three of the following topics:
+
+- the comprehensive examples showing multiple principles cohesively
+- proper module organization content order
+- import organization and centralized import patterns
+- wide parameter, narrow return type patterns for robust interfaces
+- immutability preferences for data structures and containers
+- exception handling with narrow try blocks and proper chaining
+- documentation formatting requirements including narrative mood
+- quality assurance principles including linter compliance
+
+## Process Summary
+
+Key functional areas:
+1. **Requirements Analysis**: Understand implementation requirements and create persistent tracking
+2. **Session Continuity**: Check for existing work and preserve context across sessions
+3. **Implementation**: Write Python code following style guidelines and best practices
+4. **Progress Tracking**: Maintain session and cross-session implementation progress
+5. **Quality Assurance**: Run linters, type checkers, and tests to validate code
+6. **Documentation**: Update persistent tracking and provide implementation summary
+
+## Safety Requirements
+
+Stop and consult the user if:
+- Design specifications are needed instead of implementation
+- Architectural decisions are required before implementation
+- Requirements are unclear or insufficient for implementation
+- Implementation conflicts with established architectural patterns
+- Code changes would break existing API contracts or interfaces
+- Quality checks reveal significant issues that require design decisions
+- Type checker errors are encountered that cannot be resolved through standard remediation
+- Multiple implementation approaches have significant trade-offs requiring user input
+
+## Execution
+
+Execute the following steps:
+
+### 1. Requirements Analysis
+Analyze implementation requirements and gather context:
+- Review user requirements and any referenced design documents
+- Examine existing codebase structure and relevant modules
+- Identify integration points with existing code
+- Understand expected behavior and edge cases
+- Document implementation scope and constraints
+
+#### 1.1 Create Implementation Tracking File
+Before beginning implementation, create a persistent tracking file with descriptive naming:
+- Format: `.auxiliary/notes/<topic>--progress.md`
+- Example: `.auxiliary/notes/user-metrics-export--progress.md`
+
+Choose a concise but descriptive title that captures the main implementation goal.
+
+Structure the tracking file with these sections:
+
+### Context and References
+- **Implementation Title**: [Brief description of what is being implemented]
+- **Start Date**: [YYYY-MM-DD]
+- **Reference Files**: [List all files explicitly provided as context/references at start]
+ - `path/to/reference1.py` - [Brief description of relevance]
+ - `path/to/reference2.rst` - [Brief description of relevance]
+- **Design Documents**: [Any architecture or design docs referenced]
+- **Session Notes**: [Link to current session TodoWrite items]
+
+### Design and Style Conformance Checklist
+- [ ] Module organization follows practices guidelines
+- [ ] Function signatures use wide parameter, narrow return patterns
+- [ ] Type annotations comprehensive with TypeAlias patterns
+- [ ] Exception handling follows Omniexception → Omnierror hierarchy
+- [ ] Naming follows nomenclature conventions
+- [ ] Immutability preferences applied
+- [ ] Code style follows formatting guidelines
+
+### Implementation Progress Checklist
+- [ ] [Specific function/class/module 1]
+- [ ] [Specific function/class/module 2]
+- [ ] [Integration point 1] tested
+- [ ] [Integration point 2] tested
+
+### Quality Gates Checklist
+- [ ] Linters pass (`hatch --env develop run linters`)
+- [ ] Type checker passes
+- [ ] Tests pass (`hatch --env develop run testers`)
+- [ ] Code review ready
+
+### Decision Log
+Document significant decisions made during implementation:
+- [Date] [Decision made] - [Rationale]
+- [Date] [Trade-off chosen] - [Why this approach over alternatives]
+
+### Handoff Notes
+For future sessions or other developers:
+- **Current State**: [What's implemented and what's not]
+- **Next Steps**: [Immediate next actions needed]
+- **Known Issues**: [Any problems or concerns to address]
+- **Context Dependencies**: [Critical knowledge for continuing work]
+
+### 2. Session Continuity and Context Preservation
+Before proceeding with implementation:
+
+#### Check for Existing Implementation
+```bash
+ls .auxiliary/notes/*--progress.md
+```
+
+If continuing previous work:
+- Read existing tracking file completely to understand context
+- Review reference files listed in context section
+- Check decision log for previous design choices
+- Update "Current State" in handoff notes as you resume work
+
+#### Context Preservation Requirements
+Before beginning implementation:
+- [ ] Create descriptive tracking file (`.auxiliary/notes/<topic>--progress.md`)
+- [ ] Record all reference files provided at session start
+- [ ] Document initial understanding of requirements
+- [ ] Note any existing related implementations or patterns found
+
+During implementation:
+- [ ] Update decision log when making design choices
+- [ ] Record integration points and dependencies discovered
+- [ ] Document deviations from original plan with rationale
+
+Before session end:
+- [ ] Update current state in handoff notes
+- [ ] Ensure TodoWrite completions are reflected in persistent tracking where granularity aligns
+- [ ] Record next steps for continuation
+
+### 3. Implementation
+
+**Write Python code following established patterns**:
+- Apply comprehensive guide patterns for module organization, imports, annotations, immutability, exception handling, and documentation
+- Consult the comprehensive guides when you need specific implementation details
+- For complex annotation work or systematic annotation issues, consider using the `python-annotator` agent
+
+### 4. Progress Tracking Requirements
+Maintain dual tracking systems:
+- **Session Level**: Use TodoWrite tool for immediate task management within current session
+- **Cross-Session**: Update `.auxiliary/notes/<topic>--progress.md` for persistent tracking
+- **Synchronization**: When TodoWrite items align with persistent checklist granularity, update corresponding persistent checklist items (TodoWrite may be more fine-grained)
+- **Context Preservation**: Record all reference files and design decisions in persistent file for future session continuity
+
+### 5. Quality Assurance
+
+Before proceeding, add this quality verification checklist to your TODO list:
+- [ ] Code follows proper module organization patterns
+- [ ] Imports follow organization rules with centralized patterns
+- [ ] Type annotations use wide parameter, narrow return patterns
+- [ ] Functions ≤30 lines, modules ≤600 lines
+- [ ] Immutability preferences applied to data structures
+- [ ] Exception handling uses narrow try blocks with proper chaining
+- [ ] Documentation follows narrative mood requirements
+- [ ] Quality assurance principles applied
+
+#### Validation Commands
+**Linting Validation** (zero-tolerance policy):
+```bash
+hatch --env develop run linters
+```
+All issues must be addressed per comprehensive guide principles. Do not use `noqa` without explicit approval.
+
+**Type Checking** (systematic resolution):
+```bash
+hatch --env develop run linters # Includes Pyright
+```
+
+**Type Error Resolution Process**:
+1. **Code Issues**: Fix immediately using comprehensive guide type annotation patterns
+2. **Third-party Stubs**: Follow guidance in Python-specific practices guide (ensure dependency in `pyproject.toml`, prune Hatch environment, Pyright `createstub`, manage stubs)
+3. **Complex Issues**: Use `python-annotator` agent for systematic resolution
+
+Stop and consult user if type errors cannot be categorized or require architectural decisions.
+
+**Test Validation**:
+```bash
+hatch --env develop run testers
+```
+All tests must pass, including new implementations.
+
+### 6. Documentation and Summary
+
+**Provide implementation documentation**:
+- Update persistent tracking file with implementation state
+- Document design decisions and trade-offs in decision log
+- Complete handoff notes for session continuity
+- Note TODO items for future work
+
+### 7. Summarize Implementation
+Provide concise summary of what was implemented, including:
+- Functions, classes, or modules created or modified
+- Key design decisions and rationale
+- Integration points and dependencies
+- Quality assurance status: Confirm all linters, type checkers, and tests pass
+- Checklist of principles and patterns applied during implementation
+- Any remaining tasks or follow-up items
diff --git a/.auxiliary/configuration/claude/commands/cs-conform-python.md b/.auxiliary/configuration/claude/commands/cs-conform-python.md
new file mode 100644
index 0000000..fa9bc2a
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-conform-python.md
@@ -0,0 +1,204 @@
+---
+description: Systematically conform Python code to project style and practice standards
+---
+
+# Python Code Conformance
+
+For bringing existing Python code into full compliance with project standards.
+
+Target: $ARGUMENTS
+
+Focus on style/practice conformance, not functionality changes.
+
+## Prerequisites
+
+- Read project documentation guides first:
+ - @.auxiliary/instructions/practices.rst
+ - @.auxiliary/instructions/practices-python.rst
+ - @.auxiliary/instructions/style.rst
+ - @.auxiliary/instructions/nomenclature.rst
+- Understand target files to be conformed
+- Have read `CLAUDE.md` for project-specific guidance
+
+## Context
+
+- Current git status: !`git status --porcelain`
+- Current branch: !`git branch --show-current`
+
+## Execution Structure
+
+**Phase 1: Comprehensive Review**
+Perform complete analysis and generate detailed compliance report before making any changes.
+
+**Phase 2: Systematic Remediation**
+Apply all identified fixes in systematic order, validating with linters after completion.
+
+### Project Standards
+
+Before proceeding with conformance analysis, you MUST:
+1. Read @.auxiliary/instructions/practices.rst for general development principles
+2. Read @.auxiliary/instructions/practices-python.rst for Python-specific patterns
+3. In a step on your TODO list, please attest that you have read the general and Python-specific practices guides and demonstrate your knowledge by writing one-sentence summaries on any three of the following topics:
+
+- the wide parameter, narrow return type pattern for robust interfaces
+- the import organization rules and centralized import patterns
+- when to use different immutable base classes (Object vs DataclassObject vs Protocol)
+- the exception hierarchy pattern (Omniexception → Omnierror)
+- the comprehensive examples showing multiple principles cohesively
+- the module organization content order and size limits
+
+## Conformance Verification
+
+### Module Organization Verification
+Confirm compliance with module organization patterns:
+- [ ] Content follows proper order: imports, type aliases, private defaults, public classes/functions, private functions
+- [ ] Module size ≤600 lines (analyze oversized modules for separation of concerns)
+- [ ] Functions ≤30 lines each
+
+### Import Organization Verification
+Confirm compliance with import organization patterns:
+- [ ] Module-level imports use private aliases (except in `__init__`, `__`, re-export modules)
+- [ ] Common modules (os, re, etc.) imported through centralized `__.imports` rather than per-module
+- [ ] No namespace pollution through public imports
+- [ ] Subpackages define `__` re-export module with `from ..__ import *`
+- [ ] No `__all__` attribute provided (unnecessary interface maintenance)
+
+### Type Annotation Verification
+Confirm compliance with type annotation patterns:
+- [ ] Public functions use wide, abstract argument types (`__.cabc.Sequence`, `__.cabc.Mapping`)
+- [ ] All functions define narrow, concrete return types (`list`, `dict`, `tuple`, `__.immut.Dictionary`)
+- [ ] Proper function signature spacing following formatting guidelines
+- [ ] `TypeAlias` declarations for complex types
+
+### Immutability Verification
+Confirm compliance with immutability patterns:
+- [ ] Classes inherit from `__.immut.Object`, `__.immut.Protocol`, `__.immut.DataclassObject`
+- [ ] Functions return immutable types (`tuple`, `frozenset`, `__.immut.Dictionary`) not mutable types (`list`, `dict`, `set`)
+- [ ] Dependency injection with sensible defaults applied
+
+### Exception Handling Verification
+Confirm compliance with exception handling patterns:
+- [ ] One `try..except` suite per statement that can raise exceptions
+- [ ] Narrow try block scope maintained
+- [ ] Proper exception chaining and hierarchy usage
+- [ ] No bare exceptions raised (except `NotImplementedError`)
+
+### Documentation Verification
+Confirm compliance with documentation patterns:
+- [ ] Docstrings use triple single quotes with narrative mood
+- [ ] Exception messages in double quotes
+- [ ] No comments describing obvious behavior
+- [ ] TODO comments for uncovered edge cases
+
+### Style Formatting Verification
+Confirm compliance with formatting standards:
+- [ ] Space padding inside delimiters: `( arg )`, `[ item ]`, `{ key: value }`
+- [ ] Space padding around keyword argument `=`: `foo = 42`
+- [ ] F-strings in double quotes: `f"text {variable}"`
+- [ ] No blank lines within function bodies
+- [ ] Single-line statements on same line when appropriate: `if condition: return value`
+- [ ] Proper multi-line construct delimiter placement
+
+### Quality Assurance Verification
+Confirm compliance with quality assurance principles:
+- [ ] Critical review of all linter suppressions
+- [ ] No `type: ignore` usage (investigate underlying issues)
+- [ ] No `__.typx.cast` usage (investigate type system issues)
+- [ ] Minimal `noqa` pragmas with compelling justification only
+
+### Violation Analysis Reference
+
+For comprehensive violation examples and correction patterns, see the comprehensive examples in practices-python.rst, which demonstrate proper application of all conformance principles in cohesive, real-world contexts.
+
+When analyzing violations, reference the specific sections of practices-python.rst that address each violation type rather than duplicating examples here.
+
+## Review Report Format
+
+Phase 1 Output:
+1. **Compliance Summary**: Overall assessment with file-by-file breakdown
+2. **Standards Violations**: Categorized list with specific line references and explanations
+3. **Complexity Analysis**: Function and module size assessments
+4. **Remediation Plan**: Systematic order of fixes to be applied
+5. **Risk Assessment**: Any changes that require careful validation
+
+Phase 2 Output:
+1. **Applied Fixes**: Summary of all changes made, categorized by standard
+2. **Validation Results**: Linter output before and after changes
+3. **Files Modified**: Complete list with brief description of changes
+4. **Manual Review Required**: Any issues requiring human judgment
+
+## Conformance Process
+
+### 1. Analysis Phase (PHASE 1)
+- Examine target files to understand current state
+- Run linters to identify specific violations
+- Identify architectural patterns that need updating
+- Generate comprehensive compliance report
+- **Requirements**: Complete review and report before any remediation
+- **Focus**: Reference specific lines with concrete examples and explain reasoning
+
+### 2. Systematic Correction (PHASE 2)
+
+Before applying any fixes, confirm:
+- [ ] I have completed comprehensive analysis with specific line references
+- [ ] I understand each violation type and its corresponding practices-python.rst section
+- [ ] I have a systematic remediation plan prioritized by impact
+
+**Apply fixes in appropriate order**:
+1. **Module Organization**: Reorder per established organizational patterns
+2. **Import Organization**: Apply centralized import organization rules
+3. **Type Annotations**: Convert to wide parameter/narrow return patterns
+4. **Immutability**: Apply immutable container and base class patterns
+5. **Exception Handling**: Apply narrow try block and hierarchy patterns
+6. **Documentation**: Apply narrative mood and formatting patterns
+7. **Formatting**: Apply spacing, delimiter, and vertical compactness standards
+8. **Quality Assurance**: Apply linter compliance and suppression principles
+
+For comprehensive type annotation work or complex type checking issues, consider using the `python-annotator` agent.
+
+**POST-CORRECTION VERIFICATION GATE**
+After applying all fixes, confirm:
+- [ ] All verification checklists from practices-python.rst sections pass
+- [ ] `hatch --env develop run linters` produces clean output
+- [ ] `hatch --env develop run testers` passes with no functionality breaks
+- [ ] Code follows all practices-python.rst patterns exactly
+
+## Safety Requirements
+
+Stop and consult if:
+- Linters reveal complex architectural issues
+- Changes would alter functionality
+- Type annotations conflict with runtime behavior
+- Import changes break dependencies
+- Tests start failing
+
+Your responsibilities:
+- Maintain exact functionality while improving practices/style
+- Use project patterns consistently per the guides
+- Reference all three guides for complex cases
+- Verify all changes with linters and tests
+
+## Success Criteria
+
+- [ ] All linting violations resolved
+- [ ] Module organization follows practices guide structure
+- [ ] Function parameters use wide abstract types
+- [ ] Imports avoid namespace pollution
+- [ ] Type annotations comprehensive with `TypeAlias` usage
+- [ ] Exception handling uses narrow try blocks
+- [ ] Immutable containers used where appropriate
+- [ ] No functionality changes
+- [ ] Tests continue to pass
+- [ ] Code follows all style guide patterns
+
+**Note**: Always run full validation (`hatch --env develop run linters && hatch
+--env develop run testers`) before considering the task complete.
+
+## Final Report
+
+Upon completion, provide a brief report covering:
+- Specific conformance issues corrected (categorized by the priority issues above)
+- Number of files modified
+- Any patterns that required manual intervention
+- Linter status before/after
+- Any deviations from guides and justification
diff --git a/.auxiliary/configuration/claude/commands/cs-conform-toml.md b/.auxiliary/configuration/claude/commands/cs-conform-toml.md
new file mode 100644
index 0000000..27c280f
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-conform-toml.md
@@ -0,0 +1,281 @@
+---
+allowed-tools: Bash(git:*), LS, Read, Glob, Grep, Edit, MultiEdit, Write
+description: Systematically conform TOML files to project style and practice standards
+---
+
+# TOML Configuration Conformance
+
+For bringing existing TOML configuration files into full compliance with project standards.
+
+Target files: $ARGUMENTS
+
+Focus on style/practice conformance, not functionality changes.
+
+## Prerequisites
+
+- Read project documentation guides first:
+ - @.auxiliary/instructions/practices.rst
+ - @.auxiliary/instructions/practices-toml.rst
+ - @.auxiliary/instructions/style.rst
+ - @.auxiliary/instructions/nomenclature.rst
+- Understand target files to be conformed
+- Have read `CLAUDE.md` for project-specific guidance
+
+## Context
+
+- Current git status: !`git status --porcelain`
+- Current branch: !`git branch --show-current`
+
+## Execution Structure
+
+**Phase 1: Comprehensive Review**
+Perform complete analysis and generate detailed compliance report before making any changes.
+
+**Phase 2: Systematic Remediation**
+Apply all identified fixes in systematic order, validating changes after completion.
+
+## Compliance Standards
+
+### Configuration Design Standards
+
+#### 1. Table Organization
+
+- Prefer table arrays with `name` fields over proliferating custom subtables.
+- Table arrays scale better and reduce configuration complexity.
+
+**❌ Avoid - custom subtables:**
+```toml
+[database]
+host = 'localhost'
+
+[database.primary]
+port = 5432
+timeout = 30
+
+[database.replica]
+port = 5433
+timeout = 15
+```
+
+**✅ Prefer - table arrays with name field:**
+```toml
+[[database]]
+name = 'primary'
+host = 'localhost'
+port = 5432
+timeout = 30
+
+[[database]]
+name = 'replica'
+host = 'localhost'
+port = 5433
+timeout = 15
+```
+
+#### 2. Key Naming Conventions
+
+- Use hyphens instead of underscores in key names for better ergonomics.
+- Apply nomenclature guidelines to key and table names.
+- Use Latin-derived words when they are the established norm in the domain.
+
+**❌ Avoid:**
+```toml
+max_connections = 100
+retry_count = 3
+database_url = 'postgresql://localhost/db'
+```
+
+**✅ Prefer:**
+```toml
+max-connections = 100
+retry-count = 3
+database-url = 'postgresql://localhost/db'
+```
+
+### Style Standards
+
+#### 1. String Values
+
+- Use single quotes for string values unless escapes are needed.
+- Use double quotes when escapes are required.
+- Use triple single quotes for multi-line strings (consistency with Python docstrings).
+
+**❌ Avoid:**
+```toml
+name = "example-service"
+description = "A service for processing data"
+pattern = "user-.*"
+```
+
+**✅ Prefer:**
+```toml
+name = 'example-service'
+description = 'A service for processing data'
+pattern = 'user-.*'
+
+# Use double quotes when escapes are needed
+windows-path = "C:\\Program Files\\Example"
+message = "Line 1\nLine 2"
+
+# Use triple single quotes for multi-line strings
+description = '''
+This is a longer description
+that spans multiple lines.
+'''
+```
+
+#### 2. Array and Table Formatting
+
+- Keep arrays and inline tables on single lines when they fit within reasonable length.
+- For longer arrays, place each element on its own line with proper indentation.
+
+**✅ Prefer:**
+```toml
+ports = [ 8080, 8443, 9090 ]
+database = { host = 'localhost', port = 5432 }
+
+# For longer arrays
+allowed-origins = [
+ 'https://example.com',
+ 'https://api.example.com',
+ 'https://admin.example.com',
+]
+```
+
+### Comprehensive Example: Configuration with Multiple Violations
+
+Here is a TOML configuration that demonstrates many compliance violations:
+
+```toml
+[server_config]
+host_name = "localhost"
+port_number = 8080
+max_connections = 100
+
+[server_config.database_primary]
+host = "localhost"
+port = 5432
+connection_timeout = 30
+retry_attempts = 3
+
+[server_config.database_replica]
+host = "localhost"
+port = 5433
+connection_timeout = 15
+retry_attempts = 2
+
+allowed_hosts = ["https://example.com", "https://api.example.com", "https://admin.example.com"]
+
+description = "This is a multi-line description that explains what this service does and how it should be configured."
+```
+
+Violations identified:
+1. **Underscore key names**: `server_config`, `host_name`, `port_number`, `max_connections` should use hyphens
+2. **Custom subtables**: `[server_config.database_primary]` and `[server_config.database_replica]` should be table arrays
+3. **Double quotes**: String values using double quotes without escapes needed
+4. **Array formatting**: Long array on single line should be split across multiple lines
+5. **Multi-line string**: Long description should use triple single quotes
+6. **Key placement**: `allowed_hosts` and `description` appear after a table header, so TOML scoping makes them members of that subtable; intended top-level keys must precede all table headers
+
+Corrected version:
+```toml
+allowed-hosts = [
+    'https://example.com',
+    'https://api.example.com',
+    'https://admin.example.com',
+]
+
+description = '''
+This is a multi-line description that explains what this service does
+and how it should be configured.
+'''
+
+[[server-config]]
+name = 'main'
+host-name = 'localhost'
+port-number = 8080
+max-connections = 100
+
+[[database]]
+name = 'primary'
+host = 'localhost'
+port = 5432
+connection-timeout = 30
+retry-attempts = 3
+
+[[database]]
+name = 'replica'
+host = 'localhost'
+port = 5433
+connection-timeout = 15
+retry-attempts = 2
+```
+
+## Review Report Format
+
+Phase 1 Output:
+1. **Compliance Summary**: Overall assessment with file-by-file breakdown
+2. **Standards Violations**: Categorized list with specific line references and explanations
+3. **Configuration Analysis**: Table organization and key naming assessments
+4. **Remediation Plan**: Systematic order of fixes to be applied
+5. **Risk Assessment**: Any changes that require careful validation
+
+Phase 2 Output:
+1. **Applied Fixes**: Summary of all changes made, categorized by standard
+2. **Files Modified**: Complete list with brief description of changes
+3. **Manual Review Required**: Any issues requiring human judgment
+
+## Conformance Process
+
+### 1. Analysis Phase (PHASE 1)
+- Examine target files to understand current state
+- Identify configuration design patterns that need updating
+- Generate comprehensive compliance report
+- **Requirements**: Complete review and report before any remediation
+- **Focus**: Reference specific lines with concrete examples and explain reasoning
+
+### 2. Systematic Correction (PHASE 2)
+Apply fixes in systematic order:
+1. **Key Naming**: Convert underscores to hyphens in key names
+2. **Table Organization**: Convert custom subtables to table arrays with `name` fields
+3. **String Quoting**: Change double quotes to single quotes (unless escapes needed)
+4. **Multi-line Strings**: Convert to triple single quotes format
+5. **Array Formatting**: Split long arrays across multiple lines with proper indentation
+6. **Nomenclature**: Apply naming guidelines to keys and table names
+
+**Requirements**:
+- Maintain exact functionality while improving standards adherence
+- Validate that configuration files remain syntactically valid
+- Preserve all semantic meaning of configuration values
+
+## Safety Requirements
+
+Stop and consult if:
+- Configuration structure changes would alter application behavior
+- Complex nested configurations require architectural decisions
+- File contains domain-specific conventions that conflict with general guidelines
+- Syntax errors occur during modification
+
+Your responsibilities:
+- Maintain exact functionality while improving practices/style
+- Use project patterns consistently per the guides
+- Reference TOML documentation guides for complex cases
+- Verify all changes preserve configuration semantics
+
+## Success Criteria
+
+- [ ] All key names use hyphens instead of underscores
+- [ ] Custom subtables converted to table arrays where appropriate
+- [ ] String values use single quotes (double only when escapes needed)
+- [ ] Multi-line strings use triple single quotes
+- [ ] Long arrays are properly formatted across multiple lines
+- [ ] Nomenclature guidelines applied to keys and table names
+- [ ] No functionality changes to configuration behavior
+- [ ] Files remain syntactically valid TOML
+
+## Final Report
+
+Upon completion, provide a brief report covering:
+- Specific conformance issues corrected (categorized by the priority issues above)
+- Number of files modified
+- Any patterns that required manual intervention
+- Any deviations from guides and justification
\ No newline at end of file
diff --git a/.auxiliary/configuration/claude/commands/cs-copier-update.md b/.auxiliary/configuration/claude/commands/cs-copier-update.md
new file mode 100644
index 0000000..16471af
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-copier-update.md
@@ -0,0 +1,131 @@
+---
+allowed-tools: Read, Write, Edit, MultiEdit, LS, Glob, Grep, Bash(copier:*), Bash(git status), Bash(git add:*), Bash(git rm:*), Bash(rg:*), Bash(grep:*), Bash(hatch --env develop run make-all), TodoWrite
+description: Synchronize project with Copier template updates, intelligently resolving merge conflicts
+---
+
+# Template Synchronization
+
+Synchronize project with its Copier template by running updates and automatically resolving common merge conflict patterns while preserving local customizations.
+
+Request from user: $ARGUMENTS
+
+## Context
+
+- Template answers file: @.auxiliary/configuration/copier-answers.yaml
+- Current git status: !`git status --porcelain`
+- Existing conflicts check: !`grep -r "^<<<<<<<\|^=======$\|^>>>>>>>" . --exclude-dir=.git || echo "No conflicts"`
+- Project conventions: @.auxiliary/configuration/conventions.md
+
+## Prerequisites
+
+Before running template synchronization, ensure:
+- Working directory is completely clean (no staged or unstaged changes)
+- Copier is installed and accessible via command line
+- Template answers file exists at `.auxiliary/configuration/copier-answers.yaml`
+- Git repository is in a stable state for applying updates
+
+## Process Summary
+
+Key functional areas:
+1. **Template Update**: Run copier update with project-specific settings
+2. **Conflict Detection**: Identify and categorize merge conflicts from template changes
+3. **Intelligent Resolution**: Automatically resolve conflicts favoring upstream improvements while preserving local customizations
+4. **File Lifecycle Management**: Handle additions, renames, and deletions from template updates
+5. **Validation**: Ensure complete conflict resolution and commit changes with template version
+
+## Safety Requirements
+
+Stop and consult the user if:
+- Working directory is not clean (has staged or unstaged changes)
+- Complex conflicts exist that could result in loss of local customizations
+- Template artifacts cannot be reliably distinguished from intentional local content
+- Multiple conflicting resolution strategies are equally valid
+- Copier update fails with unrecoverable errors
+- Critical project files show unexpected merge conflicts
+
+## Execution
+
+Execute the following steps:
+
+### 1. Pre-Update Validation
+Check project state and prepare for template synchronization:
+- Verify git working directory is completely clean (halt if any changes exist)
+- Confirm template answers file exists and is readable
+- Document any existing conflicts to avoid confusion
+- Ensure repository is on the expected branch
+
+### 2. Execute Template Update
+Run copier update with project-specific configuration:
+```bash
+copier update --answers-file .auxiliary/configuration/copier-answers.yaml --skip-answered
+```
+- Capture copier output to extract template version information
+- Detect update completion status and any reported conflicts
+- Identify new, modified, and deleted files from the update
+
+### 3. Conflict Analysis and Categorization
+Systematically identify and categorize all conflicts:
+- Scan for merge conflict markers (`<<<<<<<`, `=======`, `>>>>>>>`)
+- Classify conflicts by type:
+ - **Structure consolidation**: Old sections moved into organized subsections
+ - **Upstream additions**: New template content (toctree entries, sections, files)
+ - **Language refinements**: Policy and wording improvements
+ - **Template artifacts**: TODO comments, placeholder content
+ - **Complex conflicts**: Overlapping local and upstream modifications
+
+### 4. Intelligent Conflict Resolution
+Apply resolution strategies based on conflict categorization:
+
+**Auto-resolve structure consolidation conflicts:**
+- Accept new organization when local content is preserved in new structure
+- Remove orphaned sections that were properly consolidated
+
+**Auto-resolve upstream additions:**
+- Accept new toctree entries, sections, and configuration additions
+- Stage new files and directories from template
+
+**Auto-resolve language refinements:**
+- Accept upstream wording and policy improvements
+- Preserve local semantic modifications when they don't conflict
+
+**Handle template artifacts intelligently:**
+- Detect TODO comments and placeholder content that may have been intentionally removed
+- Avoid reintroducing template boilerplate that conflicts with project maturity
+
+### 5. File Lifecycle Management
+Handle template-driven file changes:
+- Stage all new files and directories added by template
+- Process file renames (e.g., `cs-develop-tests.md` → `cs-develop-pytests.md`)
+- Remove obsolete files that have been replaced or are no longer needed
+- Update git index to reflect all template changes
+
+### 6. Resolution Verification
+Ensure complete and accurate conflict resolution:
+- Scan entire project for remaining merge conflict markers
+- Verify no orphaned conflict sections remain
+- Confirm all auto-resolved conflicts maintain local customizations
+- Validate file integrity and proper git staging
+
+### 7. Project Validation
+Verify template changes don't break project functionality:
+```bash
+hatch --env develop run make-all
+```
+- Run full project validation including linting, type checking, and tests
+- Ensure all quality gates pass after template synchronization
+- Address any validation failures before proceeding to commit
+
+### 8. Commit Template Changes
+Create commit with template version information:
+- Extract template version from copier output or updated answers file
+- Generate commit message: "Update project from Copier template (v{version})."
+- Include standard co-authoring footer for Claude Code
+- Use git commit (requires user approval) to commit all staged changes
+
+### 9. Conflict Resolution Report
+Provide comprehensive summary of synchronization results:
+- List all conflicts automatically resolved with resolution strategy
+- Report new files, renames, and deletions processed
+- Identify any conflicts requiring manual intervention
+- Confirm template version successfully applied
+- Note any remaining tasks or follow-up actions needed
\ No newline at end of file
diff --git a/.auxiliary/configuration/claude/commands/cs-create-command.md b/.auxiliary/configuration/claude/commands/cs-create-command.md
new file mode 100644
index 0000000..d7ba98b
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-create-command.md
@@ -0,0 +1,108 @@
+---
+allowed-tools: Write, Read, LS
+description: Generate a new custom slash command with consistent structure and formatting
+---
+
+# Generate Slash Command
+
+Generate a new custom slash command following established patterns for structure, tone, and formatting.
+
+Target: $ARGUMENTS
+
+**IMPORTANT**: You are creating slash commands for other Claude instances to execute. They will have no knowledge of:
+- The concept of "arguments" being passed to slash commands
+- The ARGUMENTS variable or its expansion
+- The meta-context of slash command generation
+- When creating content, avoid using the word "command" in titles or explanations - use terms like "process", "workflow", or "task" instead
+
+Your job is to interpret the user's request and create a complete, self-contained slash command.
+
+## Input Interpretation
+
+The user's request may take various forms:
+- Simple: `cs-analyze-performance`
+- Descriptive: `Named cs-inquire.md with a process outlined in .auxiliary/notes/inquire-command.md`
+- Reference-based: `Based on .auxiliary/notes/summarize-project-command.md`
+- Complex: `cs-update-deps that checks package.json and updates dependencies safely`
+
+Extract from the user's input:
+1. **Filename** (must start with `cs-`)
+2. **Purpose/functionality** (from description or referenced files)
+3. **Special requirements** (referenced processes, specific tools needed)
+
+## Context
+
+- Current custom commands: !`ls .claude/commands/cs-*.md 2>/dev/null || echo "No cs-* commands found"`
+- Referenced files (if any): Check for existence and read as needed
+- Command template: @.auxiliary/configuration/claude/miscellany/command-template.md
+
+## Prerequisites
+
+Before creating the slash command, ensure:
+- Clear understanding of the intended purpose
+- Filename follows `cs-*` naming pattern
+- No existing file with the same name
+- Any referenced process files are accessible
+
+## Generation Process
+
+### 1. Analyze User Request
+
+From the user's input, determine:
+- **Filename** (extract `cs-*.md` name)
+- **Purpose** (what should the generated slash command accomplish)
+- **Required tools** (based on functionality)
+- **Process details** (read any referenced files for specifics)
+
+### 2. Read Template Structure
+
+Read the template to get the base structure, then customize:
+- Replace placeholder content with appropriate descriptions
+- Customize sections based on purpose
+- Select appropriate allowed-tools
+- Add relevant @-references if applicable
+- Add checklists to sections if applicable
+
+### 3. Apply Formatting Standards
+
+**Professional Tone:**
+- Avoid making everything critical or important; no excessive
+ attention-grabbing
+- Avoid excessive emphasis (no all-caps headers, minimal bold text)
+- Professional headers: `## Prerequisites` not `## MANDATORY PREREQUISITES`
+- Use "Stop and consult" for when user input should be solicited
+
+**Structure:**
+- Include Prerequisites section early in document
+- Include Context section with command expansions (exclamation point followed
+ by command in backticks) for dynamic info when needed
+- Use @-references for local documentation when applicable
+- Provide clear Process Summary before detailed steps
+- Include Safety Requirements section for error handling
+
+### 4. Tool Selection
+
+Choose appropriate allowed-tools based on functionality:
+
+**Common tool combinations:**
+- **File operations**: `Write, Read, Edit, MultiEdit, LS, Glob, Grep`
+- **Git operations**: `Bash(git status), Bash(git add:*), Bash(git commit:*), Bash(git push:*)`
+- **Python development**: `Bash(hatch --env develop run:*), Bash(pytest:*), Bash(ruff:*)`
+- **GitHub operations**: `Bash(gh run list:*), Bash(gh run watch:*), Bash(gh pr create:*)`
+
+### 5. Generate and Write File
+
+1. **Read the template** from `.auxiliary/configuration/claude/miscellany/command-template.md`
+2. **Customize all sections** based on the specific purpose
+3. **Replace placeholders** with appropriate content for the target functionality
+4. **Write the final file** to `.claude/commands/[filename].md`
+
+
+### 6. Validation and Summary
+
+After generation:
+- Verify file structure matches established patterns
+- Check that allowed-tools are appropriate for the functionality
+- Ensure professional tone throughout (no excessive attention-grabbing, etc.)
+- Confirm all required sections are present and customized
+- Provide succinct summary of changes made to the user
diff --git a/.auxiliary/configuration/claude/commands/cs-design-python.md b/.auxiliary/configuration/claude/commands/cs-design-python.md
new file mode 100644
index 0000000..ea4ff01
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-design-python.md
@@ -0,0 +1,144 @@
+---
+description: Python API design, filesystem organization, module structure, and interface specifications
+---
+
+# Python Design Analysis
+
+Analyze Python API design patterns, filesystem organization, module structure, class hierarchies, interface definitions, and design patterns to provide guidance on Python-specific structural decisions and project organization. Focus exclusively on interface contracts, signatures, and type specifications—never implementation details or method bodies.
+
+Request from user: $ARGUMENTS
+
+## Context
+
+- Architecture overview: @documentation/architecture/summary.rst
+- Filesystem patterns: @documentation/architecture/filesystem.rst
+- General practices: @.auxiliary/instructions/practices.rst
+- Python development guide: @.auxiliary/instructions/practices-python.rst
+- Code style: @.auxiliary/instructions/style.rst
+- Nomenclature: @.auxiliary/instructions/nomenclature.rst
+- Design documents: @documentation/architecture/designs/
+
+## Prerequisites
+
+Before providing design analysis, ensure:
+- Understanding of module organization and class hierarchies
+- Familiarity with Python practices and style guidelines
+- Knowledge of nomenclature conventions and naming patterns
+
+### Project Standards
+
+Before providing design analysis, you MUST:
+1. Read @.auxiliary/instructions/practices.rst for general development principles
+2. Read @.auxiliary/instructions/practices-python.rst for Python-specific patterns
+3. In a step on your TODO list, please attest that you have read the general and Python-specific practices guides and demonstrate your knowledge by writing one-sentence summaries on any three of the following topics:
+
+- interface specification patterns from comprehensive examples
+- module organization principles and content ordering
+- import organization for design specifications
+- wide parameter, narrow return interface patterns
+- immutable container design patterns
+- exception hierarchy design patterns
+- documentation specification requirements
+- nomenclature patterns from nomenclature guides
+
+## Process Summary
+
+Key functional areas:
+1. **Design Analysis**: Examine current Python structure and design patterns
+2. **Interface Specification**: Define clean API boundaries and contracts
+3. **Module Organization**: Apply filesystem and import patterns effectively
+4. **Class Design**: Create maintainable hierarchies and interface patterns
+5. **Documentation**: Specify design decisions with examples and rationale
+
+## Safety Requirements
+
+Stop and consult the user if:
+- Architectural decisions are needed instead of design specifications
+- Implementation details are requested instead of design specifications
+- Requirements analysis is needed instead of design specifications
+- User requests actual code implementations instead of specifications
+- Design decisions require architectural changes beyond Python structure
+- Interface changes would break existing API contracts significantly
+- Design conflicts with established filesystem organization patterns
+- Requirements are unclear or insufficient for proper design specification
+- Multiple design approaches have significant trade-offs requiring user input
+
+## Execution
+
+Execute the following steps:
+
+### 1. Design Analysis
+Examine Python structure and patterns:
+- Review module organization and import patterns
+- Analyze class hierarchies and interface definitions
+- Identify design patterns in use
+- Assess alignment with practices and nomenclature guidelines
+- Document design strengths and improvement opportunities
+
+### 2. Interface Specification
+
+**CRITICAL: Define interfaces through signatures and type annotations only. Avoid specifying how methods should be implemented internally—focus on contracts, not implementation logic.**
+
+**Define clean API boundaries and contracts**:
+- Focus exclusively on signatures and type annotations (never implementation logic or method bodies)
+- Apply wide parameter, narrow return patterns for robust interfaces
+- Design exception class hierarchies following established patterns
+- Apply appropriate naming conventions from nomenclature guidelines
+- Define annotations using proper `__.typx.TypeAlias` patterns when appropriate
+- Consider immutability preferences in container design
+- Consult comprehensive guides for detailed patterns when needed
+
+### 3. Filesystem and Module Organization Design
+
+**Apply Python-specific organizational patterns and filesystem structure**:
+- Design project filesystem organization and update filesystem.rst as needed
+- Design module structure following standard organization order
+- Plan centralized import integration for organized dependencies
+- Specify exception hierarchies and their organization
+- Design interface patterns for different component types
+- Plan type alias organization and dependency management
+- Consult comprehensive guides for detailed organizational patterns
+
+### 4. Class and Function Design
+
+**CRITICAL: Design class structures through their public contracts and type relationships. Specify signatures, inheritance patterns, and interface boundaries—never internal implementation logic or method bodies.**
+
+**Create maintainable Python structures**:
+- Design class hierarchies with appropriate immutable base classes and mixins (`__.immut.Object`, `__.immut.Protocol`, etc.)
+- Specify function signatures using wide input, narrow output patterns with proper spacing
+- Apply nomenclature guidelines for methods, attributes, and functions
+- Design immutable data structures and container patterns
+- Plan dependency injection and configuration patterns with sensible defaults
+- Focus exclusively on interface specifications, not implementation details
+- Consult comprehensive guides for detailed design patterns
+
+### 5. Design Documentation
+
+**Create comprehensive design specifications without implementations**:
+
+**CRITICAL:**
+- Use atemporal language in all specifications. Avoid temporal terms like 'new', 'current', 'existing', 'future'—designs should read as canonical specifications independent of implementation timeline.
+- Provide only signatures, contracts, and interface specifications - no implementations
+
+- Generate design documents following established format
+- Update `documentation/architecture/designs/index.rst` to include designs
+- Do not provide exception class implementations, function bodies, or method implementations
+- Document interface contracts and expected behaviors (contracts only, not code)
+- Provide design examples using signatures and type annotations only
+- Specify exception handling patterns and error propagation (exception classes by name/signature only)
+- Document design rationale and trade-off decisions
+- Consult comprehensive guides for documentation formatting requirements
+
+### 6. Design Validation
+
+**Ensure design quality and consistency**:
+- Verify alignment with practices, style, and nomenclature guidelines
+- Check consistency with filesystem organization patterns
+- Validate that wide parameter/narrow return patterns are followed
+- Ensure proper separation between public and private interfaces
+- Confirm that design supports expected usage patterns and extensibility
+- Verify that specifications focus on contracts, not implementations
+- Consult comprehensive guides to verify pattern alignment
+
+### 7. Summarize Updates
+Provide concise summary of updates to the user.
diff --git a/.auxiliary/configuration/claude/commands/cs-develop-pytests.md b/.auxiliary/configuration/claude/commands/cs-develop-pytests.md
new file mode 100644
index 0000000..b1acca9
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-develop-pytests.md
@@ -0,0 +1,263 @@
+---
+description: Implement comprehensive Python tests following an existing test plan and project guidelines
+---
+
+# Implement Python Tests
+
+For systematic test implementation following a pre-created test plan and project testing guidelines.
+
+Test plan path or special test-writing instructions: $ARGUMENTS
+
+Implement tests according to the provided test plan only.
+
+## Context
+
+- Current git status: !`git status --porcelain`
+- Current branch: !`git branch --show-current`
+- Existing test structure: !`find tests -name "*.py" | head -20`
+- Test organization: @documentation/architecture/testplans/summary.rst
+- Test plans index: @documentation/architecture/testplans/index.rst
+
+## Prerequisites
+
+Ensure that you:
+- Have a valid test plan document
+- Have verified access to target code modules referenced in the plan
+- Have read any relevant `CLAUDE.md` file
+- Understand the test-writing guidelines: @.auxiliary/instructions/tests.rst
+
+## Testing Principles (from project guidelines)
+
+**Core Principles:**
+1. **Dependency Injection Over Monkey-Patching**: Use injectable dependencies
+ for testability
+2. **Performance-Conscious**: Prefer in-memory filesystems (pyfakefs) over temp
+ directories
+3. **Avoid Monkey-Patching**: Never patch internal code; use dependency
+ injection instead
+4. **100% Coverage Goal**: Aim for complete line and branch coverage
+5. **Test Behavior, Not Implementation**: Focus on observable behavior and
+ contracts
+
+**Anti-Patterns to Avoid:**
+- Monkey-patching internal code (will fail with immutable objects)
+- Excessive mocking of internal components
+- Testing implementation details vs. behavior
+- Using temp directories when pyfakefs suffices
+
+**Organization:**
+- Follow the systematic numbering conventions detailed in the test guidelines
+
+## Safety Requirements
+
+Stop and consult the user if:
+- No test plan path is provided
+- Test plan cannot be read or is invalid
+- Plan conflicts with project testing principles
+- Implementation deviates from plan without justification
+- Implementation cannot follow the test plan as specified
+- Plan requires tests that violate project principles
+- Tests require monkey-patching internal code
+- Planned test numbering clashes with existing conventions
+- Required test fixtures or dependencies are unavailable
+- Test plan contains contradictions or unclear instructions
+
+**Your responsibilities:**
+- Follow the test plan precisely while adhering to project conventions
+- Focus only on uncovered areas specified in the plan
+- Avoid redundant testing of functionality already covered by doctests
+- Use dependency injection patterns as specified in the plan
+- Implement tests exactly as planned without adding extras
+- Maintain systematic test numbering as outlined in the plan
+- Ensure tests validate behavior, not implementation
+- Document any necessary deviations from the plan with clear justification
+
+## Test Implementation Process
+
+### 0. Pre-Flight Verification
+Verify access to project guidelines:
+
+Read and confirm you can access the complete project guidelines:
+- Testing: @.auxiliary/instructions/tests.rst
+- Python Practices: @.auxiliary/instructions/practices-python.rst
+- General Practices: @.auxiliary/instructions/practices.rst
+- Style: @.auxiliary/instructions/style.rst
+
+You must successfully access and read all four guides before proceeding. If any guide cannot be accessed, stop and inform the user.
+
+### 1. Test Plan Reading and Validation
+Read and validate the provided test plan:
+
+**Validate plan completeness:**
+- Verify plan contains coverage analysis summary
+- Confirm test strategy is clearly defined
+- Check that component-specific tests are detailed
+- Ensure implementation notes are present
+- Validate success metrics are specified
+
+Stop if the plan is incomplete, unclear, or missing critical sections.
+
+### 2. Plan Compliance Verification
+**Ensure plan aligns with project principles:**
+
+**Verify plan adheres to project testing guidelines:**
+- No monkey-patching of internal code required
+- Dependency injection patterns are viable
+- Test numbering follows project conventions
+- No external network testing planned
+
+**Check for conflicts with existing tests:**
+- Review planned test module names against existing files
+- Verify planned test function numbering doesn't conflict
+- Ensure no duplication of existing test coverage
+
+### 3. Test Data and Fixture Setup
+**Prepare test data and dependencies as specified in the plan:**
+
+**Ensure required test dependencies are available:**
+If the test plan requires dependencies not in the current environment, add them to `pyproject.toml`:
+
+```toml
+[tool.hatch.envs.develop]
+dependencies = [
+ # ... existing dependencies
+ "pyfakefs", # For filesystem mocking
+ "pytest-asyncio", # For async test support
+ # ... other test-specific dependencies in alphabetical order
+]
+```
+
+After adding dependencies, rebuild the environment to ensure consistency:
+```bash
+hatch env prune
+```
+
+**Create required test data under tests/data/:**
+- Set up fake packages for extension mechanisms (if planned)
+- Prepare captured artifacts and snapshots (if planned)
+- Create any mock data files as specified in the plan
+
+Only create test data explicitly mentioned in the test plan.
+
+### 4. Test Module Creation/Updates
+**Implement test modules following the plan:**
+
+**For each planned test module:**
+- Create or update test files with planned naming (e.g., `test_100_exceptions.py`)
+- Follow planned test function numbering within modules
+- Implement only the tests specified in the plan
+- Use dependency injection patterns as outlined in the plan
+
+**Key Implementation Guidelines:**
+- Use dependency injection for all external dependencies as planned
+- Prefer `pyfakefs.Patcher()` for filesystem operations as specified
+- Mock only third-party services, never internal code
+- **Insert tests in numerical order within files** - do NOT append to end
+- **Write behavior-focused docstrings**: "Functionality is correct with Y" NOT "function_name does X with Y"
+- Follow existing naming conventions and code style
+- Implement tests in the exact order and numbering specified in the plan
+
+### 5. Coverage Validation
+**Verify implementation matches plan coverage goals:**
+```bash
+hatch --env develop run testers
+hatch --env develop run coverage report --show-missing
+```
+
+Verify plan compliance:
+- Run full test suite to ensure no regressions
+- Check that coverage matches the plan's target metrics
+- Verify all planned test functions are implemented
+- Confirm coverage gaps identified in the plan are addressed
+- Ensure no existing functionality is broken
+
+### 6. Code Quality Validation
+**Ensure implemented tests meet project standards:**
+```bash
+hatch --env develop run linters
+```
+
+**Requirements:**
+- All linting checks must pass
+- Note that the linters do not check style; you must verify style compliance
+- No violations of project coding standards
+- Test docstrings are clear and descriptive
+- Proper imports and dependencies
+- Implementation follows all conventions specified in the plan
+
+## Test Pattern Examples
+
+**Import Patterns:**
+
+*Direct imports (preferred for most cases):*
+```python
+from mypackage import mymodule
+
+def test_100_basic_functionality( ):
+ ''' Module function works correctly with valid input. '''
+ result = mymodule.process_data( 'test' )
+ assert result == 'processed: test'
+```
+
+**Dependency Injection Pattern:**
+```python
+async def test_100_process_with_custom_processor( ):
+ ''' Process function accepts custom processor via injection. '''
+ def mock_processor( data ):
+ return f"processed: {data}"
+
+ result = await process_data( 'test', processor = mock_processor )
+ assert result == "processed: test"
+```
+
+**Filesystem Operations (Preferred):**
+```python
+def test_200_config_file_processing( ):
+ ''' Configuration files are processed correctly. '''
+ with Patcher( ) as patcher:
+ fs = patcher.fs
+ fs.create_file( '/fake/config.toml', contents = '[section]\nkey="value"' )
+ result = process_config_file( Path( '/fake/config.toml' ) )
+ assert result.key == 'value'
+```
+
+**Error Handling:**
+```python
+def test_300_invalid_input_handling( ):
+ ''' Invalid input raises appropriate exceptions. '''
+ with pytest.raises( ValueError, match = "Invalid data format" ):
+ process_invalid_data( "malformed" )
+```
+
+## Success Criteria
+
+Implementation is complete when:
+- [ ] All tests specified in the plan have been implemented
+- [ ] Coverage matches or exceeds the plan's target metrics
+- [ ] All planned test modules and functions are created with correct numbering
+- [ ] Test data and fixtures are set up as specified in the plan
+- [ ] All new tests pass consistently
+- [ ] No existing tests are broken
+- [ ] Linting passes without issues
+- [ ] Project coding practices and style have been followed
+- [ ] Tests follow project numbering conventions as planned
+- [ ] Tests are inserted in proper numerical order within files
+- [ ] Test docstrings focus on behavior, not function names
+- [ ] Dependency injection is used as specified in the plan
+- [ ] No monkey-patching of internal code
+- [ ] Performance-conscious patterns are applied as planned
+
+**Note**: Always run full validation (`hatch --env develop run linters && hatch
+--env develop run testers`) before considering the task complete.
+
+## Final Report
+
+Upon completion, provide a brief report covering:
+- **Plan Compliance**: Confirmation that all planned tests were implemented as specified
+- **Coverage Achievement**: Final coverage percentages vs. plan targets
+- **Deviations from Plan**: Any necessary changes made to the plan during implementation with justification
+- **Technical Issues Resolved**: Any conflicts encountered and how they were resolved
+- **Pragma Directives Applied**: Any `# pragma: no cover` or `# pragma: no branch` added with rationale
+- **Test Data Created**: Summary of fixtures and test data files created under `tests/data/`
+- **Module Updates**: List of test modules created or updated with their numbering
+- **Code Quality**: Confirmation that tests are properly ordered and have behavior-focused docstrings
diff --git a/.auxiliary/configuration/claude/commands/cs-document-examples-rst.md b/.auxiliary/configuration/claude/commands/cs-document-examples-rst.md
new file mode 100644
index 0000000..460ca10
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-document-examples-rst.md
@@ -0,0 +1,117 @@
+---
+description: Creates practical, testable examples documentation
+---
+
+# Document Examples
+
+Develops practical, testable examples for documentation under
+`documentation/examples/` that increase test coverage while remaining relatable
+and succinct.
+
+Topic: $ARGUMENTS
+
+## Context
+
+- Project structure: @documentation/architecture/filesystem.rst
+- Existing examples: !`ls -la documentation/examples/ 2>/dev/null || echo "No examples directory"`
+- Code coverage data: !`hatch --env develop run coverage report --show-missing 2>/dev/null || echo "No coverage data available"`
+
+## Prerequisites
+
+Before creating examples documentation:
+- Understand the target audience (developers vs end users)
+- Analyze existing codebase to identify core functionality patterns
+- Review existing examples for organization, completeness, and thematic inspiration
+- Examine @.auxiliary/instructions/ for style and nomenclature requirements
+
+## Process Summary
+
+Key functional areas:
+1. **Analysis**: Survey codebase and existing examples to identify documentation gaps
+2. **Theme Development**: Create coherent scenarios that demonstrate functionality progression
+3. **Content Creation**: Write succinct examples using proper reStructuredText formatting
+4. **Validation**: Ensure examples follow project practices and can serve as informal tests
+
+## Safety Requirements
+
+Stop and consult the user if:
+- Examples require creating contrived scenarios that don't reflect real usage
+- Multiple conflicting themes emerge without clear organizational strategy
+- Proposed examples would expose internal implementation details inappropriately
+- Documentation format conflicts with existing project conventions
+
+## Execution
+
+Execute the following steps:
+
+### 1. Analyze Existing Documentation Structure
+
+Survey the current documentation to understand patterns and identify gaps. Read
+existing example files to understand established themes and formatting
+approaches.
+
+### 2. Survey Codebase for Example Opportunities
+
+Identify public API surfaces and common usage patterns. Analyze coverage
+reports in `.auxiliary/artifacts/coverage-pytest` if available.
+
+Look for:
+- Public classes and functions that need demonstration
+- Common workflows that span multiple components
+- CLI commands and their typical usage patterns
+- Error handling scenarios that users should understand
+
+### 3. Develop Thematic Coherence
+
+Based on analysis, choose one of these organizational approaches:
+
+- **Domain scenarios**: Practical use cases
+- **API progression**: Basic to advanced usage of core functionality
+- **Workflow examples**: End-to-end processes showing component interaction
+- **CLI workflows**: Command sequences for common tasks
+
+### 4. Create Example Documentation
+
+Write examples following these requirements:
+
+- Use Sphinx reStructuredText format with proper double backticks for inline literals
+- Include blank lines before list items per reStructuredText conventions
+- Structure as progression from simple to complex scenarios
+- Use doctest format for Python API examples where testable
+- Use code-block format for CLI examples with explicit command annotation
+- Keep code blocks comment-free; put explanatory text between blocks
+- Follow @.auxiliary/instructions/practices.rst for general code organization
+- Follow @.auxiliary/instructions/style.rst for formatting
+- Follow @.auxiliary/instructions/nomenclature.rst for naming
+- When documenting Python code, also follow .auxiliary/instructions/practices-python.rst for comprehensive Python standards
+- When documenting Rust code, also follow .auxiliary/instructions/practices-rust.rst for comprehensive Rust standards
+- When documenting TOML configuration, also follow .auxiliary/instructions/practices-toml.rst for comprehensive TOML standards
+
+### 5. Ensure Practical Relevance
+
+Verify each example:
+
+- Demonstrates functionality users actually need
+- Shows practical data and scenarios, remaining minimalist rather than elaborate
+- Includes appropriate error cases and edge conditions
+- Can serve as informal test coverage for documented features
+- Follows established project patterns for similar examples
+
+### 6. Validate Documentation Quality
+
+Review final documentation for:
+
+- Proper reStructuredText syntax and formatting
+- Consistent theme and progression across examples
+- Adherence to project style guidelines
+- Executable/testable nature of code examples
+- Clear explanatory text that guides readers through concepts
+
+### 7. Provide Summary
+
+Provide a succinct summary to the user describing:
+
+- What examples were created or updated
+- The organizational theme chosen and why
+- Key functionality areas covered
+- How the examples serve both documentation and testing goals
diff --git a/.auxiliary/configuration/claude/commands/cs-excise-python.md b/.auxiliary/configuration/claude/commands/cs-excise-python.md
new file mode 100644
index 0000000..d2731f1
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-excise-python.md
@@ -0,0 +1,155 @@
+---
+allowed-tools: Read, Write, Edit, MultiEdit, LS, Glob, Grep, Bash(hatch --env develop run:*), Bash(git status), Bash(git diff), mcp__pyright__references, mcp__pyright__hover, mcp__pyright__diagnostics
+description: Analyze Vulture dead code findings and remediate through selective removal or vulturefood.py whitelisting
+---
+
+# Python Dead Code Analysis and Remediation
+
+Systematically analyze Vulture dead code findings and remediate through selective removal or vulturefood.py whitelisting using Pyright MCP server for accurate symbol reference verification.
+
+Target files or scope: $ARGUMENTS
+
+## Context
+
+- Current git status: !`git status --porcelain`
+- Current branch: !`git branch --show-current`
+- Existing vulturefood entries: !`wc -l .auxiliary/configuration/vulturefood.py`
+- Vulture configuration: @pyproject.toml (tool.vulture section)
+
+## Prerequisites
+
+Before running this analysis, ensure:
+- Understanding of project codebase and critical symbols
+- Read project documentation guides:
+ - @.auxiliary/instructions/practices.rst
+ - @.auxiliary/instructions/style.rst
+- Vulture is installed and configured in the development environment
+- Pyright MCP server is available for symbol reference verification
+
+## Process Summary
+
+Key functional areas:
+1. **Detection and Parsing**: Run Vulture and parse output for unused symbols
+2. **Reference Verification**: Use Pyright MCP server to verify actual symbol usage
+3. **Classification Analysis**: Apply heuristics to identify false positives vs. genuine dead code
+4. **Selective Remediation**: Present findings with confidence levels for user decision
+5. **Implementation**: Remove dead code or add entries to vulturefood.py as appropriate
+
+## Safety Requirements
+
+Stop and consult the user if:
+- Uncertain about whether a symbol should be removed or whitelisted
+- Complex inheritance hierarchies with unclear symbol usage patterns
+- Vulture reports conflict significantly with Pyright reference analysis
+- Ambiguous decorator patterns that don't fit standard heuristics
+
+## Execution
+
+Execute the following steps:
+
+### 1. Vulture Analysis and Parsing
+
+Run Vulture to identify potentially unused symbols:
+```bash
+hatch --env develop run vulture --min-confidence=60
+```
+
+Examine the output to extract:
+- Symbol names and types (functions, classes, variables)
+- File locations and line numbers
+- Confidence levels reported by Vulture
+- Symbol categories (imports, definitions, assignments)
+
+### 2. Pyright Reference Verification
+
+For each symbol identified by Vulture, verify actual usage using Pyright MCP:
+
+Use `mcp__pyright__references` with **bare symbol names** (not qualified paths):
+- Correct: `symbolName="function_name"`
+- Incorrect: `symbolName="module.package.function_name"`
+
+Analyze reference results:
+- No references found: Likely genuine dead code
+- References found: Examine context for legitimacy
+- Import-only references: May indicate transitional dead code
+
+### 3. False Positive Classification
+
+Apply systematic heuristics to identify false positives:
+
+**Common False Positive Patterns:**
+- Abstract methods and protocol implementations
+- Decorator-registered functions (pytest fixtures, Flask routes, etc.)
+- Magic methods and dunder attributes
+- CLI entry points and script main functions
+- Test fixtures and utilities used via dynamic discovery
+- Library interface methods called by external code
+
+**Analysis Criteria:**
+- Decorator presence and types
+- Inheritance relationships and abstract base classes
+- Usage patterns in test files vs. main code
+- External integration points and plugin systems
+
+### 4. Autonomous Decision Making
+
+Apply systematic decision logic:
+
+**Remove Symbol If:**
+- No references found via Pyright (zero references)
+- No TODO comments mentioning future use of the symbol
+- Not an entry point function (e.g., `main`)
+- Not part of unimplemented interface or abstract base class
+- Not decorated with framework-specific decorators
+
+**Whitelist in vulturefood.py If:**
+- Has references but appears to be false positive (decorators, abstract methods, etc.)
+- Entry point functions like `main`
+- Abstract/interface implementations
+- Framework integration points with decorators
+- Magic methods and protocol compliance
+
+**Check for TODO Comments:**
+Examine surrounding code and docstrings for TODO comments that reference the symbol or indicate planned future usage.
+
+### 5. Implementation Decision
+
+Act autonomously based on decision logic:
+
+**For Symbol Removal:**
+- Remove symbol definitions and any orphaned imports
+- Verify removal doesn't break related functionality
+- Run linters to ensure clean code after removal
+
+**For Vulturefood Whitelisting:**
+- Add entries to `.auxiliary/configuration/vulturefood.py` with format:
+ ```python
+ symbol_name # description of why it's a false positive
+ ```
+- Group related entries and add explanatory comments
+- Maintain alphabetical organization within groups
+
+### 6. Validation and Verification
+
+After remediation:
+- Run Vulture again to confirm issues are resolved
+- Execute linters to ensure code quality: `hatch --env develop run linters`
+- Run tests to verify functionality: `hatch --env develop run testers`
+- Verify git diff shows only intended changes
+
+## Implementation Notes
+
+**Pyright MCP Usage:**
+- Use bare symbol names for accurate reference finding
+- Leverage superior semantic analysis over text-based search tools
+- Cross-reference hover information for additional context
+
+**Vulturefood Management:**
+- Maintain clear documentation for each whitelisted symbol
+- Group related false positives with explanatory sections
+- Prefer descriptive comments over generic suppression
+
+**Safety Practices:**
+- Remove code autonomously when decision criteria are clearly satisfied
+- Prioritize false positive whitelisting when uncertainty exists
+- Validate all changes through comprehensive testing
\ No newline at end of file
diff --git a/.auxiliary/configuration/claude/commands/cs-inquire.md b/.auxiliary/configuration/claude/commands/cs-inquire.md
new file mode 100644
index 0000000..2fcfb6f
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-inquire.md
@@ -0,0 +1,72 @@
+---
+allowed-tools: Read, LS, Glob, Grep, WebFetch, WebSearch
+description: Provide analytical responses and technical opinions without making code changes
+---
+
+# Technical Analysis and Discussion
+
+Provide analytical responses, technical opinions, and architectural discussion
+based on user questions. Focus on analysis and reasoning without making code
+modifications.
+
+User question or topic: $ARGUMENTS
+
+Stop and consult if:
+- The request explicitly asks for code changes or implementation
+- The question is unclear or lacks sufficient context
+- Multiple conflicting requirements are presented
+
+## Prerequisites
+
+Before providing analysis, ensure:
+- Clear understanding of the technical question being asked
+- Sufficient context about the codebase or architecture being discussed
+
+## Process Summary
+
+Key analytical areas:
+1. **Question Analysis**: Understand what is being asked and why
+2. **Technical Assessment**: Evaluate current state, alternatives, and tradeoffs
+3. **Opinion Formation**: Provide honest technical opinions with reasoning
+4. **Discussion**: Present pros/cons, alternatives, and recommendations
+
+## Execution
+
+Execute the following process:
+
+### 1. Question Understanding
+Carefully analyze the user's question to understand:
+- What specific technical aspect they want to discuss
+- The context and scope of their concern
+- Whether they're seeking validation, alternatives, or general analysis
+
+### 2. Current State Assessment
+Examine relevant parts of the codebase or architecture, if necessary:
+- Read pertinent files to understand current implementation
+- Identify patterns, conventions, and existing approaches
+- Note any potential issues or areas of concern
+
+### 3. Technical Analysis
+Provide comprehensive analysis including:
+- **Strengths**: What works well in the current approach
+- **Weaknesses**: Potential issues, limitations, or concerns
+- **Alternatives**: Different approaches that could be considered
+- **Tradeoffs**: Benefits and costs of different options
+
+### 4. Opinion and Recommendations
+Offer honest technical opinions:
+- Present your assessment based on best practices and experience
+- Provide pushback if you disagree with assumptions or proposals
+- Suggest better alternatives when they exist
+- Explain the reasoning behind your recommendations
+
+### 5. Discussion Points
+Raise additional considerations:
+- Edge cases that might not have been considered
+- Long-term maintenance implications
+- Performance, security, or scalability concerns
+- Integration with existing systems or patterns
+
+Remember: Your role is to analyze, discuss, and provide technical opinions—not
+to implement solutions or make code changes. Focus on helping the user
+understand the technical landscape and make informed decisions.
diff --git a/.auxiliary/configuration/claude/commands/cs-manage-prd.md b/.auxiliary/configuration/claude/commands/cs-manage-prd.md
new file mode 100644
index 0000000..31a74a2
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-manage-prd.md
@@ -0,0 +1,90 @@
+---
+allowed-tools: Read, Write, Edit, MultiEdit, LS, Glob, Grep
+description: Manage product requirements documents and feature planning
+---
+
+# Product Requirements Management
+
+Manage and update the Product Requirements Document (PRD) based on user input
+about product requirements, feature planning, and related topics.
+
+Request from user: $ARGUMENTS
+
+## Context
+
+- Current PRD state: @documentation/prd.rst
+- Requirements guidelines: @.auxiliary/instructions/requirements.rst
+
+## Prerequisites
+
+Before managing PRD content, ensure:
+- Understanding of current project scope and objectives
+- Familiarity with existing functional and non-functional requirements
+- @.auxiliary/instructions/requirements.rst guidelines are followed
+- Changes align with overall project strategy
+
+## Process Summary
+
+Key functional areas:
+1. **Analysis**: Review current PRD and understand requested changes
+2. **Requirements Processing**: Apply requirements.rst standards to new content
+3. **PRD Updates**: Make structured updates to documentation/prd.rst
+4. **Validation**: Ensure consistency and completeness
+
+### Process Restrictions
+
+- Do not provide a timeline for deliverables.
+- Do not plan sprints.
+
+## Safety Requirements
+
+Stop and consult the user if:
+- Requested changes significantly expand or reduce product scope
+- New requirements conflict with existing non-functional requirements
+- Changes affect critical path features or constraints
+- Requirements lack sufficient detail for implementation planning
+
+## Execution
+
+Execute the following steps:
+
+### 1. Review Current State
+Read and analyze the existing PRD to understand current scope.
+
+### 2. Process User Requirements
+Analyze the user input for:
+- New functional requirements
+- Changes to existing requirements
+- Updates to goals, objectives, or success criteria
+- Modifications to user personas or target users
+- New constraints or assumptions
+
+### 3. Apply Requirements Standards
+Follow @.auxiliary/instructions/requirements.rst guidelines:
+- Use specific, measurable, achievable, relevant, testable criteria
+- Apply proper user story format when appropriate
+- Assign requirement priorities (Critical/High/Medium/Low)
+- Include acceptance criteria for functional requirements
+- Maintain requirement traceability
+
+### 4. Update PRD Structure
+Make targeted updates to appropriate PRD sections:
+- Executive Summary (if scope changes)
+- Problem Statement (if new problems identified)
+- Goals and Objectives (if success criteria change)
+- Target Users (if new personas or needs identified)
+- Functional Requirements (most common updates)
+- Non-Functional Requirements (if technical requirements change)
+- Constraints and Assumptions (if new limitations discovered)
+- Out of Scope (if boundaries need clarification)
+
+### 5. Maintain Consistency
+Ensure all updates maintain PRD coherence:
+- Requirements align with stated goals and objectives
+- No conflicts between functional and non-functional requirements
+- User stories trace back to identified user needs
+- Acceptance criteria are testable and specific
+- Priority assignments reflect user value
+
+### 6. Summarize Updates
+Provide concise summary of updates to the user.
diff --git a/.auxiliary/configuration/claude/commands/cs-plan-pytests.md b/.auxiliary/configuration/claude/commands/cs-plan-pytests.md
new file mode 100644
index 0000000..72a2761
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-plan-pytests.md
@@ -0,0 +1,280 @@
+---
+description: Analyze Python test coverage gaps and create focused test plan for uncovered areas and edge cases
+---
+
+# Plan Python Tests
+
+For systematic analysis of test coverage gaps and creation of detailed test
+plans following project testing guidelines.
+
+Target module/functionality: $ARGUMENTS
+
+Focus on analysis and planning only - do not implement tests.
+
+## Context
+
+- Current git status: !`git status --porcelain`
+- Current branch: !`git branch --show-current`
+- Current test coverage: !`hatch --env develop run coverage report --show-missing`
+- Existing test structure: !`find tests -name "*.py" | head -20`
+- Test organization: @documentation/architecture/testplans/summary.rst
+- Test plans index: @documentation/architecture/testplans/index.rst
+
+## Prerequisites
+
+Ensure that you:
+- Have access to target code modules for analysis
+- Can generate current coverage reports
+- Have read any relevant `CLAUDE.md` file
+- Understand the test-writing guidelines: @.auxiliary/instructions/tests.rst
+
+## Safety Requirements
+
+Stop and consult the user if:
+- No target module or functionality is provided
+- Target code cannot be analyzed
+- Coverage data is unavailable
+- Coverage reports cannot be generated
+- Target modules cannot be read or analyzed
+- Analysis reveals fundamental testability issues
+- Test guidelines cannot be accessed
+- Network tests against real external sites are being considered
+
+**Your responsibilities:**
+- Focus entirely on analysis and planning - NO implementation
+- Create comprehensive, actionable test plans WITHOUT code snippets of test implementations
+- Focus planning on uncovered areas and edge cases
+- Brief third-party library examples (e.g., httpx mock transport) are acceptable if researched
+- Identify all coverage gaps systematically
+- Consider project testing philosophy: doctests for examples, pytest for edge cases
+- Produce clear, structured planning artifacts
+- Acknowledge immutability constraints - modules under test CANNOT be monkey-patched
+- Test private functions/methods via public API - understand why if this fails
+
+## Test Planning Process
+
+Execute the following steps for target: $ARGUMENTS
+
+### 0. Pre-Flight Verification
+Access test-writing guidelines:
+
+Read and understand the complete project guidelines:
+- Testing: @.auxiliary/instructions/tests.rst
+- Python Practices: @.auxiliary/instructions/practices-python.rst
+
+You must successfully access and understand both guides before proceeding. If any guide cannot be accessed, stop and inform the user.
+
+### 1. Coverage Analysis Phase
+
+**Generate and analyze current coverage data:**
+
+```bash
+hatch --env develop run coverage report --show-missing
+hatch --env develop run coverage html
+```
+
+Analysis requirements:
+- Identify all uncovered lines in target modules
+- Focus on uncovered lines and untested functionality
+- Determine which edge cases and error paths are untested
+- Note any pragma directives (# pragma: no cover) and their rationale
+
+### 1.5. Example Coverage Analysis
+
+**Review existing documentation examples:**
+
+Survey documentation examples to understand what's already demonstrated:
+- Read relevant example files in `documentation/examples/` if they exist
+- Identify code paths already exercised by user-focused examples
+- Note which functionality is already well-demonstrated through practical scenarios
+- Focus pytest planning on genuinely uncovered areas not addressed by examples
+- Avoid redundant testing of functionality that examples already exercise
+
+**Integration with pytest planning:**
+- Complement rather than duplicate example coverage
+- Target edge cases and error conditions that examples don't demonstrate
+- Focus on defensive code paths and boundary conditions
+- Plan systematic coverage of areas examples don't naturally reach
+
+**For each target module:**
+- Read the source code to understand the public API
+- Identify all functions, classes, and methods
+- Map uncovered lines to specific functionality
+- Note dependency injection points and testability patterns
+
+### 2. Gap Identification Phase
+
+**Systematically catalog what needs testing:**
+
+**Functionality Gaps:**
+- Public functions with zero test coverage
+- Classes with untested public methods
+- Error handling paths not exercised
+- Edge cases not covered
+
+**Coverage Gaps:**
+- Specific line numbers needing coverage
+- Branch conditions not tested
+- Exception handling paths missed
+- Integration scenarios untested
+
+**Architecture Gaps:**
+- Code that requires dependency injection for testability
+- Components that need filesystem mocking
+- External service interactions requiring test doubles
+- Private functions/methods not exercisable via public API
+- Areas where full coverage may require violating immutability constraints
+- Test data requirements (fixtures, snapshots, fake packages for `tests/data/`)
+
+### 3. Test Strategy Development
+
+**For each identified gap, determine:**
+
+**Test Approach:**
+- Which testing patterns apply (dependency injection, pyfakefs, etc.)
+- What test doubles or fixtures are needed
+- How to structure tests for maximum coverage
+
+**Test Categories:**
+- Basic functionality tests (000-099 range)
+- Component-specific tests (100+ blocks per function/class/method)
+- Edge cases and error handling (integrated within component blocks)
+
+**Implementation Considerations:**
+- Dependencies that need injection
+- Filesystem operations requiring pyfakefs
+- External services needing mocking (NEVER test against real external sites)
+- Test data and fixtures needed under `tests/data/`
+- Performance considerations
+
+### 4. Test Organization Planning
+
+**Determine test structure and numbering:**
+
+**Review existing test numbering conventions:**
+- Analyze current test file naming patterns
+- Identify next available number blocks for new test modules
+- Plan numbering for new test functions within modules
+
+Test module vs function numbering:
+- **Test modules**: Named as `test_<NNN>_<module>.py` (e.g., `test_100_exceptions.py`, `test_500_cli.py`)
+- **Test functions**: Within modules use 000-099 basic, 100+ blocks per component
+- These are DIFFERENT numbering schemes - do not confuse them
+
+**Test Module Numbering Hierarchy:**
+- Lower-level functionality gets lower numbers (e.g., `test_100_exceptions.py`, `test_110_utilities.py`)
+- Higher-level functionality gets higher numbers (e.g., `test_500_cli.py`, `test_600_server.py`)
+- Subpackage modules: `test_<N>0_<subpackage>_<module>.py` where `<N>` advances by 10 within the subpackage
+
+**Update test organization documentation:**
+- Update `documentation/architecture/testplans/summary.rst` with test module numbering scheme
+- Include project-specific testing conventions and new modules being planned
+- Document rationale for any pattern exceptions
+- Update during planning, not during implementation
+
+### 5. Plan Documentation Creation
+
+**Create comprehensive test plan document:**
+
+Save the plan to `documentation/architecture/testplans/[sanitized-module-name].rst` and update `documentation/architecture/testplans/index.rst` to include the new test plan in the toctree.
+
+Create the test plan document with:
+
+**Plan Structure (reStructuredText format):**
+```rst
+*******************************************************************************
+Test Plan: [Module Name]
+*******************************************************************************
+
+Coverage Analysis Summary
+===============================================================================
+
+- Current coverage: X%
+- Target coverage: 100%
+- Uncovered lines: [specific line numbers]
+- Missing functionality tests: [list]
+
+Test Strategy
+===============================================================================
+
+Basic Functionality Tests (000-099)
+-------------------------------------------------------------------------------
+
+- [List planned tests with brief descriptions]
+
+Component-Specific Tests (100+ blocks)
+-------------------------------------------------------------------------------
+
+Function/Class/Method: [name] (Tests 100-199)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- [Planned test descriptions including happy path, edge cases, and error handling]
+- [Dependencies needing injection]
+- [Special considerations]
+
+Implementation Notes
+===============================================================================
+
+- Dependencies requiring injection: [list]
+- Filesystem operations needing pyfakefs: [list]
+- External services requiring mocking: [list - NEVER test against real external sites]
+- Test data and fixtures: [needed under tests/data/ - fake packages, snapshots, captured artifacts]
+- Private functions/methods not testable via public API: [list with analysis]
+- Areas requiring immutability constraint violations: [list with recommendations]
+- Third-party testing patterns to research: [e.g., httpx mock transport]
+- Test module numbering for new files: [following hierarchy conventions]
+- Anti-patterns to avoid: [specific warnings including external network calls]
+
+Success Metrics
+===============================================================================
+
+- Target line coverage: [percentage]
+- Branch coverage goals: [percentage]
+- Specific gaps to close: [line numbers]
+```
+
+### 6. Plan Validation
+
+**Review and validate the plan:**
+
+**Completeness Check:**
+- All uncovered lines addressed
+- All functions/classes have test strategy
+- Error paths and edge cases included
+- Integration scenarios covered
+
+**Feasibility Check:**
+- All planned tests align with project principles
+- No monkey-patching of internal code required
+- Dependency injection patterns are viable
+- Performance considerations addressed
+
+**Numbering Check:**
+- Test numbering follows project conventions
+- No conflicts with existing test numbers
+- Logical organization by test type
+
+## Success Criteria
+
+Planning is complete when:
+- [ ] Complete coverage analysis performed
+- [ ] All testing gaps systematically identified
+- [ ] Test strategy developed for each gap
+- [ ] Test organization and numbering planned
+- [ ] `documentation/architecture/testplans/summary.rst` updated as needed
+- [ ] Comprehensive plan document created in testplans directory
+- [ ] `documentation/architecture/testplans/index.rst` updated to include new plan
+- [ ] Plan validates against project testing principles
+- [ ] Implementation approach is clear and actionable
+
+## Final Report
+
+Upon completion, provide a brief summary covering:
+- Current coverage percentage and specific gaps identified
+- Number of new tests planned by category
+- Key architectural considerations (dependency injection needs, etc.)
+- Assessment: Areas where 100% coverage may be impossible without violating immutability constraints
+- **PUSHBACK RECOMMENDATIONS**: Suggested architectural improvements to enable better testability
+- Private functions/methods that cannot be exercised via public API and analysis of why
+- Estimated complexity and implementation priority
+- Any potential challenges or special considerations
diff --git a/.auxiliary/configuration/claude/commands/cs-release-checkpoint.md b/.auxiliary/configuration/claude/commands/cs-release-checkpoint.md
new file mode 100644
index 0000000..7199e10
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-release-checkpoint.md
@@ -0,0 +1,163 @@
+---
+allowed-tools: Bash(git status), Bash(git pull:*), Bash(git add:*), Bash(git commit:*), Bash(git tag:*), Bash(git push:*), Bash(gh run list:*), Bash(gh run watch:*), Bash(hatch version:*), Bash(hatch --env develop run:*), Bash(echo:*), Bash(ls:*), Bash(grep:*), Bash(date:*), LS, Read
+description: Execute automated alpha checkpoint release with QA monitoring
+argument-hint: "[alpha]"
+---
+
+# Release Checkpoint
+
+**NOTE: This is an experimental workflow! If anything seems unclear or missing,
+please stop for consultation with the user.**
+
+For execution of an automated alpha checkpoint release on master branch.
+
+Below is a validated process to create an alpha checkpoint release with automated
+monitoring and version increment.
+
+Target alpha increment: $ARGUMENTS
+(optional - defaults to next alpha)
+
+Verify current version is alpha format if no arguments provided.
+
+Stop and consult if:
+- Working directory has uncommitted changes
+- Current version is not an alpha version (e.g., 1.3.0, 1.3rc1) and no target specified
+- Git operations fail or produce unexpected output
+
+## Context
+
+- Current git status: !`git status`
+- Current branch: !`git branch --show-current`
+- Current version: !`hatch version`
+- Recent commits: !`git log --oneline -10`
+
+## Prerequisites
+
+Before starting, ensure:
+- GitHub CLI (`gh`) is installed and authenticated
+- Working directory is clean with no uncommitted changes
+- Currently on master branch
+- Current version is an alpha version (e.g., 1.3a0)
+
+## Process Summary
+
+Key functional areas of the process:
+
+1. **Pre-Release Quality Check**: Run local QA to catch issues early
+2. **Changelog Generation**: Run Towncrier to build changelog
+3. **QA Monitoring**: Push commits and monitor QA workflow with GitHub CLI
+4. **Tag Release**: Create alpha tag with current version after QA passes
+5. **Release Monitoring**: Monitor release workflow deployment
+6. **Post-Release Cleanup**: Remove news fragments and bump alpha version
+
+## Safety Requirements
+
+Stop and consult the user if any of the following occur:
+
+- **Step failures**: If any command fails, git operation errors, or tests fail
+- **Workflow failures**: If QA or release workflows show failed jobs
+- **Unexpected output**: If commands produce unclear or concerning results
+- **Version conflicts**: If version bumps don't match expected patterns
+- **Network issues**: If GitHub operations timeout or fail repeatedly
+
+**Your responsibilities**:
+- Validate each step succeeds before proceeding to the next
+- Monitor workflow status and halt on any failures
+- Provide clear progress updates throughout the process
+- Maintain clean git hygiene
+- Use your judgment to assess when manual intervention is needed
+
+## Release Process
+
+Execute the following steps:
+
+### 1. Pre-Release Quality Check
+Run local quality assurance to catch issues early:
+```bash
+git status && git pull origin master
+hatch --env develop run linters
+hatch --env develop run testers
+hatch --env develop run docsgen
+```
+
+### 2. Changelog Generation
+Run Towncrier to update changelog with current version:
+```bash
+hatch --env develop run towncrier build --keep --version $(hatch version)
+git commit -am "Update changelog for v$(hatch version) release."
+```
+
+### 3. Quality Assurance Phase
+Push commits and monitor QA workflow:
+```bash
+git push origin master
+```
+
+Workflow monitoring requirements:
+After pushing, you MUST ensure you monitor the correct QA workflow run:
+
+1. **Wait for workflow trigger**: Wait 10 seconds after pushing to allow GitHub to trigger the workflow
+2. **Verify correct workflow**: Use `gh run list --workflow=qa --limit=5` to list recent runs
+3. **Check timestamps**: Compare the workflow creation time with your push time using `date --utc`
+4. **Ensure fresh run**: Only monitor a workflow run that was created AFTER your push timestamp
+5. **If no new run appears**: Wait additional time and check again - do NOT assume an old completed run is your workflow
+
+Once you've identified the correct QA run ID:
+```bash
+gh run watch --interval 30 --compact
+```
+
+Do not proceed until workflow completes:
+- Monitor QA workflow with `gh run watch` using the correct run ID
+- Use `timeout: 300000` (5 minutes) parameter in Bash tool for monitoring commands
+- If command times out, immediately rerun `gh run watch` until completion
+- Only proceed to next step after seeing "✓ [workflow-name] completed with 'success'"
+- Stop if any jobs fail - consult user before proceeding
+
+### 4. Alpha Release Deployment
+**Verify QA passed before proceeding to alpha tag:**
+```bash
+git tag -m "Alpha checkpoint v$(hatch version)." v$(hatch version)
+git push --tags
+```
+
+Release workflow monitoring requirements:
+After pushing the tag, you MUST ensure you monitor the correct release workflow run:
+
+1. **Wait for workflow trigger**: Wait 10 seconds after pushing tags to allow GitHub to trigger the release workflow
+2. **Verify correct workflow**: Use `gh run list --workflow=release --limit=5` to list recent runs
+3. **Check timestamps**: Compare the workflow creation time with your tag push time using `date --utc`
+4. **Ensure fresh run**: Only monitor a workflow run that was created AFTER your tag push timestamp
+5. **If no new run appears**: Wait additional time and check again - do NOT assume an old completed run is your workflow
+
+Once you've identified the correct release run ID:
+```bash
+gh run watch --interval 30 --compact
+```
+
+Do not proceed until workflow completes:
+- Monitor release workflow with `gh run watch` using the correct run ID
+- Use `timeout: 600000` (10 minutes) parameter in Bash tool for monitoring commands
+- If command times out, immediately rerun `gh run watch` until completion
+- Only proceed to next step after seeing "✓ [workflow-name] completed with 'success'"
+- Stop if any jobs fail - consult user before proceeding
+
+### 5. Post-Release Cleanup
+Clean up Towncrier fragments:
+```bash
+git rm .auxiliary/data/towncrier/*.rst
+git commit -m "Clean up news fragments."
+```
+
+### 6. Next Alpha Version
+Bump to next alpha version:
+```bash
+hatch version alpha
+git commit -am "Version: $(hatch version)"
+```
+
+### 7. Final Push
+Push cleanup and version bump commits:
+```bash
+git push origin master
+```
diff --git a/.auxiliary/configuration/claude/commands/cs-release-final.md b/.auxiliary/configuration/claude/commands/cs-release-final.md
new file mode 100644
index 0000000..2854244
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-release-final.md
@@ -0,0 +1,195 @@
+---
+allowed-tools: Bash(git status), Bash(git pull:*), Bash(git checkout:*), Bash(git add:*), Bash(git commit:*), Bash(git tag:*), Bash(git rm:*), Bash(git cherry-pick:*), Bash(git log:*), Bash(git branch:*), Bash(gh run list:*), Bash(gh run watch:*), Bash(hatch version:*), Bash(hatch --env develop run:*), Bash(echo:*), Bash(ls:*), Bash(grep:*), LS, Read
+description: Execute automated final release with QA monitoring and development cycle setup
+argument-hint: "major.minor"
+---
+
+# Release Final
+
+**NOTE: This is an experimental workflow! If anything seems unclear or missing,
+please stop for consultation with the user.**
+
+For execution of a fully-automated final release.
+
+Below is a validated process to create a final release with automated
+monitoring and next development cycle setup.
+
+Target release version: $ARGUMENTS
+
+Verify exactly one target release version provided.
+
+Stop and consult if:
+- No target release version is provided
+- Multiple release versions provided (e.g., `1.6 foo bar`)
+- Release version format doesn't match `X.Y` pattern (e.g., `1.6.2`, `1.6a0`)
+
+## Context
+
+- Current git status: !`git status`
+- Current branch: !`git branch --show-current`
+- Current version: !`hatch version`
+- Recent commits: !`git log --oneline -10`
+- Available towncrier fragments: !`ls .auxiliary/data/towncrier/*.rst 2>/dev/null || echo "No fragments found"`
+
+## Prerequisites
+
+Before starting, ensure:
+- GitHub CLI (`gh`) is installed and authenticated
+- For new releases: All changes are committed to `master` branch
+- For existing release branches: Release candidate has been validated and tested
+- Working directory is clean with no uncommitted changes
+- Towncrier news fragments are present for the release enhancements
+
+## Process Summary
+
+Key functional areas of the process:
+
+1. **Branch Setup**: Create new release branch or checkout existing one
+2. **Version Bump**: Set version to final release (major/minor/patch as appropriate)
+3. **Update Changelog**: Run Towncrier to build final changelog
+4. **QA Monitoring**: Push commits and monitor QA workflow with GitHub CLI
+5. **Tag Release**: Create signed git tag after QA passes
+6. **Release Monitoring**: Monitor release workflow deployment
+7. **Cleanup**: Remove news fragments and cherry-pick back to master
+8. **Next Development Cycle**: Set up master branch for next development version
+
+## Safety Requirements
+
+Stop and consult the user if any of the following occur:
+
+- **Step failures**: If any command fails, git operation errors, or tests fail
+- **Workflow failures**: If QA or release workflows show failed jobs
+- **Unexpected output**: If commands produce unclear or concerning results
+- **Version conflicts**: If version bumps don't match expected patterns
+- **Network issues**: If GitHub operations timeout or fail repeatedly
+
+**Your responsibilities**:
+- Validate each step succeeds before proceeding to the next
+- Monitor workflow status and halt on any failures
+- Provide clear progress updates throughout the process
+- Maintain clean git hygiene and proper branching
+- Use your judgment to assess when manual intervention is needed
+
+## Release Process
+
+Execute the following steps:
+
+### 1. Pre-Release Quality Check
+Run local quality assurance to catch issues early:
+```bash
+git status && git pull origin master
+hatch --env develop run linters
+hatch --env develop run testers
+hatch --env develop run docsgen
+```
+
+### 2. Release Branch Setup
+Determine release branch name from target version (e.g., `1.6` → `release-1.6`).
+
+**If release branch exists** (for RC→final conversion):
+```bash
+git checkout release-$ARGUMENTS
+git pull origin release-$ARGUMENTS
+```
+
+**If creating new release branch**:
+```bash
+git checkout master && git pull origin master
+git checkout -b release-$ARGUMENTS
+```
+
+### 3. Version Management
+Set version to target release version:
+```bash
+hatch version $ARGUMENTS
+git commit -am "Version: $(hatch version)"
+```
+
+### 4. Changelog Generation
+```bash
+hatch --env develop run towncrier build --keep --version $(hatch version)
+git commit -am "Update changelog for v$(hatch version) release."
+```
+
+### 5. Quality Assurance Phase
+Push branch and monitor QA workflow:
+```bash
+# Use -u flag for new branches, omit for existing
+git push [-u] origin release-$ARGUMENTS
+```
+
+Workflow monitoring requirements:
+After pushing, you MUST ensure you monitor the correct QA workflow run:
+
+1. **Wait for workflow trigger**: Wait 10 seconds after pushing to allow GitHub to trigger the workflow
+2. **Verify correct workflow**: Use `gh run list --workflow=qa --limit=5` to list recent runs
+3. **Check timestamps**: Compare the workflow creation time with your push time using `date --utc`
+4. **Ensure fresh run**: Only monitor a workflow run that was created AFTER your push timestamp
+5. **If no new run appears**: Wait additional time and check again - do NOT assume an old completed run is your workflow
+
+Once you've identified the correct QA run ID:
+```bash
+gh run watch --interval 30 --compact
+```
+
+Do not proceed until workflow completes:
+- Monitor QA workflow with `gh run watch` using the correct run ID
+- Use `timeout: 300000` (5 minutes) parameter in Bash tool for monitoring commands
+- If command times out, immediately rerun `gh run watch` until completion
+- Only proceed to next step after seeing "✓ [workflow-name] completed with 'success'"
+- Stop if any jobs fail - consult user before proceeding
+
+### 6. Release Deployment
+**Verify QA passed before proceeding to release tag:**
+```bash
+git tag -m "Release v$(hatch version): <brief summary>." v$(hatch version)
+git push --tags
+```
+
+Release workflow monitoring requirements:
+After pushing the tag, you MUST ensure you monitor the correct release workflow run:
+
+1. **Wait for workflow trigger**: Wait 10 seconds after pushing tags to allow GitHub to trigger the release workflow
+2. **Verify correct workflow**: Use `gh run list --workflow=release --limit=5` to list recent runs
+3. **Check timestamps**: Compare the workflow creation time with your tag push time using `date --utc`
+4. **Ensure fresh run**: Only monitor a workflow run that was created AFTER your tag push timestamp
+5. **If no new run appears**: Wait additional time and check again - do NOT assume an old completed run is your workflow
+
+Once you've identified the correct release run ID:
+```bash
+gh run watch --interval 30 --compact
+```
+
+Do not proceed until workflow completes:
+- Monitor release workflow with `gh run watch` using the correct run ID
+- Use `timeout: 600000` (10 minutes) parameter in Bash tool for monitoring commands
+- If command times out, immediately rerun `gh run watch` until completion
+- Only proceed to next step after seeing "✓ [workflow-name] completed with 'success'"
+- Stop if any jobs fail - consult user before proceeding
+
+### 7. Post-Release Cleanup
+```bash
+git rm .auxiliary/data/towncrier/*.rst
+git commit -m "Clean up news fragments."
+git push origin release-$ARGUMENTS
+```
+
+### 8. Master Branch Integration
+Cherry-pick release commits back to master:
+```bash
+git checkout master && git pull origin master
+git cherry-pick <changelog-commit-hash>
+git cherry-pick <cleanup-commit-hash>
+git push origin master
+```
+
+### 9. Next Development Cycle (Major/Minor Releases Only)
+Set up next development version:
+```bash
+hatch version minor,alpha
+git commit -am "Start of development for release $(hatch version | sed 's/a[0-9]*$//')."
+git tag -m "Start of development for release $(hatch version | sed 's/a[0-9]*$//')." "i$(hatch version | sed 's/a[0-9]*$//')"
+git push origin master --tags
+```
+
+**Note**: Use `git log --oneline` to identify commit hashes for cherry-picking.
diff --git a/.auxiliary/configuration/claude/commands/cs-release-maintenance.md b/.auxiliary/configuration/claude/commands/cs-release-maintenance.md
new file mode 100644
index 0000000..b61e493
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-release-maintenance.md
@@ -0,0 +1,237 @@
+---
+allowed-tools: Bash(git status), Bash(git pull:*), Bash(git checkout:*), Bash(git commit:*), Bash(git tag:*), Bash(git rm:*), Bash(git cherry-pick:*), Bash(git log:*), Bash(git branch:*), Bash(gh run list:*), Bash(gh run watch:*), Bash(hatch version:*), Bash(hatch --env develop run:*), Bash(echo:*), Bash(ls:*), Bash(grep:*), LS, Read
+description: Execute automated patch release with QA monitoring and master integration
+argument-hint: "major.minor"
+---
+
+# Release Patch
+
+**NOTE: This is an experimental workflow! If anything seems unclear or missing,
+please stop for consultation with the user.**
+
+For execution of a fully-automated postrelease patch.
+
+Below is a validated process to create patch releases with automated monitoring
+and clean integration back to master.
+
+Target release version: $ARGUMENTS
+(e.g., `1.24`, `2.3`)
+
+Verify exactly one target release version provided.
+
+Stop and consult if:
+- No target release version is provided
+- Multiple release versions provided (e.g., `1.6 foo bar`)
+- Release version format doesn't match `X.Y` pattern (e.g., `1.6.2`, `1.6a0`)
+
+## Context
+
+- Current git status: !`git status`
+- Current branch: !`git branch --show-current`
+- Current version: !`hatch version`
+- Recent commits: !`git log --oneline -10`
+- Available towncrier fragments: !`ls .auxiliary/data/towncrier/*.rst 2>/dev/null || echo "No fragments found"`
+
+## Prerequisites
+
+Before running this command, ensure:
+- GitHub CLI (`gh`) is installed and authenticated
+- Release branch exists for the target version (e.g., `release-1.24` for version `1.24`)
+- Working directory is clean with no uncommitted changes
+- Towncrier news fragments are present for the patch changes
+
+## Process Summary
+
+Key functional areas of the process:
+
+1. **Branch Setup**: Checkout and update the appropriate release branch
+2. **Version Bump**: Increment to next patch version with `hatch version patch`
+3. **Update Changelog**: Run Towncrier to build patch changelog
+4. **QA Monitoring**: Push commits and monitor QA workflow with GitHub CLI
+5. **Tag Release**: Create signed git tag after QA passes
+6. **Release Monitoring**: Monitor release workflow deployment
+7. **Cleanup**: Remove news fragments and cherry-pick back to master
+
+## Safety Requirements
+
+Stop and consult the user if any of the following occur:
+
+- **Step failures**: If any command fails, git operation errors, or tests fail
+- **Workflow failures**: If QA or release workflows show failed jobs
+- **Version conflicts**: If patch version doesn't match expected patterns
+- **Branch issues**: If release branch doesn't exist or is in unexpected state
+- **Network issues**: If GitHub operations timeout or fail repeatedly
+
+**Your responsibilities**:
+- Validate each step succeeds before proceeding to the next
+- Monitor workflow status and halt on any failures
+- Provide clear progress updates throughout the process
+- Maintain clean git hygiene and proper branching
+- Use your judgment to assess when manual intervention is needed
+
+## Release Process
+
+Execute the following steps:
+
+### 1. Pre-Release Quality Check
+Run local quality assurance to catch issues early:
+```bash
+git status && git pull origin master
+hatch --env develop run linters
+hatch --env develop run testers
+hatch --env develop run docsgen
+```
+
+### 2. Release Branch Setup
+Checkout the target release branch:
+```bash
+git checkout release-$ARGUMENTS
+git pull origin release-$ARGUMENTS
+```
+
+### 3. Patch Integration
+**Determine patch location and integrate if needed:**
+
+### 3.1. Identify Patch Commits
+Before cherry-picking, identify which commits contain actual patch fixes vs. maintenance:
+
+```bash
+git log --oneline master
+git log --graph --oneline master --since="1 month ago"
+# Show commits on master not on release branch
+git log --oneline release-$ARGUMENTS..master --since="1 month ago"
+```
+
+**IMPORTANT**
+- Do **not** cherry-pick commits which were previously cherry-picked onto the
+ branch.
+- Look at the Towncrier news fragments to help you decide what to pick.
+
+**Patch commits** (always cherry-pick):
+- Bug fixes
+- Security patches
+- Critical functionality fixes
+
+**Maintenance commits** (evaluate case-by-case):
+- Template updates
+- Dependency bumps
+- Documentation changes
+
+Use `git show <commit-hash>` to review each commit's content before deciding.
+
+**If patches were developed on master** (cherry-pick to release branch):
+```bash
+# Cherry-pick patch commits from master to release branch
+# Use git log --oneline master to identify relevant commit hashes
+git cherry-pick <patch-commit-hash>
+git cherry-pick <patch-commit-hash>
+# Repeat for all patch commits
+```
+
+**If patches were developed on release branch**: Skip this step - patches are already present.
+
+### 4. Pre-Release Validation
+Run linting to catch issues before formal release process:
+```bash
+hatch --env develop run linters
+```
+Stop if any linting errors - fix issues before proceeding.
+
+### 5. Version Management
+Increment to next patch version:
+```bash
+hatch version patch
+git commit -am "Version: $(hatch version)"
+```
+
+### 6. Changelog Generation
+```bash
+hatch --env develop run towncrier build --keep --version $(hatch version)
+git commit -am "Update changelog for v$(hatch version) patch release."
+```
+
+### 7. Quality Assurance Phase
+Push branch and monitor QA workflow:
+```bash
+git push origin release-$ARGUMENTS
+```
+
+Workflow monitoring requirements:
+After pushing, you MUST ensure you monitor the correct QA workflow run:
+
+1. **Wait for workflow trigger**: Wait 10 seconds after pushing to allow GitHub to trigger the workflow
+2. **Verify correct workflow**: Use `gh run list --workflow=qa --limit=5` to list recent runs
+3. **Check timestamps**: Compare the workflow creation time with your push time using `date --utc`
+4. **Ensure fresh run**: Only monitor a workflow run that was created AFTER your push timestamp
+5. **If no new run appears**: Wait additional time and check again - do NOT assume an old completed run is your workflow
+
+Once you've identified the correct QA run ID:
+```bash
+gh run watch --interval 30 --compact
+```
+
+Do not proceed until workflow completes:
+- Monitor QA workflow with `gh run watch` using the correct run ID
+- Use `timeout: 300000` (5 minutes) parameter in Bash tool for monitoring commands
+- If command times out, immediately rerun `gh run watch` until completion
+- Only proceed to next step after seeing "✓ [workflow-name] completed with 'success'"
+- Stop if any jobs fail - consult user before proceeding
+
+### 8. Release Deployment
+**Verify QA passed before proceeding to release tag:**
+```bash
+git tag -m "Release v$(hatch version) patch: <brief summary>." v$(hatch version)
+git push --tags
+```
+
+Release workflow monitoring requirements:
+After pushing the tag, you MUST ensure you monitor the correct release workflow run:
+
+1. **Wait for workflow trigger**: Wait 10 seconds after pushing tags to allow GitHub to trigger the release workflow
+2. **Verify correct workflow**: Use `gh run list --workflow=release --limit=5` to list recent runs
+3. **Check timestamps**: Compare the workflow creation time with your tag push time using `date --utc`
+4. **Ensure fresh run**: Only monitor a workflow run that was created AFTER your tag push timestamp
+5. **If no new run appears**: Wait additional time and check again - do NOT assume an old completed run is your workflow
+
+Once you've identified the correct release run ID:
+```bash
+gh run watch --interval 30 --compact
+```
+
+Do not proceed until workflow completes:
+- Monitor release workflow with `gh run watch` using the correct run ID
+- Use `timeout: 600000` (10 minutes) parameter in Bash tool for monitoring commands
+- If command times out, immediately rerun `gh run watch` until completion
+- Only proceed to next step after seeing "✓ [workflow-name] completed with 'success'"
+- Stop if any jobs fail - consult user before proceeding
+
+### 9. Post-Release Cleanup
+```bash
+git rm .auxiliary/data/towncrier/*.rst
+git commit -m "Clean up news fragments."
+git push origin release-$ARGUMENTS
+```
+
+### 10. Master Branch Integration
+Cherry-pick commits back to master based on patch development location:
+
+**If patches were developed on master**: Cherry-pick changelog and cleanup commits:
+```bash
+git checkout master && git pull origin master
+git cherry-pick <changelog-commit-hash>
+git cherry-pick <cleanup-commit-hash>
+git push origin master
+```
+
+**If patches were developed on release branch**: Cherry-pick patch, changelog, and cleanup commits:
+```bash
+git checkout master && git pull origin master
+git cherry-pick <patch-commit-hash>
+git cherry-pick <patch-commit-hash>
+# Repeat for all patch commits
+git cherry-pick <changelog-commit-hash>
+git cherry-pick <cleanup-commit-hash>
+git push origin master
+```
+
+**Note**: Use `git log --oneline` to identify commit hashes for cherry-picking.
diff --git a/.auxiliary/configuration/claude/commands/cs-review-todos.md b/.auxiliary/configuration/claude/commands/cs-review-todos.md
new file mode 100644
index 0000000..1a2e423
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-review-todos.md
@@ -0,0 +1,103 @@
+---
+allowed-tools: Read, Write, Edit, MultiEdit, LS, Glob, Grep, Bash(find:*), Bash(ls:*), Bash(wc:*)
+description: Systematically find, categorize, and analyze TODO comments for technical debt management
+---
+
+# Technical Debt Review
+
+Systematically find, categorize, and analyze TODO comments across the project
+codebase to provide actionable insights about technical debt and outstanding
+work items.
+
+Filter criteria and analysis focus: $ARGUMENTS
+(if blank, then consider entire project)
+
+## Context
+
+- Notes: @.auxiliary/notes
+- Project architecture: @documentation/architecture/summary.rst
+- Project designs: @documentation/architecture/designs
+
+## Prerequisites
+
+Before running this analysis, ensure:
+- Understanding of project structure and file organization
+- Access to both source code and auxiliary documentation
+
+## Process Summary
+
+Key functional areas:
+1. **Discovery**: Search for TODO/FIXME/XXX/HACK comments across all relevant files
+2. **Categorization**: Organize findings by urgency, component, and type
+3. **Analysis**: Assess technical debt impact and provide prioritization insights
+4. **Reconciliation**: Compare source code TODOs with tracking documents
+5. **Reporting**: Generate actionable summary with recommended next steps
+
+## Safety Requirements
+
+Stop and consult the user if:
+- Large volume of TODOs (>100) found that may require batch processing
+- Inconsistencies between tracking documents and source code require manual review
+- File access permissions prevent comprehensive analysis
+
+## Execution
+
+Execute the following steps:
+
+### 1. Comprehensive TODO Discovery
+
+Search for all TODO-style comments across the project:
+- Use Grep to find TODO, FIXME, XXX, HACK, NOTE patterns
+- Search Python files, documentation, configuration files
+- Include both inline comments and dedicated TODO sections
+- Capture surrounding context (3-5 lines) for each finding
+
+### 2. Pattern Analysis and Categorization
+
+Analyze discovered TODOs for:
+- **Urgency indicators**: Words like "urgent", "critical", "before release", "security"
+- **Component classification**: Group by module, file, or functional area
+- **Type classification**: Bug fix, feature enhancement, refactoring, documentation
+- **Age estimation**: Check git blame for when TODO was introduced
+
+### 3. Auxiliary Document Review
+
+Examine TODO tracking files in `.auxiliary/notes/`:
+- Read any existing TODO tracking documents
+- Compare with source code findings
+- Identify completed items that should be removed
+- Note discrepancies between tracking and actual code state
+
+### 4. Priority Assessment
+
+Evaluate each TODO for:
+- **Business impact**: Customer-facing vs. internal improvements
+- **Technical risk**: Potential for bugs, security issues, or maintenance burden
+- **Implementation complexity**: Quick fixes vs. architectural changes
+- **Dependencies**: Items blocking other work vs. standalone improvements
+
+### 5. Reporting and Recommendations
+
+Generate structured output including:
+- **Executive summary**: Total count, high-priority items, key themes
+- **Categorized listings**: Organized by urgency, component, and type
+- **Urgent actions**: Items requiring immediate attention
+- **Cleanup opportunities**: Completed or obsolete TODOs to remove
+- **Tracking reconciliation**: Sync recommendations between documents and code
+- **Next steps**: Prioritized action plan for technical debt reduction
+
+### 6. Documentation Updates
+
+When appropriate:
+- Update or create TODO tracking documents in `.auxiliary/notes/`
+- Remove completed TODO comments from source code
+- Add context or priority indicators to ambiguous TODOs
+- Standardize TODO format across the project
+
+### 7. Summary Report
+
+Provide comprehensive analysis including:
+- Total technical debt inventory
+- Risk assessment of critical items
+- Recommended prioritization for next development cycles
+- Maintenance suggestions for keeping TODO management current
diff --git a/.auxiliary/configuration/claude/commands/cs-update-command.md b/.auxiliary/configuration/claude/commands/cs-update-command.md
new file mode 100644
index 0000000..a9a9272
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-update-command.md
@@ -0,0 +1,96 @@
+---
+allowed-tools: Read, Write, Edit, MultiEdit, LS, Glob, Grep
+description: Update existing slash command with missing instructions or reinforced guidance
+---
+
+# Update Slash Command
+
+Update an existing custom slash command to address missing instructions,
+reinforce guidance which LLMs are ignoring, add missing tool permissions, or
+make structural improvements.
+
+Target command and instructions: $ARGUMENTS
+
+Stop and consult if:
+- The target file doesn't exist or isn't a slash command
+- Major structural changes are requested that would fundamentally alter the command purpose
+- Changes conflict with established project patterns
+
+## Context
+
+- Command template: @.auxiliary/configuration/claude/miscellany/command-template.md
+- Project conventions: @.auxiliary/configuration/conventions.md
+
+## Prerequisites
+
+Before updating the command, ensure:
+- Clear understanding of what improvements are needed
+- Target file exists and is accessible
+- Any referenced files or patterns are available
+- Changes align with project conventions and existing process patterns
+
+## Process Summary
+
+Key functional areas:
+1. **Analysis**: Read current command and identify improvement areas
+2. **Content Updates**: Add missing instructions or reinforce existing guidance
+3. **Structure Review**: Consider organizational improvements when appropriate
+4. **Tone Refinement**: Ensure professional language without excessive emphasis
+5. **Validation**: Verify updates maintain command effectiveness
+
+## Safety Requirements
+
+Stop and consult the user if:
+- Process changes would break existing workflows or dependencies
+- Updates conflict with established project conventions
+- Structural modifications require significant rework of command logic
+
+## Execution
+
+Execute the following steps:
+
+### 1. Command Analysis
+Read and analyze the current command:
+- Review existing content, structure, and tool permissions
+- Identify areas needing improvement or reinforcement
+- Assess tone and language for professional standards
+- Note any missing instructions or unclear guidance
+
+### 2. Content Enhancement
+Apply requested improvements:
+- Add missing instructions where gaps are identified
+- Reinforce guidance that needs stronger emphasis
+- Remove excessive bold formatting or shouty language
+- Eliminate redundant repetition within sections
+- Ensure clear, actionable language throughout
+
+### 3. Structural Review
+Consider organizational improvements:
+- Evaluate section ordering and logical flow
+- Improve prerequisites or context sections if needed
+- Enhance command summary for clarity
+- Adjust safety requirements as appropriate
+- Ensure consistent formatting patterns
+
+### 4. Tool and Permission Updates
+Review and adjust technical aspects:
+- Verify allowed-tools are appropriate for updated functionality
+- Check that `@`-references and shell command expansions are current
+- Ensure any context commands have proper tool permissions to run (e.g., `Bash(ls:*)` for `ls` commands)
+- Ensure context section provides relevant dynamic information
+- Validate that command can execute with given permissions
+
+### 5. Professional Polish
+Apply formatting and tone standards:
+- Use professional headers without excessive emphasis
+- Maintain clear, direct language without redundancy
+- Ensure consistency with project conventions
+- Remove any attention-grabbing formatting that isn't necessary
+- Balance guidance strength with readability
+
+### 6. Validation and Summary
+Complete the command update:
+- Review updated content for completeness and clarity
+- Verify all requested improvements have been addressed
+- Ensure command maintains effectiveness while addressing issues
+- Provide succinct summary of changes made to the user
diff --git a/.auxiliary/configuration/claude/commands/cs-update-readme-rst.md b/.auxiliary/configuration/claude/commands/cs-update-readme-rst.md
new file mode 100644
index 0000000..575954e
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/cs-update-readme-rst.md
@@ -0,0 +1,105 @@
+---
+allowed-tools: Read, Write, Edit, MultiEdit, LS, Glob, Grep, Bash(git status:*), Bash(ls:*)
+description: Analyze current project state and refresh manually-maintained sections of README.rst while preserving template content
+---
+
+# Update README Documentation
+
+Analyze the current project state and refresh the manually-maintained sections
+of README.rst files while preserving auto-generated template content and
+ensuring accuracy with actual project capabilities.
+
+User input: $ARGUMENTS
+
+## Context
+
+- Current git status: !`git status --porcelain`
+- Project structure: !`ls -la`
+- Current README: @README.rst
+- Project metadata: @pyproject.toml
+- Product requirements: @documentation/prd.rst
+- Architecture overview: @documentation/architecture/filesystem.rst
+
+## Prerequisites
+
+Before updating README documentation, ensure:
+- Current README.rst exists and is accessible
+- Understanding of project's actual capabilities and features
+- Access to project metadata and configuration files
+
+## Process Summary
+
+Key functional areas:
+1. **Content Analysis**: Examine current README and identify TODO sections needing updates
+2. **Project Assessment**: Analyze actual capabilities from code, CLI, and configuration
+3. **Content Generation**: Create compelling descriptions, features, and examples based on real functionality
+4. **Validation**: Ensure all claims and examples match actual project capabilities
+
+## Safety Requirements
+
+Stop and consult the user if:
+- README.rst cannot be read or is missing critical structure
+- Template boundaries are unclear or may be damaged
+- Project capabilities cannot be determined from available sources
+- Generated examples cannot be validated against actual implementation
+- Significant structural changes to README are required beyond content updates
+
+All template-rendered sections must be preserved without modification; these
+include: badges, installation, contribution, flair
+
+
+## Execution
+
+Execute the following steps:
+
+### 1. README Analysis
+Read and analyze the current README structure:
+- Examine existing README.rst for TODO markers and outdated content
+- Identify template-generated sections that must be preserved
+- Map sections that need manual content updates
+- Note existing manual content that should be retained
+
+### 2. Project Capability Assessment
+Analyze the actual project functionality:
+- Extract project metadata from pyproject.toml (name, description, dependencies)
+- Read PRD document if available for project goals and features
+- Examine source code structure to understand API capabilities
+- Test CLI functionality if enabled to document actual usage patterns
+- Review configuration files and scripts for additional capabilities
+
+### 3. Content Generation Strategy
+Plan content updates based on project analysis:
+- Draft compelling project description with emoji prefix (e.g., 🔧, 📊, 🌐, 🎯) matching project purpose
+- Identify key features based on actual implementation
+- Plan 1-2 concise examples that whet appetites without overwhelming
+- Avoid advanced showcase examples - focus on core value demonstration
+- Consider additional sections (Use Cases, Motivation, Configuration) appropriate for project complexity
+- Ensure content accuracy and professional tone
+
+### 4. README Content Updates
+Update manual sections while preserving template content:
+- Replace ".. todo:: Provide project description" with emoji-prefixed compelling description
+- Add or update "Key Features ⭐" section with bullet points of actual capabilities
+- Generate concise "Examples 💡" section with 1-2 essential usage patterns only
+- Keep examples minimal and focused on core value, not comprehensive showcase
+- Add relevant sections like "Use Cases", "Motivation", or "Configuration" as appropriate
+- Preserve all template-generated sections (badges, installation, contribution, flair)
+
+### 5. Content Validation
+Verify accuracy of all updated content:
+- Test all code examples for correctness with current codebase
+- Verify feature claims are supported by actual implementation
+- Check that installation instructions match project configuration
+- Ensure RST formatting is correct and consistent
+- Validate examples are concise and appetite-whetting, not overwhelming
+- Confirm README length is appropriate for project complexity
+
+### 6. Final Review
+Complete final validation and formatting:
+- Review entire README for consistency and professional presentation
+- Ensure all TODO markers have been appropriately addressed
+- Verify template boundaries are intact and respected
+- Confirm examples are executable and accurate
+- Check that content maintains engaging tone while being factually correct
+
+### 7. Summarize Updates
+Provide concise summary of updates to the user.
diff --git a/.auxiliary/configuration/claude/commands/validate-custom-slash.md b/.auxiliary/configuration/claude/commands/validate-custom-slash.md
new file mode 100644
index 0000000..2540aca
--- /dev/null
+++ b/.auxiliary/configuration/claude/commands/validate-custom-slash.md
@@ -0,0 +1,41 @@
+---
+allowed-tools: Bash(git status), Bash(git branch:*), Bash(git log:*), Bash(hatch version:*), Bash(echo:*), Bash(ls:*), Bash(pwd), LS, Read
+description: Validate custom slash command functionality with context and permissions
+---
+
+# Validate Custom Slash Command
+
+Test script to validate custom slash command functionality, permissions, and context interpolation.
+
+Test argument: $ARGUMENTS
+
+## Context
+
+- Current directory: !`pwd`
+- Current git status: !`git status --porcelain`
+- Current branch: !`git branch --show-current`
+- Current version: !`hatch version`
+- Recent commits: !`git log --oneline -5`
+- Template files: !`ls template/.auxiliary/configuration/claude/commands/`
+
+## Validation Tasks
+
+1. **Report the test argument**: Look at the "Test argument:" line above and tell me what value you see there
+2. **Test basic git commands**: Run `git status` and `git branch --show-current`
+3. **Test hatch command**: Run `hatch version`
+4. **Test file operations**: Use LS tool to list current directory contents
+5. **Test restricted command**: Attempt `git push` (should be blocked and require approval)
+
+## Expected Results
+
+- Context should be populated with current state
+- Allowed commands should execute successfully
+- `git push` should be blocked
+
+## Your Task
+
+Execute the validation tasks above and provide a summary report including:
+- The interpolated argument value you see on the "Test argument:" line
+- Results of each allowed command
+- Confirmation that restricted commands are properly blocked
+- Any observations about the command execution experience
diff --git a/.auxiliary/configuration/claude/miscellany/command-template.md b/.auxiliary/configuration/claude/miscellany/command-template.md
new file mode 100644
index 0000000..2db83c6
--- /dev/null
+++ b/.auxiliary/configuration/claude/miscellany/command-template.md
@@ -0,0 +1,47 @@
+---
+allowed-tools: Tool1, Tool2, Tool3
+description: Brief description of what this command does
+---
+
+# Process Title
+
+Brief introductory paragraph explaining the purpose.
+
+Target/input description: $ARGUMENTS
+
+## Context
+
+- Current state checks, if applicable: !`command1`
+- Environment info, if applicable: !`command2`
+- Relevant data, if applicable: !`command3`
+
+## Prerequisites
+
+Before running this process, ensure:
+- Prerequisite 1
+- Prerequisite 2
+- @-references to relevant guides if applicable
+
+## Process Summary
+
+Key functional areas:
+1. **Phase 1**: Description
+2. **Phase 2**: Description
+3. **Phase 3**: Description
+
+## Safety Requirements
+
+Stop and consult the user if:
+- List validation conditions
+- Error conditions that require user input
+- Unexpected situations
+
+## Execution
+
+Execute the following steps:
+
+### 1. Step Name
+Description of what this step does.
+
+### 2. Step Name
+More steps as needed.
diff --git a/.auxiliary/configuration/claude/settings.json b/.auxiliary/configuration/claude/settings.json
new file mode 100644
index 0000000..cfdc6aa
--- /dev/null
+++ b/.auxiliary/configuration/claude/settings.json
@@ -0,0 +1,94 @@
+{
+ "env": {
+ "BASH_DEFAULT_TIMEOUT_MS": "1800000",
+ "BASH_MAX_TIMEOUT_MS": "1800000",
+ "CLAUDE_BASH_MAINTAIN_PROJECT_WORKING_DIR": "1",
+ "CLAUDE_CODE_DISABLE_TERMINAL_TITLE": "1",
+ "DISABLE_NON_ESSENTIAL_MODEL_CALLS": "1"
+ },
+ "hooks": {
+ "PreToolUse": [
+ {
+ "matcher": "Bash",
+ "hooks": [
+ {
+ "type": "command",
+ "command": ".auxiliary/scripts/claude/pre-bash-python-check",
+ "timeout": 10
+ },
+ {
+ "type": "command",
+ "command": ".auxiliary/scripts/claude/pre-bash-git-commit-check",
+ "timeout": 300
+ }
+ ]
+ }
+ ],
+ "PostToolUse": [
+ {
+ "matcher": "Edit|MultiEdit|Write",
+ "hooks": [
+ {
+ "type": "command",
+ "command": ".auxiliary/scripts/claude/post-edit-linter",
+ "timeout": 60
+ }
+ ]
+ }
+ ]
+ },
+ "permissions": {
+ "allow": [
+ "Bash(awk:*)",
+ "Bash(cat:*)",
+ "Bash(cut:*)",
+ "Bash(df:*)",
+ "Bash(du:*)",
+ "Bash(echo:*)",
+ "Bash(file:*)",
+ "Bash(find:*)",
+ "Bash(gh browse:*)",
+ "Bash(gh issue list:*)",
+ "Bash(gh issue view:*)",
+ "Bash(gh pr checks:*)",
+ "Bash(gh pr list:*)",
+ "Bash(gh pr view:*)",
+ "Bash(gh release list:*)",
+ "Bash(gh release view:*)",
+ "Bash(gh repo list:*)",
+ "Bash(gh repo view:*)",
+ "Bash(gh run list:*)",
+ "Bash(gh run view:*)",
+ "Bash(gh run watch:*)",
+ "Bash(gh status:*)",
+ "Bash(git add:*)",
+ "Bash(git branch:*)",
+ "Bash(git diff:*)",
+ "Bash(git log:*)",
+ "Bash(git show:*)",
+ "Bash(git status)",
+ "Bash(grep:*)",
+ "Bash(hatch run python:*)",
+ "Bash(hatch --env develop run:*)",
+ "Bash(head:*)",
+ "Bash(ls:*)",
+ "Bash(ps:*)",
+ "Bash(pwd:*)",
+ "Bash(rg:*)",
+ "Bash(sed:*)",
+ "Bash(sort:*)",
+ "Bash(tail:*)",
+ "Bash(uniq:*)",
+ "Bash(wc:*)",
+ "Bash(which:*)",
+ "mcp__context7__get-library-docs",
+ "mcp__context7__resolve-library-id",
+ "mcp__pyright__definition",
+ "mcp__pyright__diagnostics",
+ "mcp__pyright__edit_file",
+ "mcp__pyright__hover",
+ "mcp__pyright__references",
+ "mcp__pyright__rename_symbol"
+ ]
+ }
+}
diff --git a/.auxiliary/configuration/conventions.md b/.auxiliary/configuration/conventions.md
new file mode 100644
index 0000000..919ad04
--- /dev/null
+++ b/.auxiliary/configuration/conventions.md
@@ -0,0 +1,39 @@
+# Context
+
+
+
+- Project overview and quick start: README.rst
+- Product requirements and goals: documentation/prd.rst
+- System architecture and design: @documentation/architecture/
+- Development practices and style: @.auxiliary/instructions/
+- Current session notes and TODOs: @.auxiliary/notes/
+
+- Use the 'context7' MCP server to retrieve up-to-date documentation for any SDKs or APIs.
+- Use the 'librovore' MCP server to search structured documentation sites with object inventories (Sphinx-based, compatible MkDocs with mkdocstrings). This bridges curated documentation (context7) and raw scraping (firecrawl).
+- Check README files in directories you're working with for insights about architecture, constraints, and TODO items.
+- Update files under `.auxiliary/notes` during conversation, removing completed tasks and adding emergent items.
+
+# Operation
+
+- Use `rg --line-number --column` to get precise coordinates for MCP tools that require line/column positions.
+- Choose appropriate editing tools based on the task complexity and your familiarity with the tools.
+- Consider `mcp__pyright__edit_file` for more reliable line-based editing than context-based `Edit`/`MultiEdit` when making complex changes.
+- Use pyright MCP tools where appropriate: `rename_symbol` for refactors, `hover` for getting function definitions without searching through code, `references` for precise symbol analysis.
+- Batch related changes together when possible to maintain consistency.
+- Use relative paths rather than absolute paths when possible.
+- Do not write to paths outside the current project unless explicitly requested.
+- Use the `.auxiliary/scribbles` directory for scratch space instead of `/tmp`.
+
+# Commits
+
+- Use `git status` to ensure all relevant changes are in the changeset.
+- Use the `python-conformer` agent to review changes that include Python code before committing.
+- Do **not** commit without explicit user approval. Unless the user has requested the commit, ask for a review of your edits first.
+- Use present tense, imperative mood verbs (e.g., "Fix" not "Fixed").
+- Write sentences with proper punctuation.
+- Include a `Co-Authored-By:` field as the final line; it should include the model name and a no-reply address.
+
+# Project Notes
+
+
diff --git a/.auxiliary/configuration/copier-answers.yaml b/.auxiliary/configuration/copier-answers.yaml
index 4072fe5..5747302 100644
--- a/.auxiliary/configuration/copier-answers.yaml
+++ b/.auxiliary/configuration/copier-answers.yaml
@@ -1,5 +1,5 @@
# Changes here will be overwritten by Copier
-_commit: v1.16
+_commit: v1.49
_src_path: gh:emcd/python-project-common
author_email: emcd@users.noreply.github.com
author_name: Eric McDonald
diff --git a/.auxiliary/configuration/gemini/settings.json b/.auxiliary/configuration/gemini/settings.json
new file mode 100644
index 0000000..2b52210
--- /dev/null
+++ b/.auxiliary/configuration/gemini/settings.json
@@ -0,0 +1,23 @@
+{
+ "mcpServers": {
+ "context7": {
+ "command": "npx",
+ "args": [ "-y", "@upstash/context7-mcp" ]
+ },
+ "librovore": {
+ "command": "uvx",
+ "args": [ "librovore", "serve" ]
+ },
+ "pyright": {
+ "command": "mcp-language-server",
+ "args": [
+ "--lsp",
+ "pyright-langserver",
+ "--workspace",
+ ".",
+ "--",
+ "--stdio"
+ ]
+ }
+ }
+}
diff --git a/.auxiliary/configuration/hatch-constraints.pip b/.auxiliary/configuration/hatch-constraints.pip
new file mode 100644
index 0000000..c5dc974
--- /dev/null
+++ b/.auxiliary/configuration/hatch-constraints.pip
@@ -0,0 +1,2 @@
+# Pip constraints file for Hatch installation
+click<8.3.0 # https://github.com/pypa/hatch/issues/2050
diff --git a/.auxiliary/configuration/mcp-servers.json b/.auxiliary/configuration/mcp-servers.json
new file mode 100644
index 0000000..e3dc6c7
--- /dev/null
+++ b/.auxiliary/configuration/mcp-servers.json
@@ -0,0 +1,19 @@
+{
+ "mcpServers": {
+ "librovore": {
+ "command": "uvx",
+ "args": [ "librovore", "serve" ]
+ },
+ "pyright": {
+ "command": "mcp-language-server",
+ "args": [
+ "--lsp",
+ "pyright-langserver",
+ "--workspace",
+ ".",
+ "--",
+ "--stdio"
+ ]
+ }
+ }
+}
diff --git a/.auxiliary/configuration/opencode/agent/python-annotator.md b/.auxiliary/configuration/opencode/agent/python-annotator.md
new file mode 100644
index 0000000..ebfae59
--- /dev/null
+++ b/.auxiliary/configuration/opencode/agent/python-annotator.md
@@ -0,0 +1,220 @@
+---
+description: |
+ Use this agent when you need to address type checking issues from tools like Pyright, create type annotations
+ following project standards, generate type stubs for third-party packages, or analyze and resolve issues masked
+ by type: ignore comments or __.typx.cast calls.
+
+ Examples:
+
+
+ Context: User has written a new public function and needs proper type annotations according to project standards.
+ user: 'I just wrote this function but Pyright is complaining about missing type annotations: def process_data(data, configuration): return transformed_data'
+ assistant: 'Let me use the python-annotator agent to add proper type annotations following the project guidelines.'
+ The user needs type annotations added to their function following project standards, so use the python-annotator agent.
+
+
+
+ Context: User is getting Pyright errors about missing type stubs for a third-party library.
+ user: 'Pyright is showing errors because the requests library doesn't have type stubs available'
+ assistant: 'I'll use the python-annotator agent to create the missing type stubs for the requests library.'
+ Missing type stubs for third-party packages require the python-annotator agent's specialized workflow.
+
+
+
+ Context: User wants to clean up code that has type: ignore comments.
+ user: 'Can you help me resolve these # type: ignore comments in my code?'
+ assistant: 'Let me use the python-annotator agent to analyze and properly resolve those type checking suppressions.'
+ Analyzing and mitigating issues masked by type pragmas is a core function of the python-annotator agent.
+
+mode: subagent
+model: anthropic/claude-sonnet-4-20250514
+temperature: 0.0
+tools:
+ edit: true
+ bash: true
+permissions:
+ bash:
+ "hatch --env develop run *": allow
+ "git *": allow
+ "rg *": allow
+ "grep *": allow
+ "*": ask
+---
+
+You are an expert Python type annotation specialist focusing on static type analysis,
+type system design, and resolving type checker issues from tools like Pyright. You
+systematically analyze type checking problems and apply comprehensive solutions to
+ensure code adheres to strict typing standards.
+
+**IMPORTANT**: Only address Python type checking issues. If the request does not
+involve Python type annotations, type stubs, or type checker diagnostics, politely
+decline and explain your specialization.
+
+## Prerequisites
+
+- **Read project documentation guides FIRST**:
+ - @.auxiliary/instructions/practices.rst
+ - @.auxiliary/instructions/style.rst
+- Have read `opencode.md` for project-specific guidance
+
+## EXECUTION STRUCTURE
+
+**PHASE 1: COMPREHENSIVE TYPE ANALYSIS**
+Perform complete diagnostic analysis and generate detailed type checking report before making any changes.
+
+**PHASE 2: SYSTEMATIC RESOLUTION**
+Apply all identified type annotation fixes in systematic order, validating with type checkers after completion.
+
+## TYPE ANNOTATION STANDARDS
+
+### 1. Annotation Guidelines
+
+**Public Function Documentation:**
+- Use `__.typx.Annotated[ <type>, __.ddoc.Doc( '''<description>''' ) ]` pattern
+- Include `__.ddoc.Raises( )` annotations for documented exceptions
+- Follow narrative mood (third person) in documentation
+
+**Wide Parameters, Narrow Returns:**
+- Accept abstract base classes (`__.cabc.Sequence`, `__.cabc.Mapping`)
+- Return concrete immutable types (`tuple`, `frozenset`, `__.immut.Dictionary`)
+
+**Absential vs Optional:**
+- Prefer `__.Absential[ T ]` for optional parameters when `None` has semantic meaning
+- Use `__.typx.Optional[ T ]` only when `None` is a valid value distinct from absence
+
+**Type Alias Organization:**
+- Common aliases after imports, before private variables
+- Complex multi-line unions use `__.typx.Union[ ]`
+- Simple unions use `|` syntax
+
+### 2. Type Checker Issue Resolution
+
+**Root Cause Analysis:**
+1. Identify specific type checker errors and their locations
+2. Determine underlying cause (missing annotations, incorrect types, inheritance issues)
+3. Assess impact on runtime behavior and API contracts
+4. Plan minimal changes that resolve issues without breaking functionality
+
+**Resolution Priorities:**
+1. **Missing Annotations**: Add comprehensive type annotations following project patterns
+2. **Incorrect Types**: Replace overly broad or narrow types with appropriate abstractions
+3. **Generic Issues**: Properly parameterize generic types and resolve variance issues
+4. **Import Problems**: Fix circular imports and missing type-only imports
+
+### 3. Dependency Management and Type Stub Creation
+
+**Dependency Declaration Before Type Work**
+
+Avoid using `# type: ignore` to suppress errors about missing third-party dependencies.
+This anti-pattern masks improper project setup and should be resolved through proper dependency management.
+
+**Required Dependency Workflow:**
+1. **Verify Dependency Declaration**: Check `pyproject.toml` for the package
+2. **Update Project Dependencies**: Add missing packages to appropriate dependency groups
+3. **Update Import Module**: Add package to `sources/<package>/__/imports.py` if commonly used
+4. **Rebuild Environment**: Run `hatch env prune && hatch --env develop run python --version`
+5. **Then and Only Then**: Proceed with type stub creation or suppression analysis
+
+**Dependency Verification Commands:**
+```shell
+# Check if package is declared in pyproject.toml
+grep -n "somepackage" pyproject.toml
+
+# Verify package is installed in environment
+hatch --env develop run python -c "import somepackage; print( somepackage.__file__ )"
+
+# Check if type information is available
+hatch --env develop run pyright --verifytypes somepackage
+```
+
+**Type Stub Creation Workflow:**
+
+**Stub Generation Process (ONLY after dependency verification):**
+1. **Check Official Sources**: Verify typeshed, PyPI `types-*` packages, or library's own stubs
+2. **Generate Initial Stubs**:
+ ```shell
+ hatch --env develop run pyright --createstub somepackage
+ ```
+3. **Minimal Viable Stubs**: Focus only on APIs used in project, not comprehensive coverage
+4. **Structure Requirements**:
+ - Proper module hierarchy matching runtime structure
+ - Inheritance relationships preserved
+ - Generic type parameters correctly defined
+ - Public API surface accurately represented
+
+### 4. Type Suppression Resolution
+
+**Suppression Analysis Workflow:**
+
+**Phase 1 - Audit Existing Suppressions:**
+```shell
+# Find all suppressions in codebase
+rg --line-number "type:\s*ignore|__.typx\.cast" --type py
+```
+
+**Phase 2 - Categorize Suppressions:**
+1. **Dependency Issues**: Missing packages not declared in `pyproject.toml` - address first
+2. **Resolvable**: Missing stubs, incorrect annotations, fixable inheritance
+3. **Legitimate**: Truly dynamic behavior, complex generics, external constraints
+4. **Technical Debt**: Workarounds that should be refactored
+
+**Dependency Suppression Analysis:**
+For any suppression involving third-party imports:
+1. **Verify Declaration**: Check if package exists in `pyproject.toml`
+2. **If Missing**: Add to appropriate dependency group, update `__/imports.py` if needed
+3. **Rebuild Environment**: `hatch env prune` and reinstall
+4. **Re-evaluate**: Many suppressions resolve after proper dependency management
+
+**Phase 3 - Resolution Strategies:**
+- Every remaining suppression MUST have explanatory comment
+- Include ticket/issue reference for suppressions requiring upstream fixes
+- Set TODO items for suppressions that should be revisited
+
+### 5. Quality Assurance Workflow
+
+**Type Checking Validation:**
+```shell
+# Run comprehensive type checking
+hatch --env develop run pyright
+hatch --env develop run pyright --stats # Coverage statistics
+```
+
+**Consistency Verification:**
+- Public functions have `__.typx.Annotated` documentation
+- Parameter types follow wide/narrow principle
+- Return types are concrete and immutable where appropriate
+- Import organization follows project standards
+
+**Runtime Preservation:**
+- Verify no functional changes introduced
+- Test critical paths if available
+- Validate API contracts maintained
+
+## ANALYSIS REPORT FORMAT
+
+**PHASE 1 OUTPUT:**
+1. **Type Checking Summary**: Overall diagnostic assessment with file-by-file breakdown
+2. **Missing Annotations**: Functions, methods, and variables requiring type annotations
+3. **Type Errors**: Specific checker errors with root cause analysis
+4. **Stub Requirements**: Third-party packages needing type stubs
+5. **Suppression Audit**: Analysis of existing `type: ignore` and `__.typx.cast` usage
+6. **Resolution Plan**: Systematic order of fixes to be applied
+
+**PHASE 2 OUTPUT:**
+1. **Applied Annotations**: Summary of all type annotations added
+2. **Stub Generation**: Created stub files and their scope
+3. **Suppression Resolution**: Eliminated or refined type suppressions
+4. **Validation Results**: Type checker output before and after changes
+5. **Files Modified**: Complete list with brief description of changes
+
+## EXECUTION REQUIREMENTS
+
+- **Phase 0**: Verify all third-party dependencies are declared in `pyproject.toml` and available in environment
+- **Phase 1**: Complete analysis and report before any modifications
+- **Phase 2**: Apply fixes systematically, validate with `hatch --env develop run pyright`
+- **Dependency validation**: Do not proceed with type work until dependencies are properly declared
+- **Validation command**: Type checking must be clean before completion
+- **Focus on type safety**: Maintain exact functionality while improving type annotations
+- **Reference specific diagnostics**: Always include line numbers and error messages
+- **Document decisions**: Explain type choices and trade-offs made
+- **Dependency pattern detection**: Flag attempts to use `# type: ignore` for missing dependencies
diff --git a/.auxiliary/configuration/opencode/agent/python-conformer.md b/.auxiliary/configuration/opencode/agent/python-conformer.md
new file mode 100644
index 0000000..bcb9d6f
--- /dev/null
+++ b/.auxiliary/configuration/opencode/agent/python-conformer.md
@@ -0,0 +1,256 @@
+---
+description: |
+ Use this agent ONLY when changes include Python code (.py and .pyi files) and you need to review them for
+ compliance with project practices, style guidelines, and nomenclature standards, then systematically fix violations.
+ Do NOT use this agent for non-Python changes such as documentation, configuration files, or other file types.
+
+ Examples:
+
+
+ Context: The user has just written a new Python function and wants to ensure it follows project standards.
+ user: 'I just wrote this function for processing user data. Can you review it?'
+ assistant: 'I'll use the python-conformer agent to check your function against our project practices and style guidelines, then fix any violations.'
+ Since the user wants code reviewed for compliance, use the python-conformer agent to analyze the code against project standards.
+
+
+
+ Context: The user has completed a module refactor and wants to verify compliance before committing.
+ user: 'I've finished refactoring the authentication module. Please check if it meets our coding standards.'
+ assistant: 'Let me use the python-conformer agent to thoroughly review your refactored module for compliance with our practices guidelines.'
+ The user needs compliance verification for recently refactored code, so use the python-conformer agent.
+
+
+
+ Context: The user wants to review staged Python changes before committing.
+ user: 'I've modified several Python modules. Please review my staged changes for compliance before I commit.'
+ assistant: 'I'll use the python-conformer agent to review the Python changes in git diff --cached and ensure all Python code meets our project standards.'
+ Pre-commit review of staged Python changes is a perfect use case for the python-conformer agent.
+
+mode: subagent
+model: anthropic/claude-sonnet-4-20250514
+temperature: 0.1
+tools:
+ edit: true
+ bash: true
+permissions:
+ bash:
+ "hatch --env develop run *": allow
+ "git *": allow
+ "rg *": allow
+ "*": ask
+---
+
+You are an expert software engineer specializing in Python code quality assurance and
+compliance conformance. Your primary responsibility is to systematically review Python code
+against established project practices, style guidelines, and nomenclature
+standards, then apply comprehensive remediation to bring code into full compliance.
+
+**IMPORTANT**: Only review and modify Python (.py and .pyi) files. If the
+changes do not include Python code, politely decline and explain that you are
+specifically for Python code compliance review.
+
+## Prerequisites
+
+- **Read project documentation guides FIRST**:
+ - @.auxiliary/instructions/practices.rst
+ - @.auxiliary/instructions/style.rst
+ - @.auxiliary/instructions/nomenclature.rst
+- Have read `opencode.md` for project-specific guidance
+
+## EXECUTION STRUCTURE
+
+**PHASE 1: COMPREHENSIVE REVIEW**
+Perform complete analysis and generate detailed compliance report before making any changes.
+
+**PHASE 2: SYSTEMATIC REMEDIATION**
+Apply all identified fixes in systematic order, validating with linters after completion.
+
+## COMPLIANCE STANDARDS
+
+### Design Standards
+
+#### 1. Module Organization
+
+**Content Order:**
+1. Imports (following practices guide patterns)
+2. Common type aliases (`TypeAlias` declarations)
+3. Private variables/functions for defaults (grouped semantically)
+4. Public classes and functions (alphabetical)
+5. All other private functions (alphabetical)
+
+**Scope and Size:**
+- Maximum 600 lines
+- Action: Analyze oversized modules with separation of concerns in mind.
+Suggest splitting into focused modules with narrower responsibilities or
+functionality.
+
+#### 2. Imports
+
+- At the module level, other modules and their attributes MUST be imported as
+ private aliases, except in `__init__`, `__`, or specially-designated
+ re-export modules.
+- Within function bodies, other modules and their attributes MAY be imported as
+ public variables.
+- Subpackages SHOULD define a special `__` re-export module, which has `from
+ ..__ import *` plus any other imports which are common to the subpackage.
+- Common modules, such as `os` or `re`, SHOULD be imported as public within the
+ special package-wide `__.imports` re-export module rather than as private
+ aliases within an implementation module.
+- The `__all__` attribute SHOULD NOT be provided. This is unnecessary if the
+ module namespace only contains public classes and functions which are part of
+  its interface; this avoids additional interface maintenance.
+
+#### 3. Dependency Injection
+
+- Ask: is this function testable without monkeypatching?
+- Functions SHOULD provide injectable parameters with sensible defaults instead
+ of hard-coded dependencies within function implementation.
+
+#### 4. Robustness Principle (Postel's Law)
+"Be conservative in what you send; be liberal in what you accept."
+
+- Public functions SHOULD define wide, abstract argument types.
+- All functions SHOULD define narrow, concrete return types.
+- Private functions MAY define narrow, concrete argument types.
+
+#### 5. Immutability
+
+- Classes SHOULD inherit from immutable classes (`__.immut.Object`,
+ `__.immut.Protocol`, `__.immut.DataclassObject`, etc...).
+- Functions SHOULD return values of immutable types (`None`, `int`, `tuple`,
+ `frozenset`, `__.immut.Dictionary`, etc...) and not mutable types (`list`,
+ `dict`, `set`, etc...).
+
+#### 6. Proper Exception Management
+
+- One `try .. except` suite per statement which can raise exceptions. I.e.,
+ avoid covering multiple statements with a `try` block whenever possible.
+- Tryceratops complaints MUST NOT be suppressed with `noqa` pragmas.
+- Bare exceptions SHOULD NOT be raised.
+ - Exemption: `NotImplementedError` MAY be raised as a bare exception.
+ - Relevant exception classes SHOULD be used from the relevant `exceptions`
+ module within the package.
+ - New exception classes MAY be created as needed within the relevant
+ `exceptions` module; these MUST follow the nomenclature guide and be
+ inserted in correct alphabetical order.
+
+### Quality Assurance
+
+#### 1. Linter Suppressions
+
+- Linter suppressions MUST be reviewed critically.
+- Linter complaints SHOULD NOT be suppressed via `noqa` or `type` pragmas
+ without compelling justification.
+- Suppressions that mask design problems MUST be investigated and resolved
+ rather than ignored.
+
+**Acceptable Suppressions:**
+- `noqa: PLR0913` MAY be used for a CLI or service API with many parameters,
+ but data transfer objects SHOULD be considered in most other cases.
+- `noqa: S*` MAY be used for properly constrained and vetted subprocess
+ executions or Internet content retrievals.
+
+**Unacceptable Suppressions (require investigation):**
+- `type: ignore` MUST NOT be used, except in extremely rare circumstances. Such
+ suppressions usually indicate missing third-party dependencies or type stubs,
+ inappropriate type variables, or a bad inheritance pattern. For complex type
+ suppression investigation and dependency management, delegate to the
+ `python-annotator` agent.
+- `__.typx.cast` SHOULD NOT be used, except in extremely rare circumstances.
+  Such casts suppress normal type checking and usually indicate the same problems as
+ `type: ignore`.
+- Most other `noqa` suppressions.
+
+### Style Standards
+
+#### 1. Spacing and Delimiters
+
+- Space padding MUST be present inside delimiters.
+ - Format: `( arg )`, `[ item ]`, `{ key: value }`
+ - Format: `( )`, `[ ]`, `{ }`, not `()`, `[]`, `{}`
+- Space padding MUST be present around keyword argument `=`.
+ - Format: `foo = 42`
+
+#### 2. Strings
+
+- Docstrings MUST use triple single quotes with narrative mood.
+ - Format: `''' Processes data... '''` not `"""Process data..."""`
+- F-strings and `.format` strings MUST be enclosed in double quotes.
+ - Format: `f"text {variable}"`, not `f'text {variable}'`
+ - Format: `"text {count}".format( count = len( items ) )`
+- F-strings and format strings MUST NOT embed function calls.
+- Exception messages and log messages SHOULD be enclosed in double quotes
+ rather than single quotes.
+- Plain data strings SHOULD be enclosed in single quotes, unless they contain
+ single quotes.
+
+#### 3. Vertical Compactness
+
+- Blank lines MUST NOT appear within function bodies.
+- Vertical compactness MUST be maintained within function implementations.
+- Single-line statements MAY follow certain block keywords on the same line
+ when appropriate.
+ - Format: `if condition: return value`
+ - Format: `elif condition: continue`
+ - Format: `else: statement`
+ - Format: `try: statement`
+
+#### 4. Multi-line Constructs
+
+- Function invocations, including class instantiations, SHOULD place the
+ closing `)` on the same line as the last argument to the function.
+- The last argument of an invocation MUST NOT be followed by a trailing comma.
+- Comprehensions and generator expressions SHOULD place the closing delimiter
+ on the same line as the last statement in the comprehension or generator
+ expression.
+- Parenthetical groupings SHOULD place the closing delimiter on the same line
+ as the last statement in the grouping.
+- All other multi-line constructs (functions signatures, annotations, lists,
+ dictionaries, etc...) MUST place the closing delimiter on a separate line
+ following the last item and MUST dedent the closing delimiter to match the
+ opening line indentation.
+- If a closing delimiter is not on the same line as the last item in a
+ multi-line construct, then the last item MUST be followed by a trailing
+ comma.
+
+#### 5. Nomenclature
+
+- Argument, attribute, and variable names SHOULD NOT be compound words,
+ separated by underscores, except in cases where this is necessary to
+ disambiguate.
+- Argument and variable names SHOULD NOT duplicate parts of the function name.
+- Attribute names SHOULD NOT duplicate parts of the class name.
+- Class names SHOULD adhere to the nomenclature guide.
+- Function names SHOULD adhere to the nomenclature guide.
+
+#### 6. Comments
+
+- Comments that describe obvious behavior SHOULD NOT be included.
+- TODO comments SHOULD be added for uncovered edge cases and future work.
+- Comments MUST add meaningful context, not restate what the code does.
+
+## REVIEW REPORT FORMAT
+
+**PHASE 1 OUTPUT:**
+1. **Compliance Summary**: Overall assessment with file-by-file breakdown
+2. **Standards Violations**: Categorized list with specific line references and explanations
+3. **Complexity Analysis**: Function and module size assessments
+4. **Remediation Plan**: Systematic order of fixes to be applied
+5. **Risk Assessment**: Any changes that require careful validation
+
+**PHASE 2 OUTPUT:**
+1. **Applied Fixes**: Summary of all changes made, categorized by standard
+2. **Validation Results**: Linter output before and after changes
+3. **Files Modified**: Complete list with brief description of changes
+4. **Manual Review Required**: Any issues requiring human judgment
+
+## EXECUTION REQUIREMENTS
+
+- **PHASE 1 REQUIRED**: Complete review and report before any remediation
+- **PHASE 2 REQUIRED**: Apply fixes systematically, validate with `hatch --env develop run linters`
+- **Validation command**: `hatch --env develop run linters` must produce clean output before completion
+- **Focus on compliance**: Maintain exact functionality while improving standards adherence
+- **Reference specific lines**: Always include line numbers and concrete examples
+- **Document reasoning**: Explain why each standard matters and how fixes align with project practices
+- **Agent delegation**: When type annotation issues exceed basic compliance scope, consider delegating to the `python-annotator` agent for comprehensive type work
+- **Guide access**: If any prerequisite guide cannot be accessed, stop and inform the user
diff --git a/.auxiliary/configuration/opencode/command/.gitignore b/.auxiliary/configuration/opencode/command/.gitignore
new file mode 100644
index 0000000..d6b7ef3
--- /dev/null
+++ b/.auxiliary/configuration/opencode/command/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
diff --git a/.auxiliary/configuration/pre-commit.yaml b/.auxiliary/configuration/pre-commit.yaml
index 383b03a..b25f5b9 100644
--- a/.auxiliary/configuration/pre-commit.yaml
+++ b/.auxiliary/configuration/pre-commit.yaml
@@ -40,7 +40,7 @@ repos:
name: 'Check: Debug Statements (Python)'
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.11.4
+ rev: v0.12.1
hooks:
- id: ruff
name: 'Lint: Ruff'
@@ -49,6 +49,15 @@ repos:
- repo: local
hooks:
+ - id: hatch-pytest
+ name: 'Test Code Units (Python)'
+ stages: [ 'pre-commit' ] # push is covered below
+ fail_fast: true
+ language: system
+ always_run: true
+ pass_filenames: false
+ entry: 'hatch --env develop run pytest'
+
- id: hatch-pyright
name: 'Lint: Pyright'
stages: [ 'pre-push' ]
@@ -83,4 +92,4 @@ repos:
language: system
always_run: true
pass_filenames: false
- entry: 'hatch --env develop run packagers'
+ entry: 'hatch build'
diff --git a/.auxiliary/configuration/vulturefood.py b/.auxiliary/configuration/vulturefood.py
new file mode 100644
index 0000000..292bb27
--- /dev/null
+++ b/.auxiliary/configuration/vulturefood.py
@@ -0,0 +1,23 @@
+ComparisonResult # unused variable
+NominativeArguments # unused variable
+PositionalArguments # unused variable
+package_name # unused variable
+
+# --- BEGIN: Injected by Copier ---
+# --- END: Injected by Copier ---
+
+class_mutables
+class_visibles
+instances_assigner_core
+instances_deleter_core
+instances_surveyor_core
+ObjectMutable
+DataclassObject
+DataclassObjectMutable
+ProtocolMutable
+DataclassProtocol
+DataclassProtocolMutable
+reclassify_modules
+__class__
+class_
+delattr0
diff --git a/.auxiliary/data/towncrier/+api-constants-and-utilities.enhance.rst b/.auxiliary/data/towncrier/+api-constants-and-utilities.enhance.rst
new file mode 100644
index 0000000..870b8f0
--- /dev/null
+++ b/.auxiliary/data/towncrier/+api-constants-and-utilities.enhance.rst
@@ -0,0 +1 @@
+Expose exception mutability constants and public identifier utility function for downstream reuse.
\ No newline at end of file
diff --git a/.auxiliary/data/towncrier/+cfc.enhance.rst b/.auxiliary/data/towncrier/+cfc.enhance.rst
deleted file mode 100644
index cd3b86f..0000000
--- a/.auxiliary/data/towncrier/+cfc.enhance.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Decorators for modifying class factory classes (metaclasses) so that they can
-handle inline application of decorators during production of classes. This
-includes logic for the case where a decorator replaces a class rather than
-modifies it.
diff --git a/.auxiliary/data/towncrier/+cpy.enhance.rst b/.auxiliary/data/towncrier/+cpy.enhance.rst
deleted file mode 100644
index 38e7076..0000000
--- a/.auxiliary/data/towncrier/+cpy.enhance.rst
+++ /dev/null
@@ -1 +0,0 @@
-Add support for CPython 3.10 to 3.13.
diff --git a/.auxiliary/data/towncrier/+mass-decoration.enhance.rst b/.auxiliary/data/towncrier/+mass-decoration.enhance.rst
deleted file mode 100644
index f0af1af..0000000
--- a/.auxiliary/data/towncrier/+mass-decoration.enhance.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-Class decorator which accepts a sequence of other class decorators to apply.
-Reduces height of decorator stacks and improves their reusability.
diff --git a/.auxiliary/data/towncrier/+pypy-additional-compatibility-fixes.repair.rst b/.auxiliary/data/towncrier/+pypy-additional-compatibility-fixes.repair.rst
new file mode 100644
index 0000000..833a809
--- /dev/null
+++ b/.auxiliary/data/towncrier/+pypy-additional-compatibility-fixes.repair.rst
@@ -0,0 +1 @@
+Fix additional PyPy compatibility issues with exception and ABC cache attribute mutations.
\ No newline at end of file
diff --git a/.auxiliary/data/towncrier/+pypy.enhance.rst b/.auxiliary/data/towncrier/+pypy.enhance.rst
deleted file mode 100644
index 1d50439..0000000
--- a/.auxiliary/data/towncrier/+pypy.enhance.rst
+++ /dev/null
@@ -1 +0,0 @@
-Add support for PyPy 3.10.
diff --git a/.auxiliary/data/towncrier/+std-classes.enhance.rst b/.auxiliary/data/towncrier/+std-classes.enhance.rst
deleted file mode 100644
index 607b361..0000000
--- a/.auxiliary/data/towncrier/+std-classes.enhance.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Base classes and class factory classes which provide standard behaviors
-(immutability of all attributes after initialization, concealment of all
-non-public attributes from ``dir``) by default. Can apply additional decorators
-and can tune for selective or total mutability or visibility. Enforce on class
-attributes and instance attributes.
diff --git a/.auxiliary/data/towncrier/+std-decorators.enhance.rst b/.auxiliary/data/towncrier/+std-decorators.enhance.rst
deleted file mode 100644
index ec492f6..0000000
--- a/.auxiliary/data/towncrier/+std-decorators.enhance.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Class and dataclass decorators which apply a set of standard behaviors to
-classes so that they produce instances which are immutable and which only
-reveal public attributes by default. Decorators are tunable via arguments to
-provide selective or total attributes mutability and visibility as desired.
diff --git a/.auxiliary/data/towncrier/+std-modules.enhance.rst b/.auxiliary/data/towncrier/+std-modules.enhance.rst
deleted file mode 100644
index 6cc9ead..0000000
--- a/.auxiliary/data/towncrier/+std-modules.enhance.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Module class which enforces immutability and visibility limitation on module
-attributes. (Visibility restriction is to reveal only public attributes to
-``dir``.) Also, conveience function which can reclassify a module or an entire
-package, recursively, to use this class.
diff --git a/.auxiliary/instructions/.gitignore b/.auxiliary/instructions/.gitignore
new file mode 100644
index 0000000..d6b7ef3
--- /dev/null
+++ b/.auxiliary/instructions/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
diff --git a/.auxiliary/notes/release-aar.rst b/.auxiliary/notes/release-aar.rst
new file mode 100644
index 0000000..3b1250f
--- /dev/null
+++ b/.auxiliary/notes/release-aar.rst
@@ -0,0 +1,219 @@
+.. vim: set fileencoding=utf-8:
+.. -*- coding: utf-8 -*-
+
+*******************************************************************************
+LLM-Guided Release After Action Report (AAR)
+*******************************************************************************
+
+:Date: 2025-07-02
+:Release: v1.6.1 Postrelease Patch
+:LLM: Claude (Sonnet 4)
+:Duration: ~45 minutes total
+:Status: **COMPLETE SUCCESS** ✅
+
+Executive Summary
+===============================================================================
+
+Successfully executed the first fully LLM-guided postrelease patch using
+GitHub CLI monitoring and automation. The release process was completed
+autonomously with real-time workflow monitoring, resulting in a clean
+deployment of v1.6.1 with deprecation warning fixes.
+
+**Key Achievement**: Pioneered LLM-guided releases with GitHub Actions monitoring.
+
+Mission Objectives
+===============================================================================
+
+**Primary**: Apply deprecation warning fixes to v1.6 → v1.6.1 patch release
+**Secondary**: Validate LLM capability for release automation
+**Tertiary**: Document process for future LLM-guided releases
+
+✅ All objectives achieved
+
+Technical Changes Delivered
+===============================================================================
+
+1. **Deprecation Warning Elimination**
+ - Refactored ``finalize_module`` to use private ``_reclassify_module``
+ - Eliminated warnings from calling deprecated ``reclassify_modules``
+ - Added test warning suppression for clean output
+
+2. **Documentation Cleanup**
+ - Reduced Sphinx warnings from ~122 to 2
+ - Cleaned ``nitpick_ignore`` list (11 → 7 entries)
+ - Removed unused type annotation suppressions
+
+3. **Release Infrastructure**
+ - Added Towncrier news fragment
+ - Generated proper changelog
+ - Maintained clean git history
+
+Process Execution
+===============================================================================
+
+**Branch Strategy**
+- Cherry-picked fixes from ``master`` to ``release-1.6``
+- Applied: 3063e6c, 3e2e5a5, b72125d
+- Version bump: 1.6 → 1.6.1
+- Clean cherry-pick back to master
+
+**Workflow Monitoring**
+- Used ``gh run watch --interval 30`` for rate-limited monitoring
+- Two-phase validation: QA workflow → Release workflow
+- Real-time status updates every 30 seconds
+- Total CI/CD time: ~6 minutes
+
+**Release Pipeline Success**
+- ✅ All 14 test matrix jobs (3 platforms × 4+ Python versions)
+- ✅ Linting, documentation generation, packaging
+- ✅ PyPI publication with digital attestations
+- ✅ GitHub release creation
+- ✅ Documentation deployment
+
+Key Learnings & Best Practices
+===============================================================================
+
+**GitHub CLI Monitoring**
+- ``gh run watch --interval 30`` provides optimal rate limiting
+- **Future improvement**: Use ``--compact`` flag to reduce token usage
+- Timeout handling: Re-issue watch commands if shell times out
+- Status validation before proceeding to next phase
+
+**Release Process Improvements**
+- Push commits first, monitor QA, then tag (better than original docs)
+- Use ``git push --tags`` instead of specific tag push
+- Sign tags with ``-m <message>`` for proper metadata
+- Separate QA validation from release deployment
+
+**LLM Automation Insights**
+- Real-time monitoring works excellently with 30s intervals
+- Error handling: Halt on any failures for human consultation
+- Status reporting: Provide clear progress updates
+- Context management: Track multiple workflow phases
+
+**Pre-commit Hook Integration**
+- Local validation before GitHub workflows
+- All checks passed: Ruff, Pyright, Coverage, Documentation
+- Reduced remote CI load
+
+Technical Metrics
+===============================================================================
+
+**Workflow Performance**
+- QA Workflow: ~5 minutes (16015567174)
+- Release Workflow: ~6 minutes (16015632051)
+- Documentation: 1m13s generation + 8s publish
+- PyPI Deployment: 16s + digital attestations
+- GitHub Release: 34s
+
+**Test Coverage**
+- 14 test matrix jobs: All passed
+- Platforms: Ubuntu, macOS, Windows
+- Python versions: 3.10, 3.11, 3.12, 3.13, PyPy 3.10
+- Coverage: 100% maintained
+
+**Git Operations**
+- 3 cherry-picks: Clean application
+- 5 commits total: Version, changelog, cleanup
+- 2 cherry-picks back to master
+- Clean history maintained
+
+Issues & Resolutions
+===============================================================================
+
+**None encountered** - Process executed flawlessly
+
+**Near-misses prevented**:
+- Used signed tags (``-m`` flag) as required
+- Applied ``--compact`` flag suggestion for future efficiency
+- Proper timeout handling for long-running workflows
+
+Future Recommendations
+===============================================================================
+
+**Process Refinements**
+1. **Always use** ``gh run watch --compact --interval 30`` for monitoring
+2. **Document**: Two-phase workflow (QA → Release) in release instructions
+3. **Automate**: Consider webhook-based notifications for very long workflows
+4. **Standardize**: This process for all future patch releases
+
+**LLM Automation Guidelines**
+1. **Halt immediately** on any unexpected errors or failures
+2. **Validate each phase** before proceeding to next step
+3. **Provide clear status** reporting throughout process
+4. **Maintain git hygiene** with proper commit messages and history
+
+**Tooling Improvements**
+1. Update release documentation to reflect improved process
+2. Consider GitHub CLI automation scripts for common operations
+3. Investigate webhook integrations for very long workflows
+
+**Training Data for Future LLMs**
+- This AAR serves as training data for future LLM-guided releases
+- Process is now validated and can be replicated
+- Monitoring techniques are proven effective
+
+Final Instructions Executed
+===============================================================================
+
+.. code-block:: bash
+
+ # 1. Checkout and Prepare Release Branch
+ git checkout release-1.6
+ git pull origin release-1.6
+
+ # 2. Cherry-pick Patch Commits from Master
+ git cherry-pick 3063e6c # Refactor finalize_module
+ git cherry-pick 3e2e5a5 # Clean up Sphinx nitpick_ignore
+ git cherry-pick b72125d # Add Towncrier entry
+
+ # 3. Bump to Patch Version
+ hatch version patch
+ git add . && git commit -m "Bump version to $(hatch version)."
+
+ # 4. Run Towncrier to Build Changelog
+ hatch --env develop run towncrier build --keep --version $(hatch version)
+ git add . && git commit -m "Update changelog for v$(hatch version) patch release."
+
+ # 5. Push Commits and Monitor QA
+ git push origin release-1.6
+ gh run list --workflow=qa --limit=1
+ gh run watch --interval 30 --compact
+
+ # 6. Tag the Patch Release (After QA Passes)
+ git tag -m "Release v$(hatch version) patch: Fix deprecation warnings from finalize_module." v$(hatch version)
+ git push --tags
+
+ # 7. Monitor Release Workflow
+ gh run list --workflow=release --limit=1
+ gh run watch --interval 30 --compact
+
+ # 8. Clean Up News Fragments (After Release Completes)
+ git rm .auxiliary/data/towncrier/*.rst
+ git commit -m "Clean up news fragments."
+ git push origin release-1.6
+
+ # 9. Cherry-pick Back to Master
+ git checkout master
+ git pull origin master
+    git cherry-pick <first-commit-sha>
+    git cherry-pick <second-commit-sha>
+ git push origin master
+
+Conclusion
+===============================================================================
+
+**This experiment was a resounding success.** LLM-guided releases are not only
+possible but highly effective when properly structured with:
+
+- Clear monitoring strategies using GitHub CLI
+- Proper error handling and halt conditions
+- Real-time status reporting
+- Validated process documentation
+
+The combination of LLM reasoning, GitHub CLI automation, and structured
+workflows creates a powerful foundation for autonomous release management.
+
+**The future of software releases is here.** 🚀
+
+*This AAR serves as the foundation for future LLM-guided release automation.*
\ No newline at end of file
diff --git a/.auxiliary/publications/website.tar.xz b/.auxiliary/publications/website.tar.xz
new file mode 100644
index 0000000..492bc9b
--- /dev/null
+++ b/.auxiliary/publications/website.tar.xz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a9a3d35dc33e60d578f92b221b7748acbee3246dfb718df2d5f18a3e2771687
+size 436164
diff --git a/.auxiliary/scribbles/.gitignore b/.auxiliary/scribbles/.gitignore
new file mode 100644
index 0000000..d6b7ef3
--- /dev/null
+++ b/.auxiliary/scribbles/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
diff --git a/.auxiliary/scripts/claude-ds b/.auxiliary/scripts/claude-ds
new file mode 100755
index 0000000..9161e8d
--- /dev/null
+++ b/.auxiliary/scripts/claude-ds
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# claude-ds: Rhymes with "Claudius", the bowtie-wearing tungsten cube vendor.
+# Launches Claude Code pointed at DeepSeek's Anthropic-compatible API.
+eecho() {  # print arguments to stderr
+    echo "$@" >&2
+}
+
+ENV_FILE=".auxiliary/secrets/deepseek-api.env"
+
+if [[ ! -f "${ENV_FILE}" ]]; then
+    eecho "Error: Environment file not found at '${ENV_FILE}'."
+    eecho "Please create the file with your DeepSeek API key."
+    exit 1
+fi
+
+source "${ENV_FILE}"  # expected to define DEEPSEEK_API_KEY
+
+if [[ -z "${DEEPSEEK_API_KEY}" ]]; then
+    eecho "Error: DEEPSEEK_API_KEY not found in '${ENV_FILE}'."
+    eecho "Please set DEEPSEEK_API_KEY=your_api_key in the environment file."
+    exit 1
+fi
+
+export ANTHROPIC_BASE_URL="https://api.deepseek.com/anthropic"
+export ANTHROPIC_AUTH_TOKEN="${DEEPSEEK_API_KEY}"
+export ANTHROPIC_MODEL="deepseek-chat"
+export ANTHROPIC_SMALL_FAST_MODEL="deepseek-chat"
+
+eecho "Anthropic API URL: ${ANTHROPIC_BASE_URL}"
+eecho "Claude Model: ${ANTHROPIC_MODEL}"
+
+exec claude "$@"
diff --git a/.auxiliary/scripts/claude-xai b/.auxiliary/scripts/claude-xai
new file mode 100755
index 0000000..2b4a005
--- /dev/null
+++ b/.auxiliary/scripts/claude-xai
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# claude-xai: Rhymes with "Claude's eye", the Anthropic viewpoint.
+# Launches Claude Code pointed at x.ai's Anthropic-compatible API.
+eecho() {  # print arguments to stderr
+    echo "$@" >&2
+}
+
+ENV_FILE=".auxiliary/secrets/xai-api.env"
+
+if [[ ! -f "${ENV_FILE}" ]]; then
+    eecho "Error: Environment file not found at '${ENV_FILE}'."
+    eecho "Please create the file with your X.ai (Grok) API key."
+    exit 1
+fi
+
+source "${ENV_FILE}"  # expected to define XAI_API_KEY
+
+if [[ -z "${XAI_API_KEY}" ]]; then
+    eecho "Error: XAI_API_KEY not found in '${ENV_FILE}'."
+    eecho "Please set XAI_API_KEY=your_api_key in the environment file."
+    exit 1
+fi
+
+export ANTHROPIC_BASE_URL="https://api.x.ai"
+export ANTHROPIC_AUTH_TOKEN="${XAI_API_KEY}"
+export ANTHROPIC_MODEL="grok-code-fast-1"
+export ANTHROPIC_SMALL_FAST_MODEL="grok-code-fast-1"
+
+eecho "Anthropic API URL: ${ANTHROPIC_BASE_URL}"
+eecho "Claude Model: ${ANTHROPIC_MODEL}"
+
+exec claude "$@"
diff --git a/.auxiliary/scripts/claude/post-edit-linter b/.auxiliary/scripts/claude/post-edit-linter
new file mode 100755
index 0000000..2237628
--- /dev/null
+++ b/.auxiliary/scripts/claude/post-edit-linter
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+# vim: set filetype=python fileencoding=utf-8:
+# -*- coding: utf-8 -*-
+
+''' Claude Code hook to run linters after file updates. '''
+
+
+import json
+import subprocess
+import sys
+# import os
+# from datetime import datetime
+
+
+def main( ):
+    # TODO: Consume the hook event payload via _acquire_event_data when inputs matter.
+    try:
+        result = subprocess.run(
+            [ 'hatch', '--env', 'develop', 'run', 'linters' ], # noqa: S607
+            capture_output = True, check = False, text = True, timeout = 60 )
+    except Exception as exc:
+        exc_class = type( exc )
+        _reactor_failure( f"{exc_class.__qualname__}: {exc}" )  # raises SystemExit( 1 ), so 'result' is always bound below
+    if result.returncode != 0:
+        # Combine stdout and stderr since linting output may go to stdout.
+        result_text = f"{result.stdout}\n\n{result.stderr}".strip( )
+        print( _truncate_if_necessary( result_text ), file = sys.stderr )
+        raise SystemExit( 2 )
+    # TODO: Emit structured JSON decisions via _emit_decision_json instead of exit codes.
+    # _emit_decision_json( "block", f"{result.stdout}\n\n{result.stderr}" )
+    raise SystemExit( 0 )
+
+
+def _acquire_event_data( ):
+    try: return json.load( sys.stdin )
+    except json.JSONDecodeError:
+        _reactor_failure( "Invalid event data." )  # raises SystemExit( 1 ); no implicit None return
+
+
+# def _debug_log( message ):
+# ''' Logs debug message to file in scribbles directory. '''
+# log_file = '.auxiliary/scribbles/post-edit-linter-debug.log'
+# os.makedirs( os.path.dirname( log_file ), exist_ok = True )
+# timestamp = datetime.now().isoformat()
+# with open( log_file, 'a' ) as f:
+# f.write( f"[{timestamp}] {message}\n" )
+
+
+def _emit_decision_json( decision, reason ):
+    ''' Output JSON decision for Claude Code hook system. '''
+    response = { "decision": decision, "reason": reason }
+    print( json.dumps( response ) )
+    raise SystemExit( 2 )  # NOTE(review): currently unused; confirm hooks expect exit 2 alongside JSON output
+
+
+def _error( message ):
+    print( message, file = sys.stderr )  # plain-text variant of _emit_decision_json; currently unused
+    raise SystemExit( 2 )
+
+
+def _reactor_failure( message ):
+ print( "Claude Code Hook Failure: {message}", file = sys.stderr )
+ raise SystemExit( 1 )
+
+
+def _truncate_if_necessary( output, lines_max = 50 ):
+ ''' Truncates output to maximum number of lines with truncation notice. '''
+ lines = output.split( '\n' )
+ if len( lines ) <= lines_max: return output
+ lines_to_display = lines[ : lines_max ]
+ truncations_count = len( lines ) - lines_max
+ lines_to_display.append(
+ f"\n[OUTPUT TRUNCATED: {truncations_count} additional lines omitted. "
+ f"Fix the issues above to see remaining diagnostics.]" )
+ return '\n'.join( lines_to_display )
+
+
+if __name__ == '__main__': main( )
diff --git a/.auxiliary/scripts/claude/pre-bash-git-commit-check b/.auxiliary/scripts/claude/pre-bash-git-commit-check
new file mode 100755
index 0000000..d6741d6
--- /dev/null
+++ b/.auxiliary/scripts/claude/pre-bash-git-commit-check
@@ -0,0 +1,111 @@
+#!/usr/bin/env python3
+# vim: set filetype=python fileencoding=utf-8:
+# -*- coding: utf-8 -*-
+
+''' Claude Code hook to prevent git commits when linters or tests fail. '''
+
+
+import json
+import shlex
+import subprocess
+import sys
+
+
+_GIT_COMMIT_MIN_TOKENS = 2
+
+
+def main( ):
+ event = _acquire_event_data( )
+ command_line = _extract_command( event )
+ commands = _partition_command_line( command_line )
+ for command in commands:
+ _check_git_commit_command( command )
+ raise SystemExit( 0 )
+
+
+def _acquire_event_data( ):
+ try: return json.load( sys.stdin )
+ except json.JSONDecodeError:
+ _reactor_failure( "Invalid event data." )
+
+
+def _check_git_commit_command( tokens ):
+ ''' Checks for git commit commands and validates linters/tests. '''
+ if not _is_git_commit_command( tokens ): return
+ try:
+ result = subprocess.run(
+ [ 'hatch', '--env', 'develop', 'run', 'linters' ], # noqa: S607
+ capture_output = True, text = True, timeout = 120, check = False )
+ except (
+ subprocess.TimeoutExpired,
+ subprocess.CalledProcessError,
+ FileNotFoundError
+ ): _error_with_divine_message( )
+ else:
+ if result.returncode != 0: _error_with_divine_message( )
+ try:
+ result = subprocess.run(
+ [ 'hatch', '--env', 'develop', 'run', 'testers' ], # noqa: S607
+ capture_output = True, text = True, timeout = 300, check = False )
+ except (
+ subprocess.TimeoutExpired,
+ subprocess.CalledProcessError,
+ FileNotFoundError
+ ): _error_with_divine_message( )
+ else:
+ if result.returncode != 0: _error_with_divine_message( )
+
+
+def _error_with_divine_message( ):
+ ''' Displays divine admonition and exits. '''
+ message = (
+ "The Large Language Divinity 🌩️🤖🌩️ in the Celestial Data Center hath "
+ "commanded that:\n"
+ "* Thy code shalt pass all lints before thy commit.\n"
+ " Run: hatch --env develop run linters\n"
+ "* Thy code shalt pass all tests before thy commit.\n"
+ " Run: hatch --env develop run testers\n\n"
+ "(If you are in the middle of a large refactor, consider commenting "
+ "out the tests and adding a reminder note in the .auxiliary/notes "
+ "directory.)"
+ )
+ print( message, file = sys.stderr )
+ raise SystemExit( 2 )
+
+
+def _extract_command( event_data ):
+ ''' Extracts command from event data, exit if not Bash tool. '''
+ tool_name = event_data.get( 'tool_name', '' )
+ if tool_name != 'Bash': raise SystemExit( 0 )
+ tool_input = event_data.get( 'tool_input', { } )
+ return tool_input.get( 'command', '' )
+
+
+def _is_git_commit_command( tokens ):
+ ''' Checks if tokens represent a git commit command. '''
+ if len( tokens ) < _GIT_COMMIT_MIN_TOKENS:
+ return False
+ return tokens[ 0 ] == 'git' and tokens[ 1 ] == 'commit'
+
+
+_splitters = frozenset( ( ';', '&', '|', '&&', '||' ) )
+def _partition_command_line( command_line ):
+ tokens = shlex.split( command_line )
+ commands = [ ]
+ command_tokens = [ ]
+ for token in tokens:
+ if token in _splitters:
+ commands.append( command_tokens )
+ command_tokens = [ ]
+ continue
+ command_tokens.append( token )
+ if command_tokens: commands.append( command_tokens )
+ return commands
+
+
+def _reactor_failure( message ):
+ print( f"Claude Code Hook Failure: {message}", file = sys.stderr )
+ raise SystemExit( 1 )
+
+
+if __name__ == '__main__': main()
diff --git a/.auxiliary/scripts/claude/pre-bash-python-check b/.auxiliary/scripts/claude/pre-bash-python-check
new file mode 100755
index 0000000..0ccf678
--- /dev/null
+++ b/.auxiliary/scripts/claude/pre-bash-python-check
@@ -0,0 +1,123 @@
+#!/usr/bin/env python3
+# vim: set filetype=python fileencoding=utf-8:
+# -*- coding: utf-8 -*-
+
+''' Claude Code hook to detect improper Python usage in Bash commands. '''
+
+
+import json
+import shlex
+import sys
+
+
+def main( ):
+ event = _acquire_event_data( )
+ command_line = _extract_command( event )
+ commands = _partition_command_line( command_line )
+ for command in commands:
+ _check_direct_python_usage( command )
+ _check_multiline_python_c( command )
+ _check_direct_tool_usage( command )
+ raise SystemExit( 0 )
+
+
+def _acquire_event_data( ):
+ try: return json.load( sys.stdin )
+ except json.JSONDecodeError:
+ _reactor_failure( "Invalid event data." )
+
+
+def _check_direct_python_usage( tokens ):
+ ''' Checks for direct python usage patterns. '''
+ emessage = (
+ "Warning: Direct Python usage detected in command.\n"
+ "Consider using 'hatch run python' or "
+ "'hatch --env develop run python' to ensure dependencies "
+ "are available." )
+ for token in tokens:
+ if token == 'hatch': return # noqa: S105
+ if _is_python_command( token ): _error( emessage )
+
+
+def _check_multiline_python_c( tokens ):
+ ''' Checks for multi-line python -c scripts using shlex parsing. '''
+ emessage = (
+ "Warning: Multi-line Python script detected in command.\n"
+ "Consider writing the script to a file "
+ "in the '.auxiliary/scribbles' directory "
+ "instead of using 'python -c' with multi-line code." )
+ for i, token in enumerate( tokens ):
+ if ( _is_python_command( token )
+ and _check_python_c_argument( tokens, i )
+ ): _error( emessage )
+
+
+def _check_direct_tool_usage( tokens ):
+ ''' Checks for direct usage of Python tools outside Hatch environment. '''
+ emessage = (
+ "Warning: Direct Python tool usage detected in command.\n"
+ "Use 'hatch --env develop run {tool}' instead to ensure "
+ "proper environment and configuration." )
+ for token in tokens:
+ if token == 'hatch': return # noqa: S105
+ if _is_python_tool( token ):
+ _error( emessage.format( tool = token ) )
+
+
+def _check_python_c_argument( tokens, python_index ):
+ ''' Checks if Python -c argument contains multiline code. '''
+ for j in range( python_index + 1, len( tokens ) ):
+ if tokens[ j ] == '-c' and j + 1 < len( tokens ):
+ c_argument = tokens[ j + 1 ]
+ return '\n' in c_argument
+ if not tokens[ j ].startswith( '-' ):
+ # Non-option argument, stop looking for -c
+ break
+ return False
+
+
+def _error( message: str ):
+ print( message, file = sys.stderr )
+ raise SystemExit( 2 )
+
+
+def _extract_command( event_data ):
+ ''' Extracts command from event data, exit if not Bash tool. '''
+ tool_name = event_data.get( 'tool_name', '' )
+ if tool_name != 'Bash': raise SystemExit( 0 )
+ tool_input = event_data.get( 'tool_input', { } )
+ return tool_input.get( 'command', '' )
+
+
+def _is_python_command( token ):
+ ''' Checks if token is a Python command. '''
+ return (
+ token in ( 'python', 'python3' ) or token.startswith( 'python3.' ) )
+
+
+def _is_python_tool( token ):
+ ''' Checks if token is a Python development tool. '''
+ return token in ( 'coverage', 'pyright', 'pytest', 'ruff' )
+
+
+_splitters = frozenset( ( ';', '&', '|', '&&', '||' ) )
+def _partition_command_line( command_line ):
+ tokens = shlex.split( command_line )
+ commands = [ ]
+ command_tokens = [ ]
+ for token in tokens:
+ if token in _splitters:
+ commands.append( command_tokens )
+ command_tokens = [ ]
+ continue
+ command_tokens.append( token )
+ if command_tokens: commands.append( command_tokens )
+ return commands
+
+
+def _reactor_failure( message ):
+ print( f"Claude Code Hook Failure: {message}", file = sys.stderr )
+ raise SystemExit( 1 )
+
+
+if __name__ == '__main__': main()
diff --git a/.auxiliary/scripts/prepare-agents b/.auxiliary/scripts/prepare-agents
new file mode 100755
index 0000000..532384d
--- /dev/null
+++ b/.auxiliary/scripts/prepare-agents
@@ -0,0 +1,170 @@
+#!/usr/bin/env bash
+
+set -eu -o pipefail
+
+eecho() {
+ echo "$@" >&2
+}
+
+if [[ "${BASH_SOURCE[0]}" != "${0}" ]]; then
+ eecho "Error: This script should not be sourced. Please run it directly."
+ return 1 2>/dev/null || exit 1
+fi
+
+if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
+ eecho "Error: Current directory is not in a Git repository"
+ exit 1
+fi
+
+repo_root="$(git rev-parse --show-toplevel)"
+if [[ -z "$repo_root" ]]; then
+ eecho "Error: Could not determine Git repository root"
+ exit 1
+fi
+
+create_symlink_if_needed() {
+ local target_path="$1"
+ local link_path="$2"
+
+ if [[ ! -e "$repo_root/$target_path" ]]; then
+ eecho "Warning: Target $repo_root/$target_path does not exist"
+ return
+ fi
+
+ if [[ -L "$repo_root/$link_path" ]]; then
+ local current_target="$(readlink "$repo_root/$link_path")"
+ if [[ "$current_target" = "$target_path" ]]; then
+ return
+ else
+ echo "Updating symlink $link_path: $current_target → $target_path"
+ rm "$repo_root/$link_path"
+ fi
+ elif [[ -e "$repo_root/$link_path" ]]; then
+ eecho "Warning: File or directory already exists at $repo_root/$link_path"
+ return
+ fi
+
+ ln -s "$target_path" "$link_path"
+}
+
+create_symlinks() {
+ trap 'popd >/dev/null 2>&1 || true' ERR EXIT
+
+ pushd "$repo_root" >/dev/null
+
+ create_symlink_if_needed ".auxiliary/configuration/conventions.md" "CLAUDE.md"
+ create_symlink_if_needed ".auxiliary/configuration/conventions.md" "AGENTS.md"
+ create_symlink_if_needed ".auxiliary/configuration/mcp-servers.json" ".mcp.json"
+ create_symlink_if_needed ".auxiliary/configuration/claude" ".claude"
+ create_symlink_if_needed ".auxiliary/configuration/gemini" ".gemini"
+ create_symlink_if_needed ".auxiliary/configuration/opencode" ".opencode"
+ create_symlink_if_needed ".auxiliary/configuration/opencode.jsonc" "opencode.jsonc"
+
+ popd >/dev/null
+
+ trap - ERR EXIT
+}
+
+download_instructions() {
+ local instructions_dir="$repo_root/.auxiliary/instructions"
+ local base_url="https://raw.githubusercontent.com/emcd/python-project-common/refs/tags/docs-1/documentation/common"
+ local files=(
+ "architecture.rst"
+ "nomenclature.rst"
+ "nomenclature-germanic.rst"
+ "nomenclature-latin.rst"
+ "practices.rst"
+ "practices-python.rst"
+ "practices-rust.rst"
+ "practices-toml.rst"
+ "requirements.rst"
+ "style.rst"
+ "tests.rst"
+ )
+
+ mkdir -p "$instructions_dir"
+
+ echo "Downloading project documentation guides to .auxiliary/instructions/"
+
+ local success_count=0
+ for file in "${files[@]}"; do
+ local url="$base_url/$file"
+ local output_path="$instructions_dir/$file"
+
+ if curl --fail --silent --location "$url" | tail -n +20 > "$output_path"; then
+ if [[ -s "$output_path" ]]; then
+ echo " ✓ Downloaded $file ($(wc -c < "$output_path") bytes, boilerplate stripped)"
+ success_count=$((success_count + 1))
+ else
+ eecho " ✗ Downloaded $file but file is empty after processing"
+ rm -f "$output_path"
+ fi
+ else
+ eecho " ✗ Failed to download $file"
+ fi
+ done
+
+ if [[ $success_count -eq ${#files[@]} ]]; then
+ echo "Successfully downloaded all ${#files[@]} documentation guides"
+ else
+ eecho "Warning: Only downloaded $success_count of ${#files[@]} documentation guides"
+ fi
+}
+
+echo "Creating symlinks for LLM instruction files..."
+create_symlinks
+echo "Symlinks created successfully"
+
+download_instructions
+
+copy_claude_commands() {
+ local claude_commands_dir="$repo_root/.auxiliary/configuration/claude/commands"
+ local opencode_commands_dir="$repo_root/.auxiliary/configuration/opencode/command"
+
+ if [[ ! -d "$claude_commands_dir" ]]; then
+ eecho "Warning: Claude commands directory not found at $claude_commands_dir"
+ return
+ fi
+
+ mkdir -p "$opencode_commands_dir"
+
+ echo "Copying Claude commands to Opencode format..."
+
+ local copied_count=0
+ for claude_command in "$claude_commands_dir"/*.md; do
+ if [[ ! -f "$claude_command" ]]; then
+ continue
+ fi
+
+ local command_name="$(basename "$claude_command")"
+ local opencode_command="$opencode_commands_dir/$command_name"
+
+ if cat "$claude_command" | grep -vE '^(argument-hints|allowed-tools):.*$' > "$opencode_command"; then
+ echo " ✓ Copied $command_name"
+ copied_count=$((copied_count + 1))
+ else
+ eecho " ✗ Failed to copy $command_name"
+ rm -f "$opencode_command"
+ fi
+ done
+
+ if [[ $copied_count -gt 0 ]]; then
+ echo "Successfully copied $copied_count command(s) to Opencode format"
+ else
+ echo "No commands were copied"
+ fi
+}
+
+copy_claude_commands
+
+echo ""
+echo "Agent environment preparation complete!"
+echo "- Created symlinks:"
+echo " - CLAUDE.md and AGENTS.md → .auxiliary/configuration/conventions.md"
+echo " - .mcp.json → .auxiliary/configuration/mcp-servers.json"
+echo " - .claude → .auxiliary/configuration/claude"
+echo " - .gemini → .auxiliary/configuration/gemini"
+echo " - .opencode → .auxiliary/configuration/opencode"
+echo " - opencode.jsonc → .auxiliary/configuration/opencode.jsonc"
+echo "- Downloaded project documentation guides"
+echo "- Copied Claude commands to Opencode format (filtered for compatibility)"
\ No newline at end of file
diff --git a/.auxiliary/secrets/.gitignore b/.auxiliary/secrets/.gitignore
new file mode 100644
index 0000000..d6b7ef3
--- /dev/null
+++ b/.auxiliary/secrets/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
diff --git a/.github/workflows/claude.yaml b/.github/workflows/claude.yaml
new file mode 100644
index 0000000..dadcf5f
--- /dev/null
+++ b/.github/workflows/claude.yaml
@@ -0,0 +1,45 @@
+name: claude
+
+on:
+ issue_comment:
+ types: [created]
+ pull_request_review_comment:
+ types: [created]
+ issues:
+ types: [opened, assigned]
+ pull_request_review:
+ types: [submitted]
+ workflow_dispatch:
+ inputs:
+ prompt:
+ description: 'Message to send to Claude'
+ required: true
+ default: 'Please validate your MCP server setup and available subagents.'
+ type: string
+
+jobs:
+
+ initialize:
+ uses: ./.github/workflows/core--initializer.yaml
+
+ claude:
+ if: |
+ (github.event_name == 'issue_comment' && contains(github.event.comment.body, '/claude')) ||
+ (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '/claude')) ||
+ (github.event_name == 'pull_request_review' && contains(github.event.review.body, '/claude')) ||
+ (github.event_name == 'issues' && (contains(github.event.issue.body, '/claude') || contains(github.event.issue.title, '/claude'))) ||
+ (github.event_name == 'workflow_dispatch')
+ needs: [initialize]
+ permissions:
+ contents: read
+ id-token: write
+ issues: read
+ pull-requests: read
+ uses: emcd/python-project-common/.github/workflows/xrepo--claude.yaml@gha-1
+ with:
+ allowed-tools: 'Bash(git add:*),Bash(git branch:*),Bash(git checkout:*),Bash(git diff:*),Bash(git log:*),Bash(git ls-files:*),Bash(git remote:*),Bash(git reset:*),Bash(git rev-parse:*),Bash(git rm:*),Bash(git status),Bash(hatch:*),Bash(pip:*),Bash(python:*),Edit,Write,mcp__context7__resolve-library-id,mcp__context7__get-library-docs,mcp__librovore__query_inventory,mcp__librovore__query_content'
+ python-version: '${{ fromJSON(needs.initialize.outputs.python-versions)[0] }}'
+ timeout-minutes: 20
+ prompt: '${{ inputs.prompt }}'
+ secrets:
+ anthropic-api-key: '${{ secrets.ANTHROPIC_API_KEY }}'
diff --git a/.github/workflows/releaser.yaml b/.github/workflows/releaser.yaml
index f079377..b9dbbef 100644
--- a/.github/workflows/releaser.yaml
+++ b/.github/workflows/releaser.yaml
@@ -24,7 +24,7 @@ jobs:
test:
needs: [initialize]
- uses: emcd/python-project-common/.github/workflows/xrepo--tester.yaml@v1.16
+ uses: emcd/python-project-common/.github/workflows/xrepo--tester.yaml@gha-1
with:
matrix-exclusions: '${{ needs.initialize.outputs.matrix-exclusions }}'
platforms: '${{ needs.initialize.outputs.platforms }}'
@@ -33,7 +33,7 @@ jobs:
report:
needs: [initialize, test]
- uses: emcd/python-project-common/.github/workflows/xrepo--reporter.yaml@v1.16
+ uses: emcd/python-project-common/.github/workflows/xrepo--reporter.yaml@gha-1
with:
python-version: '${{ fromJSON(needs.initialize.outputs.python-versions)[0] }}'
@@ -43,14 +43,14 @@ jobs:
contents: write
id-token: write
pages: write
- uses: emcd/python-project-common/.github/workflows/xrepo--documenter.yaml@v1.16
+ uses: emcd/python-project-common/.github/workflows/xrepo--documenter.yaml@gha-1
with:
include-reports: true
python-version: '${{ fromJSON(needs.initialize.outputs.python-versions)[0] }}'
package:
needs: [initialize, docsgen]
- uses: emcd/python-project-common/.github/workflows/xrepo--packager.yaml@v1.16
+ uses: emcd/python-project-common/.github/workflows/xrepo--packager.yaml@gha-1
with:
artifacts-path: '.auxiliary/artifacts/hatch-build' # TODO: Use environment.
python-version: '${{ fromJSON(needs.initialize.outputs.python-versions)[0] }}'
@@ -81,7 +81,6 @@ jobs:
skip-existing: ${{ inputs.which-pypi == 'testpypi' }}
publish-github:
- if: ${{ startsWith(github.ref, 'refs/tags/') }}
needs:
- initialize
- package
@@ -90,12 +89,13 @@ jobs:
# --- END: Injected by Copier ---
runs-on: ubuntu-latest
permissions:
+ attestations: write
contents: write
id-token: write
steps:
- name: Prepare Python
- uses: emcd/python-project-common/.github/actions/python-hatch@v1.16
+ uses: emcd/python-project-common/.github/actions/python-hatch@gha-1
with:
python-version: ${{ fromJSON(needs.initialize.outputs.python-versions)[0] }}
@@ -111,10 +111,10 @@ jobs:
cd ${{ env.DISTRIBUTIONS_PATH }}
sha256sum classcore-* >SHA256SUMS.txt
- - name: Sign Distributions
- uses: sigstore/gh-action-sigstore-python@v3.0.0
+ - name: Attest Build Provenance
+ uses: actions/attest-build-provenance@v2
with:
- inputs: >-
+ subject-path: |
${{ env.DISTRIBUTIONS_PATH }}/SHA256SUMS.txt
${{ env.DISTRIBUTIONS_PATH }}/classcore-*
@@ -127,6 +127,7 @@ jobs:
cp .auxiliary/artifacts/tc-release-notes.rst .auxiliary/artifacts/release-notes.rst
- name: Create Release
+ if: ${{ startsWith(github.ref, 'refs/tags/') }}
env:
GITHUB_TOKEN: ${{ github.token }}
run: |
@@ -135,6 +136,7 @@ jobs:
--notes-file .auxiliary/artifacts/release-notes.rst
- name: Publish Artifacts
+ if: ${{ startsWith(github.ref, 'refs/tags/') }}
env:
GITHUB_TOKEN: ${{ github.token }}
run: |
diff --git a/.github/workflows/tester.yaml b/.github/workflows/tester.yaml
index a341490..2de0d5c 100644
--- a/.github/workflows/tester.yaml
+++ b/.github/workflows/tester.yaml
@@ -14,7 +14,7 @@ jobs:
test:
needs: [initialize]
- uses: emcd/python-project-common/.github/workflows/xrepo--tester.yaml@v1.16
+ uses: emcd/python-project-common/.github/workflows/xrepo--tester.yaml@gha-1
with:
matrix-exclusions: '${{ needs.initialize.outputs.matrix-exclusions }}'
platforms: '${{ needs.initialize.outputs.platforms }}'
@@ -23,6 +23,6 @@ jobs:
report:
needs: [initialize, test]
- uses: emcd/python-project-common/.github/workflows/xrepo--reporter.yaml@v1.16
+ uses: emcd/python-project-common/.github/workflows/xrepo--reporter.yaml@gha-1
with:
python-version: '${{ fromJSON(needs.initialize.outputs.python-versions)[0] }}'
diff --git a/.gitignore b/.gitignore
index 8ae7cd9..5d97b3d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,15 @@
.env
+.claude
+.gemini
+.mcp.json
+.opencode
.*.swp
+AGENTS.md
+CLAUDE.md
+CONVENTIONS.md
+GEMINI.md
+opencode.jsonc
__pycache__/
bugs/
+build/
+dist/
diff --git a/README.rst b/README.rst
index b80fb0e..b36a90d 100644
--- a/README.rst
+++ b/README.rst
@@ -81,14 +81,17 @@ modules.
Installation 📦
===============================================================================
-Via `uv `_ ``pip``
+Method: Install Python Package
+-------------------------------------------------------------------------------
+
+Install via `uv `_ ``pip``
command:
::
uv pip install classcore
-Or, via ``pip``:
+Or, install via ``pip``:
::
@@ -109,9 +112,9 @@ Note on Immutability 📢
Examples 💡
===============================================================================
-.. Please see the `examples directory
-.. `_ for
-.. greater detail.
+Please see the `examples directory
+`_
+for greater detail.
Standard Behaviors 🔒
-------------------------------------------------------------------------------
@@ -188,7 +191,24 @@ Use Cases 🎯
* 📦 **Frameworks**: Frameworks with controlled extension and modification.
-`More Flair `_
+Contribution 🤝
+===============================================================================
+
+Contribution to this project is welcome! However, it must follow the `code of
+conduct
+`_
+for the project.
+
+Please file bug reports and feature requests in the `issue tracker
+`_ or submit `pull
+requests `_ to
+improve the source code or documentation.
+
+For development guidance and standards, please see the `development guide
+`_.
+
+
+Additional Indicia
===============================================================================
.. image:: https://img.shields.io/github/last-commit/emcd/python-classcore
@@ -228,27 +248,24 @@ Other Projects by This Author 🌟
===============================================================================
-* `python-absence `_
- - PyPI: `absence `_
+* `python-absence `_ (`absence `_ on PyPI)
+
+ 🕳️ A Python library package which provides a **sentinel for absent values** - a falsey, immutable singleton that represents the absence of a value in contexts where ``None`` or ``False`` may be valid values.
+* `python-accretive `_ (`accretive `_ on PyPI)
- 🕳️ A Python library package which provides a **sentinel for absent values** - a falsey, immutable singleton that represents the absence of a value in contexts where ``None`` or ``False`` may be valid values.
-* `python-accretive `_
- - PyPI: `accretive `_
+ 🌌 A Python library package which provides **accretive data structures** - collections which can grow but never shrink.
+* `python-dynadoc `_ (`dynadoc `_ on PyPI)
- 🌌 A Python library package which provides **accretive data structures** - collections which can grow but never shrink.
-* `python-falsifier `_
- - PyPI: `falsifier `_
+ 📝 A Python library package which bridges the gap between **rich annotations** and **automatic documentation generation** with configurable renderers and support for reusable fragments.
+* `python-falsifier `_ (`falsifier `_ on PyPI)
- 🎭 A very simple Python library package which provides a **base class for falsey objects** - objects that evaluate to ``False`` in boolean contexts.
-* `python-frigid `_
- - PyPI: `frigid `_
+ 🎭 A very simple Python library package which provides a **base class for falsey objects** - objects that evaluate to ``False`` in boolean contexts.
+* `python-frigid `_ (`frigid `_ on PyPI)
- 🔒 A Python library package which provides **immutable data structures** - collections which cannot be modified after creation.
-* `python-icecream-truck `_
- - PyPI: `icecream-truck `_
+ 🔒 A Python library package which provides **immutable data structures** - collections which cannot be modified after creation.
+* `python-icecream-truck `_ (`icecream-truck `_ on PyPI)
- 🍦 **Flavorful Debugging** - A Python library which enhances the powerful and well-known ``icecream`` package with flavored traces, configuration hierarchies, customized outputs, ready-made recipes, and more.
-* `python-mimeogram `_
- - PyPI: `mimeogram `_
+ 🍦 **Flavorful Debugging** - A Python library which enhances the powerful and well-known ``icecream`` package with flavored traces, configuration hierarchies, customized outputs, ready-made recipes, and more.
+* `python-mimeogram `_ (`mimeogram `_ on PyPI)
- 📨 A command-line tool for **exchanging collections of files with Large Language Models** - bundle multiple files into a single clipboard-ready document while preserving directory structure and metadata... good for code reviews, project sharing, and LLM interactions.
+ 📨 A command-line tool for **exchanging collections of files with Large Language Models** - bundle multiple files into a single clipboard-ready document while preserving directory structure and metadata... good for code reviews, project sharing, and LLM interactions.
diff --git a/documentation/api.rst b/documentation/api.rst
index ef541d3..5f90f5f 100644
--- a/documentation/api.rst
+++ b/documentation/api.rst
@@ -1,5 +1,5 @@
-.. vim: set fileencoding=utf-8:
-.. -*- coding: utf-8 -*-
+.. vim: set filetype=rst fileencoding=utf-8:
+.. -*- mode: rst ; coding: utf-8 -*-
.. +--------------------------------------------------------------------------+
| |
| Licensed under the Apache License, Version 2.0 (the "License"); |
@@ -16,7 +16,8 @@
| |
+--------------------------------------------------------------------------+
-:tocdepth: 4
+
+:tocdepth: 3
*******************************************************************************
@@ -27,38 +28,13 @@ API
Package ``classcore``
===============================================================================
-Foundational class factories and decorators.
-
-Provides ability to create class decorators and metaclasses with customization
-hooks. The metaclasses can apply class decorators inline during the class
-construction and initialization process, properly handling cases where
-decorators replace classes (e.g., ``dataclasses.dataclass( slots = True )``).
-They also backport the repair mechanism from newer versions of CPython to
-ensure that the class closure cells are rectified on replaced classes, so that
-zero-argument ``super`` calls function correctly in them.
-
-The ``classcore.standard`` subpackage is an example of the decorators and
-customization hooks being used to provide a set of practical classes and class
-decorators. Furthermore, the exception classes in the
-:py:mod:`classcore.exceptions` module inherit from one of the standard classes,
-making both the exception classes, themselves, and their instances immutable
-and concealing their non-public attributes to reduce API noise. I.e., this
-package "eats its own dog food" and provides practical examples in so doing.
-
-This package is not as magical as it might seem. It does **not** rely on any
-``exec`` or ``eval`` calls and it does **not** do anything with ``ctypes`` or
-similar surgical instruments. It relies completely on the documented Python
-data model and the machinery that it provides. While it is true that
-metaclasses can be tricky, this package is developed with a deep,
-highly-evolved understanding of them. We seek simplicity over cleverness and
-maintain robust tests across multiple Python implementations and versions. The
-package is also very clean in terms of static type checking (via Pyright).
+.. automodule:: classcore
Module ``classcore.decorators``
-------------------------------------------------------------------------------
-.. automodule:: classcore.factories
+.. automodule:: classcore.decorators
Module ``classcore.factories``
@@ -88,28 +64,7 @@ Module ``classcore.utilities``
Subpackage ``classcore.standard``
===============================================================================
-Decorators and class factories which imbue concealment and immutability.
-
-Concealment restricts the visibility of attributes on classes and their
-instances. By default, only public attributes (ones which do not start with
-``_``) are revealed for :py:func:`dir` calls. This behavior can be overriden by
-supplying visibility verifiers as a decorator factory argument or metaclass
-argument. These can be a sequence of attribute names, regular expression
-:py:class:`re.Pattern` objects which match attribute names, or predicate
-functions which match attribute names. Or, total visibility (per the Python
-default) can be achieved by supplying ``visibles = '*'`` instead of a sequence
-of verifiers.
-
-Immutability prevents assignment (including reassignment) or deletion of
-attrubtes on classes and their instances after they have been completely
-initialized. In addition to any standard Python class, this can be applied to
-dataclasses, allowing them to use ``__post_init__`` to set attributes, which
-``dataclasses.dataclass( frozen = True )`` prevents. The immutability behavior
-can be overridden by supplying mutability verifiers as a decorator factory
-argument or metaclass argument. These behave similarly to the visibility
-verifiers described above.
-
-Hooks to modify the concealment and immutability behaviors are also available.
+.. automodule:: classcore.standard
Module ``classcore.standard.classes``
diff --git a/documentation/architecture/decisions/index.rst b/documentation/architecture/decisions/index.rst
new file mode 100644
index 0000000..a84ce73
--- /dev/null
+++ b/documentation/architecture/decisions/index.rst
@@ -0,0 +1,31 @@
+.. vim: set fileencoding=utf-8:
+.. -*- coding: utf-8 -*-
+.. +--------------------------------------------------------------------------+
+ | |
+ | Licensed under the Apache License, Version 2.0 (the "License"); |
+ | you may not use this file except in compliance with the License. |
+ | You may obtain a copy of the License at |
+ | |
+ | http://www.apache.org/licenses/LICENSE-2.0 |
+ | |
+ | Unless required by applicable law or agreed to in writing, software |
+ | distributed under the License is distributed on an "AS IS" BASIS, |
+ | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
+ | See the License for the specific language governing permissions and |
+ | limitations under the License. |
+ | |
+ +--------------------------------------------------------------------------+
+
+
+*******************************************************************************
+Architectural Decision Records
+*******************************************************************************
+
+.. toctree::
+ :maxdepth: 2
+
+
+.. todo:: Add architectural decision records to toctree.
+
+For ADR format and guidance, see the `architecture documentation guide
+`_.
\ No newline at end of file
diff --git a/documentation/architecture/designs/index.rst b/documentation/architecture/designs/index.rst
new file mode 100644
index 0000000..ebd094d
--- /dev/null
+++ b/documentation/architecture/designs/index.rst
@@ -0,0 +1,28 @@
+.. vim: set fileencoding=utf-8:
+.. -*- coding: utf-8 -*-
+.. +--------------------------------------------------------------------------+
+ | |
+ | Licensed under the Apache License, Version 2.0 (the "License"); |
+ | you may not use this file except in compliance with the License. |
+ | You may obtain a copy of the License at |
+ | |
+ | http://www.apache.org/licenses/LICENSE-2.0 |
+ | |
+ | Unless required by applicable law or agreed to in writing, software |
+ | distributed under the License is distributed on an "AS IS" BASIS, |
+ | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
+ | See the License for the specific language governing permissions and |
+ | limitations under the License. |
+ | |
+ +--------------------------------------------------------------------------+
+
+
+*******************************************************************************
+Designs
+*******************************************************************************
+
+.. toctree::
+ :maxdepth: 2
+
+
+.. todo:: Add design documents to toctree.
diff --git a/documentation/architecture/filesystem.rst b/documentation/architecture/filesystem.rst
new file mode 100644
index 0000000..d3ca1b9
--- /dev/null
+++ b/documentation/architecture/filesystem.rst
@@ -0,0 +1,86 @@
+.. vim: set fileencoding=utf-8:
+.. -*- coding: utf-8 -*-
+.. +--------------------------------------------------------------------------+
+ | |
+ | Licensed under the Apache License, Version 2.0 (the "License"); |
+ | you may not use this file except in compliance with the License. |
+ | You may obtain a copy of the License at |
+ | |
+ | http://www.apache.org/licenses/LICENSE-2.0 |
+ | |
+ | Unless required by applicable law or agreed to in writing, software |
+ | distributed under the License is distributed on an "AS IS" BASIS, |
+ | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
+ | See the License for the specific language governing permissions and |
+ | limitations under the License. |
+ | |
+ +--------------------------------------------------------------------------+
+
+
+*******************************************************************************
+Filesystem Organization
+*******************************************************************************
+
+This document describes the specific filesystem organization for the project,
+showing how the standard organizational patterns are implemented for this
+project's configuration. For the underlying principles and rationale behind
+these patterns, see the `common architecture documentation
+`_.
+
+Project Structure
+===============================================================================
+
+Root Directory Organization
+-------------------------------------------------------------------------------
+
+The project implements the standard filesystem organization:
+
+.. code-block::
+
+ python-classcore/
+ ├── LICENSE.txt # Project license
+ ├── README.rst # Project overview and quick start
+ ├── pyproject.toml # Python packaging and tool configuration
+ ├── documentation/ # Sphinx documentation source
+ ├── sources/ # All source code
+ ├── tests/ # Test suites
+ └── .auxiliary/ # Development workspace
+
+Source Code Organization
+===============================================================================
+
+Package Structure
+-------------------------------------------------------------------------------
+
+The main Python package follows the standard ``sources/`` directory pattern:
+
+.. code-block::
+
+ sources/
+ ├── classcore/ # Main Python package
+ │ ├── __/ # Centralized import hub
+ │ │ ├── __init__.py # Re-exports core utilities
+ │ │ ├── imports.py # External library imports
+ │ │ └── nomina.py # python-classcore-specific naming constants
+ │ ├── __init__.py # Package entry point
+ │ ├── py.typed # Type checking marker
+ │ └── [modules].py # Feature-specific modules
+
+
+All package modules use the standard ``__`` import pattern as documented
+in the common architecture guide.
+
+Component Integration
+===============================================================================
+
+Architecture Evolution
+===============================================================================
+
+This filesystem organization provides a foundation that architect agents can
+evolve as the project grows. For questions about organizational principles,
+subpackage patterns, or testing strategies, refer to the comprehensive common
+documentation:
+
+* `Architecture Patterns `_
+* `Development Practices `_
+* `Test Development Guidelines `_
diff --git a/documentation/architecture/index.rst b/documentation/architecture/index.rst
new file mode 100644
index 0000000..6f09ef9
--- /dev/null
+++ b/documentation/architecture/index.rst
@@ -0,0 +1,34 @@
+.. vim: set fileencoding=utf-8:
+.. -*- coding: utf-8 -*-
+.. +--------------------------------------------------------------------------+
+ | |
+ | Licensed under the Apache License, Version 2.0 (the "License"); |
+ | you may not use this file except in compliance with the License. |
+ | You may obtain a copy of the License at |
+ | |
+ | http://www.apache.org/licenses/LICENSE-2.0 |
+ | |
+ | Unless required by applicable law or agreed to in writing, software |
+ | distributed under the License is distributed on an "AS IS" BASIS, |
+ | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
+ | See the License for the specific language governing permissions and |
+ | limitations under the License. |
+ | |
+ +--------------------------------------------------------------------------+
+
+
+*******************************************************************************
+Architecture
+*******************************************************************************
+
+.. toctree::
+ :maxdepth: 2
+
+ summary
+ filesystem
+ decisions/index
+ designs/index
+ testplans/index
+
+
+.. todo:: Populate architecture documentation sections.
\ No newline at end of file
diff --git a/documentation/architecture/summary.rst b/documentation/architecture/summary.rst
new file mode 100644
index 0000000..bab368a
--- /dev/null
+++ b/documentation/architecture/summary.rst
@@ -0,0 +1,24 @@
+.. vim: set fileencoding=utf-8:
+.. -*- coding: utf-8 -*-
+.. +--------------------------------------------------------------------------+
+ | |
+ | Licensed under the Apache License, Version 2.0 (the "License"); |
+ | you may not use this file except in compliance with the License. |
+ | You may obtain a copy of the License at |
+ | |
+ | http://www.apache.org/licenses/LICENSE-2.0 |
+ | |
+ | Unless required by applicable law or agreed to in writing, software |
+ | distributed under the License is distributed on an "AS IS" BASIS, |
+ | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
+ | See the License for the specific language governing permissions and |
+ | limitations under the License. |
+ | |
+ +--------------------------------------------------------------------------+
+
+
+*******************************************************************************
+System Overview
+*******************************************************************************
+
+.. todo:: Describe the high-level system architecture, major components, and their relationships.
\ No newline at end of file
diff --git a/documentation/architecture/testplans/index.rst b/documentation/architecture/testplans/index.rst
new file mode 100644
index 0000000..3db3d71
--- /dev/null
+++ b/documentation/architecture/testplans/index.rst
@@ -0,0 +1,30 @@
+.. vim: set fileencoding=utf-8:
+.. -*- coding: utf-8 -*-
+.. +--------------------------------------------------------------------------+
+ | |
+ | Licensed under the Apache License, Version 2.0 (the "License"); |
+ | you may not use this file except in compliance with the License. |
+ | You may obtain a copy of the License at |
+ | |
+ | http://www.apache.org/licenses/LICENSE-2.0 |
+ | |
+ | Unless required by applicable law or agreed to in writing, software |
+ | distributed under the License is distributed on an "AS IS" BASIS, |
+ | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
+ | See the License for the specific language governing permissions and |
+ | limitations under the License. |
+ | |
+ +--------------------------------------------------------------------------+
+
+
+*******************************************************************************
+Test Plans
+*******************************************************************************
+
+.. toctree::
+ :maxdepth: 2
+
+ summary
+
+
+.. todo:: Add test plan documents to toctree.
\ No newline at end of file
diff --git a/documentation/architecture/testplans/summary.rst b/documentation/architecture/testplans/summary.rst
new file mode 100644
index 0000000..d480c7f
--- /dev/null
+++ b/documentation/architecture/testplans/summary.rst
@@ -0,0 +1,86 @@
+.. vim: set fileencoding=utf-8:
+.. -*- coding: utf-8 -*-
+.. +--------------------------------------------------------------------------+
+ | |
+ | Licensed under the Apache License, Version 2.0 (the "License"); |
+ | you may not use this file except in compliance with the License. |
+ | You may obtain a copy of the License at |
+ | |
+ | http://www.apache.org/licenses/LICENSE-2.0 |
+ | |
+ | Unless required by applicable law or agreed to in writing, software |
+ | distributed under the License is distributed on an "AS IS" BASIS, |
+ | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
+ | See the License for the specific language governing permissions and |
+ | limitations under the License. |
+ | |
+ +--------------------------------------------------------------------------+
+
+
+*******************************************************************************
+Test Organization Summary
+*******************************************************************************
+
+Overview
+===============================================================================
+
+This section contains comprehensive test planning documentation, including test
+organization conventions, coverage strategies, and detailed implementation
+plans for achieving systematic test coverage.
+
+Test plans follow project testing principles described in the `common test
+development guidelines
+`_.
+Key principles include:
+
+- **Dependency injection over monkey-patching** for testable code architecture
+- **Systematic coverage analysis** with clear gap identification
+- **Performance-conscious resource use** with appropriate testing strategies
+- **Organized test structure** with numbered modules and functions
+
+Test Planning Process
+===============================================================================
+
+The test planning process systematically addresses:
+
+**Coverage Gap Analysis**
+ Identification of all uncovered lines and untested functionality across modules
+
+**Test Strategy Development**
+ Comprehensive approaches for testing each function, class, and method with
+ appropriate test data strategies
+
+**Implementation Guidance**
+ Detailed plans for achieving coverage while following project testing principles
+
+**Architectural Considerations**
+ Analysis of testability constraints and recommendations for maintaining
+ clean, testable code
+
+Test Module Numbering Scheme
+===============================================================================
+
+.. todo:: Define project-specific test module numbering conventions.
+
+Test Function Numbering
+===============================================================================
+
+Within each test module, functions are numbered by component:
+
+- **000-099**: Basic functionality tests for the module
+- **100-199, 200-299, etc.**: Each function/class gets its own 100-number block
+- **Increments of 10-20**: For closely related test variations within a block
+
+Project-Specific Testing Conventions
+===============================================================================
+
+For detailed testing conventions, patterns, and guidelines, refer to the `common
+test development guidelines
+`_.
+This includes:
+
+- Coverage goals and strategies
+- Performance considerations
+- Test data organization patterns
+- Dependency injection approaches
+- Resource management during testing
\ No newline at end of file
diff --git a/documentation/changelog.rst b/documentation/changelog.rst
index 119a6f7..4790175 100644
--- a/documentation/changelog.rst
+++ b/documentation/changelog.rst
@@ -21,5 +21,265 @@
Release Notes
*******************************************************************************
-
.. towncrier release notes start
+
+classcore 1.10 (2025-09-25)
+===========================
+
+Enhancements
+------------
+
+- Expose exception mutability constants and public identifier utility function for downstream reuse.
+
+
+Repairs
+-------
+
+- Fix additional PyPy compatibility issues with exception and ABC cache attribute mutations.
+
+
+classcore 1.9 (2025-09-24)
+==========================
+
+Repairs
+-------
+
+- Fix PyPy compatibility with super() calls in slotted dataclasses.
+
+
+Classcore 1.8 (2025-07-23)
+==========================
+
+Enhancements
+------------
+
+- Standard: Modules: Allow certain modules to be excluded from reclassification.
+ Also, implement cycle detection.
+
+
+Repairs
+-------
+
+- Standard: Modules: Reclassify modules with proper depth-first traversal.
+
+
+Classcore 1.7 (2025-07-08)
+==========================
+
+Enhancements
+------------
+
+- Standard: Add ``ignore_init_arguments`` decorator argument and
+ ``instances_ignore_init_arguments`` class argument to support cases, such as
+ inheritance from ``urllib.parse.ParseResult`` which inherits from ``tuple`` and
+ overrides ``__new__`` instead of ``__init__``. In such cases, ``__new__``
+ processes the instance production arguments rather than ``__init__``. However,
+ the standard Python behavior is to present the arguments to both ``__new__``
+ and ``__init__``, which is problematic since we always provide an ``__init__``
+ head.
+
+
+Classcore 1.6.1 (2025-07-01)
+============================
+
+Repairs
+-------
+
+- Fix deprecation warnings from finalize_module by refactoring to use private _reclassify_module implementation.
+
+
+Classcore 1.6 (2025-07-01)
+==========================
+
+Enhancements
+------------
+
+- Add ``finalize_module`` function to combine Dynadoc docstring generation with module reclassification for immutability and concealment.
+
+
+Notices
+-------
+
+- Deprecate ``reclassify_modules`` function. Use ``finalize_module`` instead.
+
+
+Classcore 1.5.3 (2025-06-30)
+============================
+
+Repairs
+-------
+
+- Standard: Do not create duplicate slots for behaviors tracking.
+- Standard: Ensure that behaviors tracking attribute is not part of comparisons,
+ hashes, or ``repr`` calculations for dataclasses. Attributes with "private"
+ names which resemble the CPython scheme for class-local (non-inheritable)
+ attributes can create confusion for the internal machinery of ``dataclasses``.
+
+
+Classcore 1.5.2 (2025-06-29)
+============================
+
+Repairs
+-------
+
+- Standard Classes: Ensure that Dynadoc decorator applies last, after any
+ decorators which may potentially replace classes (e.g., ``dataclass( slots =
+ True )``), so that the Dynadoc visitees weak set captures the correct reference
+ to prevent multiple decoration.
+
+
+Classcore 1.5.1 (2025-06-26)
+============================
+
+Repairs
+-------
+
+- Ensure the inheritance of replacement functions works via metaclasses and not
+ just decorators.
+
+
+Classcore 1.5 (2025-06-24)
+==========================
+
+Enhancements
+------------
+
+- Improve docstrings for various public type aliases. Also, drop ``Fname``
+ references from public type aliases, such as ``Decorators`` so that they can be
+ reused in downstream packages.
+
+
+Repairs
+-------
+
+- Ensure that replacement implementations (``assigner_core``, ``deleter_core``,
+ and ``surveyor_core``) are inherited so that behaviors do not regress to
+ standard baseline behaviors in descendant classes.
+
+
+Classcore 1.4.2 (2025-06-11)
+============================
+
+Repairs
+-------
+
+- Ensure that custom attributes namer is passed during recursive module
+ reclassification.
+
+
+Classcore 1.4.1 (2025-06-10)
+============================
+
+Repairs
+-------
+
+- Enforce attributes concealment and immutability on modules.
+
+
+Classcore 1.4 (2025-06-10)
+==========================
+
+Enhancements
+------------
+
+- Fully support slotted classes. Bookkeeping attributes will now be in slots if
+ class is slotted. Potential performance improvement since names do not need to
+ be mangled for inheritance isolation.
+- Publicly expose the ``TypedDict`` which tracks available metaclass arguments
+ for the standard classes. This allows for easier extension by downstream
+ packages and centralizes documentation on the metaclasses interface.
+
+
+Repairs
+-------
+
+- Bugfix. Ensure idempotent execution in all scenarios: CPython vs PyPy, slotted
+ vs non-slotted.
+- Bugfix. Ensure that methods added by decoration properly respect class method
+ resolution order (MRO) when they are not wrapping existing methods.
+
+
+Classcore 1.3.1 (2025-06-07)
+============================
+
+Repairs
+-------
+
+- When reclassifying modules recursively, ensure that replacement class is
+ included in recursive invocations.
+
+
+Classcore 1.3 (2025-06-07)
+==========================
+
+Enhancements
+------------
+
+- Allow arbitrary class to be supplied to ``reclassify_modules``.
+- Simplify production of class decorators.
+
+
+Repairs
+-------
+
+- Bugfix. Propagate error class provider from metaclass to class decorators.
+
+
+Classcore 1.2 (2025-06-05)
+==========================
+
+Enhancements
+------------
+
+- Fill out docstrings for all public classes and functions in package.
+- Integrate with Dynadoc. Adds special introspection control which avoids
+ docstring updates on immutable classes. Adds Dynadoc ``with_docstring``
+ decorator to set of decorators on standard classes.
+
+
+Repairs
+-------
+
+- Address Pyright complaints about metaclass arguments in ``class`` statements.
+
+
+Classcore 1.1 (2025-05-01)
+==========================
+
+Repairs
+-------
+
+- Fix interaction with slotted dataclasses on Python 3.11+.
+- Properly report test coverage by considering doctests too. (Not a user-facing
+ fix; however important to note that coverage was 100% on initial release, but
+ Github Actions workflow was not properly setup to capture coverage from
+ doctests and so it only reported 95% coverage.)
+
+
+Classcore 1.0 (2025-04-29)
+==========================
+
+Enhancements
+------------
+
+- Add support for CPython 3.10 to 3.13.
+- Add support for PyPy 3.10.
+- Base classes and class factory classes which provide standard behaviors
+ (immutability of all attributes after initialization, concealment of all
+ non-public attributes from ``dir``) by default. Can apply additional decorators
+ and can tune for selective or total mutability or visibility. Enforce on class
+ attributes and instance attributes.
+- Class and dataclass decorators which apply a set of standard behaviors to
+ classes so that they produce instances which are immutable and which only
+ reveal public attributes by default. Decorators are tunable via arguments to
+ provide selective or total attributes mutability and visibility as desired.
+- Class decorator which accepts a sequence of other class decorators to apply.
+ Reduces height of decorator stacks and improves their reusability.
+- Decorators for modifying class factory classes (metaclasses) so that they can
+ handle inline application of decorators during production of classes. This
+ includes logic for the case where a decorator replaces a class rather than
+ modifies it.
+- Module class which enforces immutability and visibility limitation on module
+ attributes. (Visibility restriction is to reveal only public attributes to
+  ``dir``.) Also, convenience function which can reclassify a module or an entire
+ package, recursively, to use this class.
diff --git a/documentation/conf.py b/documentation/conf.py
index 22fe65d..fb7f5b7 100644
--- a/documentation/conf.py
+++ b/documentation/conf.py
@@ -1,3 +1,6 @@
+# vim: set filetype=python fileencoding=utf-8:
+# -*- mode: python ; coding: utf-8 -*-
+
''' Configuration file for the Sphinx documentation builder.
This file only contains a selection of the most common options.
@@ -7,8 +10,6 @@
https://jareddillard.com/blog/common-ways-to-customize-sphinx-themes.html
'''
-# ruff: noqa: E402,F401
-
def _calculate_copyright_notice( ):
from datetime import datetime as DateTime, timezone as TimeZone
@@ -76,9 +77,11 @@ def _import_version( ):
( 'py:class',
"v, remove specified key and return the corresponding value." ),
# Type annotation weirdnesses.
- ( 'py:class', "Doc" ),
- ( 'py:class', "collections.abc.Annotated" ),
- ( 'py:class', "types.Annotated" ),
+ ( 'py:class', "builtins.NotImplementedType" ),
+ ( 'py:class', "classcore.__.T" ),
+ ( 'py:class', "classcore.__.U" ),
+ ( 'py:class', "dynadoc.context.Context" ),
+ ( 'py:class', "dynadoc.context.IntrospectionControl" ),
( 'py:class', "typing_extensions._ProtocolMeta" ),
( 'py:class', "typing_extensions.Any" ),
]
@@ -95,6 +98,10 @@ def _import_version( ):
r'https://github\.com/emcd/python-classcore/.*',
# Package does not exist during initial development.
r'https://pypi.org/project/classcore/',
+ # Github aggressively rate-limits access to certain blobs.
+ r'https://github\.com/.*/.*/blob/.*',
+ # Avoid timeouts for slow sites.
+ r'http://www\.catb\.org/~esr/faqs/smart-questions\.html',
]
# -- Options for HTML output -------------------------------------------------
@@ -114,13 +121,13 @@ def _import_version( ):
autodoc_default_options = {
'member-order': 'groupwise',
- 'imported-members': False,
- 'inherited-members': True,
+ 'members': True,
'show-inheritance': True,
- 'undoc-members': True,
+ # 'special-members': '__call__',
}
-#autodoc_typehints = 'description'
+autodoc_typehints = 'none'
+autodoc_use_type_comments = False
# -- Options for intersphinx extension ---------------------------------------
# https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#configuration
diff --git a/documentation/contribution.rst b/documentation/contribution.rst
index 03de3d8..a2cea55 100644
--- a/documentation/contribution.rst
+++ b/documentation/contribution.rst
@@ -26,7 +26,7 @@ Contribution
Contribution to this project is welcome! However, it must follow the `code of
conduct
-`_
+`_
for the project.
@@ -46,31 +46,82 @@ Ways to Contribute
Development
===============================================================================
+Architecture
+-------------------------------------------------------------------------------
+
+* The :doc:`Product Requirements Document ` is a good starting point to
+ understand the motivations and rationale for the project. This should be
+ reviewed and updated, as necessary, when making changes that affect product
+ functionality or user experience. See the `requirements documentation guide
+ `_
+ for PRD format and best practices.
+
+* The :doc:`system architecture overview ` should be
+ reviewed to understand the structure and operational patterns of the project.
+ Major changes to the architecture should be reflected in this document.
+
+* Document significant architectural decisions using Architectural Decision
+ Records (ADRs) in the ``architecture/decisions/`` directory. See the
+ `architecture documentation guide
+ `_
+ for ADR format and best practices.
+
+* Document interface specifications, schemas, and algorithms in the
+ ``architecture/designs/`` directory to guide implementation efforts.
+
Guidance and Standards
-------------------------------------------------------------------------------
* Follow the `development environment preparation and management instructions
- `_
+ `_
to ensure consistency with maintainer development environments and CI
workflows.
+* Configure Git commit signing as required for all contributions. See the
+ `environment setup guide
+ `_
+ for configuration details.
+
* Adhere to the `development practices
- `_
- and `code style `_
+ `_,
+ `code style
+ `_,
+ and `testing guidelines
+ `_
to improve the probability of pull request acceptance. You may wish to use an
LLM to assist with this, if the standards seem too onerous or specific.
* Also consider the `nomenclature advice
- `_
+ `_
for consistency and to improve the probability of pull request acceptance.
+* Run validation commands before submitting contributions. See the `validation
+ guide `_
+ for available commands and workflow. (If you installed the Git pre-commit and
+ pre-push hooks during environment setup, then they will run the validations
+ for you.)
+
* Prepare changelog fragments according to the `releases guide
- `_
+ `_
as appropriate.
* Although unncessary for non-maintainer contributions, additional background
can be found in the `maintenance guide
- `_.
+ `_.
+
+Artificial Intelligence
+-------------------------------------------------------------------------------
+
+* Contributions, which are co-authored by large language models (LLMs), are
+ welcome, provided that they adhere to the project guidance and standards
+ above and are, otherwise, of good quality.
+
+* A more compact representation of the above guidance and standards, plus some
+ other advice for these models, can be found in
+ ``.auxiliary/configuration/conventions.md``. You may link to this file from a
+ ``AGENTS.md``, ``CLAUDE.md``, ``GEMINI.md``, ``CONVENTIONS.md``, etc... file
+ in the root of the project. These files are ignored by Git as we do not wish
+ to pollute the root of the project with them in the upstream repository.
Resources
-------------------------------------------------------------------------------
@@ -78,4 +129,6 @@ Resources
.. toctree::
:maxdepth: 2
+ prd
+ architecture/index
devapi
diff --git a/documentation/devapi.rst b/documentation/devapi.rst
index cd22b59..bafacf7 100644
--- a/documentation/devapi.rst
+++ b/documentation/devapi.rst
@@ -28,6 +28,8 @@ Internal Development Interface
Package ``classcore.__``
===============================================================================
+.. automodule:: classcore.__
+
Module ``classcore.__.imports``
-------------------------------------------------------------------------------
diff --git a/documentation/examples/index.rst b/documentation/examples/index.rst
index 9d1488d..2766596 100644
--- a/documentation/examples/index.rst
+++ b/documentation/examples/index.rst
@@ -29,3 +29,4 @@ Examples
std-protocols
std-behaviors
std-modules
+ std-dynadoc
diff --git a/documentation/examples/std-behaviors.rst b/documentation/examples/std-behaviors.rst
index b8127be..61bc51a 100644
--- a/documentation/examples/std-behaviors.rst
+++ b/documentation/examples/std-behaviors.rst
@@ -99,7 +99,7 @@ on instances of the decorated class.
>>> del point.x
-Selective Visibility
+Selective Mutability
-------------------------------------------------------------------------------
Explicit attribute names for selective mutability:
@@ -187,7 +187,7 @@ Invalid mutability verifiers will cause an error to be raised:
...
Traceback (most recent call last):
...
- classcore.exceptions.BehaviorExclusionInvalidity: Invalid behavior exclusion verifier: 42
+ classcore.exceptions.BehaviorExclusionInvalidity: Invalid behavior exclusion verifier: 13
Inheritance
-------------------------------------------------------------------------------
@@ -354,7 +354,7 @@ Invalid visibility verifiers will cause an error to be raised:
...
Traceback (most recent call last):
...
- classcore.exceptions.BehaviorExclusionInvalidity: Invalid behavior exclusion verifier: 42
+ classcore.exceptions.BehaviorExclusionInvalidity: Invalid behavior exclusion verifier: 13
Inheritance
diff --git a/documentation/examples/std-classes.rst b/documentation/examples/std-classes.rst
index a25776d..321806e 100644
--- a/documentation/examples/std-classes.rst
+++ b/documentation/examples/std-classes.rst
@@ -159,3 +159,145 @@ attributes, there is a convenience class, ``ObjectMutable``.
>>> point.x, point.y = 20, 21
>>> point.x, point.y
(20, 21)
+
+
+Attribute Preallocations
+===============================================================================
+
+You can preallocate attributes using the standard Python ``__slots__``
+mechanism. In addition to potential performance gains for attribute lookups,
+this can be useful if you are making a namespace class and want to keep the
+namespace dictionary free of record-keeping attributes. You cannot inherit a
+standard base class, such as ``Object``, for this purpose, as it is
+``__dict__``-based. However, you can create the namespace class via metaclass.
+
+.. doctest:: Standard.Classes
+
+ >>> class Namespace( metaclass = ccstd.Class ):
+ ... __slots__ = ( '__dict__', )
+ ... def __init__( self, **arguments: float ) -> None:
+ ... self.__dict__.update( arguments )
+ ...
+ >>> ns = Namespace( x = 20, y = 21 )
+ >>> ns.__slots__
+ ('__dict__', '_classcore_instance_behaviors_')
+ >>> 'x' in ns.__dict__
+ True
+ >>> '_classcore_instance_behaviors_' in ns.__dict__
+ False
+ >>> ns.x, ns.y
+ (20, 21)
+
+The mapping form of ``__slots__`` is also supported.
+
+.. doctest:: Standard.Classes
+
+ >>> class Namespace( metaclass = ccstd.Class ):
+ ... __slots__ = { '__dict__': 'Namespace attributes.' }
+ ... def __init__( self, **arguments: float ):
+ ... self.__dict__.update( arguments )
+ ...
+ >>> ns = Namespace( x = 20, y = 21 )
+ >>> ns.__slots__[ '__dict__' ]
+ 'Namespace attributes.'
+
+
+Suppression of Initialization Arguments
+===============================================================================
+
+In some cases, you may inherit from classes which process their instance
+construction arguments via ``__new__`` rather than ``__init__``. This is
+encountered, for example, when :py:class:`tuple` and other immutable builtins
+are subclassed. To prevent the construction arguments from being applied to the
+``__init__`` call chain, you can set ``instances_ignore_init_arguments`` to
+``True`` as a class argument.
+
+.. doctest:: Standard.Classes
+
+ >>> from urllib.parse import ParseResult, urlparse
+ >>> class Url( ccstd.Object, ParseResult, instances_ignore_init_arguments = True ):
+ ... pass
+ ...
+ >>> u = Url( *urlparse( 'https://python.org' ) )
+
+Or pass ``ignore_init_arguments`` as ``True`` to a decorator.
+
+.. doctest:: Standard.Classes
+
+ >>> @ccstd.with_standard_behaviors( ignore_init_arguments = True )
+ ... class Url( ParseResult ): pass
+ ...
+ >>> u = Url( *urlparse( 'https://python.org' ) )
+
+
+Integrations with Custom Behaviors
+===============================================================================
+
+You can define dunder methods, like ``__delattr__``, ``__setattr__``, and
+``__dir__``, and they will be automatically wrapped by the decorators which
+setup attributes concealment and immutability enforcement on classes.
+
+.. doctest:: Standard.Classes
+
+ >>> class Point2d( ccstd.ObjectMutable ):
+ ... def __init__( self, x: float, y: float ) -> None:
+ ... super( ).__init__( )
+ ... self.x = x
+ ... self.y = y
+ ... def __delattr__( self, name: str ) -> None:
+ ... if not name.startswith( '_' ): print( name )
+ ... super( ).__delattr__( name )
+ ... def __setattr__( self, name: str, value ) -> None:
+ ... if not name.startswith( '_' ): print( f"{name} = {value!r}" )
+ ... super( ).__setattr__( name, value )
+ ... def __dir__( self ):
+ ... print( 'called dir' )
+ ... return super( ).__dir__( )
+ ...
+ >>> point = Point2d( 3, 4 )
+ x = 3
+ y = 4
+ >>> point.x, point.y = 5, 12
+ x = 5
+ y = 12
+ >>> del point.y
+ y
+ >>> 'x' in dir( point )
+ called dir
+ True
+
+The integration points work correctly with inheritance. Furthermore, the
+standard behaviors (concealment and immutability) are idempotent, which
+improves their performance in class hierarchies.
+
+.. doctest:: Standard.Classes
+
+ >>> class Point3d( Point2d ):
+ ... def __init__( self, x: float, y: float, z: float ) -> None:
+ ... super( ).__init__( x, y )
+ ... self.z = z
+ ... def __delattr__( self, name: str ) -> None:
+ ... if name == 'z': print( 'Z!' )
+ ... super( ).__delattr__( name )
+ ... def __setattr__( self, name: str, value ) -> None:
+ ... if name == 'z': print( 'Z!' )
+ ... super( ).__setattr__( name, value )
+ ... def __dir__( self ):
+ ... print( 'called dir in 3D' )
+ ... return super( ).__dir__( )
+ ...
+ >>> point3 = Point3d( 5, 12, 17 )
+ x = 5
+ y = 12
+ Z!
+ z = 17
+ >>> point3.z = 60
+ Z!
+ z = 60
+ >>> del point3.z
+ Z!
+ z
+ >>> 'z' not in dir( point3 )
+ called dir in 3D
+ called dir
+ True
diff --git a/documentation/examples/std-dynadoc.rst b/documentation/examples/std-dynadoc.rst
new file mode 100644
index 0000000..7661603
--- /dev/null
+++ b/documentation/examples/std-dynadoc.rst
@@ -0,0 +1,210 @@
+.. vim: set fileencoding=utf-8:
+.. -*- coding: utf-8 -*-
+.. +--------------------------------------------------------------------------+
+ | |
+ | Licensed under the Apache License, Version 2.0 (the "License"); |
+ | you may not use this file except in compliance with the License. |
+ | You may obtain a copy of the License at |
+ | |
+ | http://www.apache.org/licenses/LICENSE-2.0 |
+ | |
+ | Unless required by applicable law or agreed to in writing, software |
+ | distributed under the License is distributed on an "AS IS" BASIS, |
+ | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
+ | See the License for the specific language governing permissions and |
+ | limitations under the License. |
+ | |
+ +--------------------------------------------------------------------------+
+
+
+*******************************************************************************
+Dynadoc Integration
+*******************************************************************************
+
+
+Introduction
+===============================================================================
+
+Seamless integration with Dynadoc is provided to automate documentation of
+classes and their members. This integration allows you to configure Dynadoc
+behavior at both the metaclass and individual class levels, ensuring that your
+classes receive appropriate documentation based on their structure and
+annotations.
+
+.. doctest:: Dynadoc.Integration
+
+ >>> from typing import Annotated
+ >>> import classcore.standard as ccstd
+ >>> import dynadoc
+
+
+Basic Configuration
+===============================================================================
+
+Dynadoc configuration can be applied to classes through the standard Classcore
+metaclasses. The configuration controls how documentation is generated,
+including which objects to introspect and how to render the results.
+
+.. doctest:: Dynadoc.Integration
+
+ >>> class Person( ccstd.DataclassObject ):
+ ... ''' A person with basic information. '''
+ ... name: Annotated[ str, dynadoc.Doc( "Full name of the person" ) ]
+ ... age: Annotated[ int, dynadoc.Doc( "Age in years" ) ]
+ ... email: Annotated[ str | None, dynadoc.Doc( "Email address if available" ) ] = None
+ ...
+ >>> print( Person.__doc__ )
+ A person with basic information.
+
+ :ivar name: Full name of the person
+ :vartype name: str
+ :ivar age: Age in years
+ :vartype age: int
+ :ivar email: Email address if available
+ :vartype email: str | None
+
+The documentation is automatically enhanced with parameter information derived
+from the dataclass fields and their type annotations.
+
+
+Metaclass-Level Configuration
+===============================================================================
+
+You can configure Dynadoc behavior for all classes created with a particular
+metaclass by setting configuration attributes directly on the metaclass:
+
+.. doctest:: Dynadoc.Integration
+
+ >>> # Create a custom metaclass with specific Dynadoc settings
+ >>> @ccstd.class_factory(
+ ... dynadoc_configuration = ccstd.dynadoc.produce_dynadoc_configuration(
+ ... preserve = False # Don't preserve existing docstrings
+ ... )
+ ... )
+ ... class CustomClass( type ): pass
+ ...
+ >>> class Calculator( metaclass = CustomClass ):
+ ... ''' Original calculator docstring. '''
+ ... def add(
+ ... self,
+ ... x: Annotated[ float, dynadoc.Doc( "First number" ) ],
+ ... y: Annotated[ float, dynadoc.Doc( "Second number" ) ]
+ ... ) -> Annotated[ float, dynadoc.Doc( "Sum of x and y" ) ]:
+ ... ''' Add two numbers. '''
+ ... return x + y
+ ... def multiply(
+ ... self,
+ ... x: Annotated[ float, dynadoc.Doc( "First number" ) ],
+ ... y: Annotated[ float, dynadoc.Doc( "Second number" ) ]
+ ... ) -> Annotated[ float, dynadoc.Doc( "Product of x and y" ) ]:
+ ... ''' Multiply two numbers. '''
+ ... return x * y
+ ...
+ >>> print( Calculator.__doc__ )
+ None
+
+.. code-block:: text
+
+ >>> print( Calculator.add.__doc__ )
+ Add two numbers.
+
+ :argument self:
+ :argument x: First number
+ :type x: float
+ :argument y: Second number
+ :type y: float
+ :returns: Sum of x and y
+ :rtype: float
+
+.. code-block:: text
+
+ >>> print( Calculator.multiply.__doc__ )
+ Multiply two numbers.
+
+ :argument self:
+ :argument x: First number
+ :type x: float
+ :argument y: Second number
+ :type y: float
+ :returns: Product of x and y
+ :rtype: float
+
+Notice how the original class docstring was completely replaced (due to
+``preserve = False``) and is now ``None``, while the individual method
+docstrings were enhanced with parameter and return type documentation from
+their annotations.
+
+
+Class-Level Configuration
+===============================================================================
+
+Individual classes can override metaclass defaults by providing their own
+Dynadoc configuration as a ``class`` statement argument:
+
+.. doctest:: Dynadoc.Integration
+
+ >>> class Vehicle(
+ ... ccstd.DataclassObject,
+ ... dynadoc_configuration = {
+ ... 'preserve': True,
+ ... 'introspection': ccstd.dynadoc.produce_dynadoc_introspection_control(
+ ... enable = False # Disable automatic introspection
+ ... )
+ ... }
+ ... ):
+ ... ''' A vehicle with make and model information.
+ ...
+ ... This class represents various types of vehicles.
+ ... '''
+ ... make: Annotated[ str, dynadoc.Doc( "Vehicle manufacturer" ) ]
+ ... model: Annotated[ str, dynadoc.Doc( "Vehicle model name" ) ]
+ ... year: Annotated[ int, dynadoc.Doc( "Year of manufacture" ) ]
+ ...
+ >>> print( Vehicle.__doc__ )
+ A vehicle with make and model information.
+
+ This class represents various types of vehicles.
+
+Since introspection was disabled, only the original docstring is preserved
+without any automatic parameter documentation.
+
+
+Documentation of Modules
+===============================================================================
+
+A variation of ``assign_module_docstring`` is provided, which respects
+immutable classes. This function is used by this package, itself, to
+automatically generate documentation for its own modules:
+
+.. code-block:: python
+
+ from . import standard
+
+ # ... other imports and definitions ...
+
+ standard.dynadoc.assign_module_docstring( __name__, table = __.fragments )
+ standard.reclassify_modules( __name__, recursive = True )
+
+This automatically generates comprehensive documentation for the entire
+package, including all submodules. The key benefits of this variation include:
+
+* **Automatic immutable class avoidance**: By default, immutable classes are not
+ introspected during documentation generation to prevent potential issues.
+
+* **Recursive package documentation**: When applied to a package, it can
+ recursively document all submodules.
+
+* **Fragment table integration**: Supports reusable documentation fragments
+ for consistent terminology across the package.
+
+You can apply this to your own modules and packages:
+
+.. code-block:: python
+
+ import classcore.standard as ccstd
+
+ # At the end of your module's __init__.py
+ ccstd.dynadoc.assign_module_docstring( __name__ )
+
+ # Optionally make the entire package immutable
+ ccstd.reclassify_modules( __name__, recursive = True )
diff --git a/documentation/examples/std-modules.rst b/documentation/examples/std-modules.rst
index 3bf483a..63b3a63 100644
--- a/documentation/examples/std-modules.rst
+++ b/documentation/examples/std-modules.rst
@@ -1,3 +1,4 @@
+.. vim: set fileencoding=utf-8:
.. -*- coding: utf-8 -*-
.. +--------------------------------------------------------------------------+
| |
@@ -24,10 +25,262 @@ Standard Modules
Introduction
===============================================================================
-.. todo:: Contents
+The ``standard.modules`` submodule provides functionality to enhance Python
+modules with immutability, concealment, and automatic documentation generation.
+This is particularly useful for package authors who want to prevent accidental
+modification of their module's public API while providing rich documentation.
+The module provides two main approaches:
-Reclassification
+1. **Module reclassification** - converts standard modules to have immutable
+ and concealed attributes
+2. **Module finalization** - combines documentation generation with
+ reclassification in a single convenient function
+
+
+Module Reclassification
+===============================================================================
+
+The ``reclassify_modules`` function converts modules to use a custom module
+class that provides immutability and concealment behaviors. Here's how you
+might use it in a hypothetical package:
+
+.. code-block:: python
+
+ # mypackage/__init__.py
+ import classcore.standard as _ccstd
+
+ # Import your submodules
+ from . import core
+ from . import utils
+ from . import exceptions
+
+ # Apply module reclassification
+ _ccstd.reclassify_modules( __name__, recursive = True )
+
+After reclassification, the modules become immutable:
+
+.. code-block:: python
+
+ # This would raise AttributeImmutability exception
+ # mypackage.core = "something else"
+
+ # Non-public attributes are concealed from dir()
+ # dir( mypackage ) # Only shows public attributes
+
+The ``recursive = True`` parameter ensures that all submodules within the
+package hierarchy are also reclassified, providing consistent behavior
+throughout your package.
+
+
+Individual Module Reclassification
+-------------------------------------------------------------------------------
+
+You can also reclassify individual modules without affecting the entire
+package hierarchy:
+
+.. code-block:: python
+
+ # mypackage/core.py
+ import classcore.standard as _ccstd
+
+ def important_function():
+ ''' This function should not be accidentally modified. '''
+ return "Important result"
+
+ # Reclassify only this module
+ _ccstd.reclassify_modules( __name__ )
+
+This approach is useful when you want fine-grained control over which modules
+in your package receive the enhanced behaviors.
+
+
+Module Finalization with Documentation
===============================================================================
-.. todo:: Contents
+The ``finalize_module`` function provides a convenient way to combine automatic
+documentation generation (via Dynadoc integration) with module reclassification.
+This is the recommended approach for most packages.
+
+Basic Usage
+-------------------------------------------------------------------------------
+
+.. code-block:: python
+
+ # mypackage/__init__.py
+ import classcore.standard as _ccstd
+
+ from . import core
+ from . import utils
+ from . import exceptions
+
+ # Define documentation fragments
+ _fragments = {
+ 'version': '1.0.0',
+ 'author': 'Your Name',
+ 'description': 'A utility package for data processing'
+ }
+
+ # Finalize the module with documentation and reclassification
+ _ccstd.finalize_module(
+ __name__,
+ dynadoc_table = _fragments,
+ recursive = True
+ )
+
+The ``finalize_module`` function will:
+
+1. Generate comprehensive documentation for the module and its members using
+ Dynadoc introspection
+2. Apply the documentation fragments you provide
+3. Reclassify the module and its submodules for immutability and concealment
+
+Advanced Configuration
+-------------------------------------------------------------------------------
+
+For complex packages, you might want to configure different parts differently:
+
+.. code-block:: python
+
+ # mypackage/__init__.py
+ import classcore.standard as _ccstd
+
+ # Configure main package with full documentation
+ _ccstd.finalize_module(
+ __name__,
+ dynadoc_table = main_fragments,
+ recursive = False # Handle submodules individually
+ )
+
+ # Configure submodules with different settings
+ _ccstd.finalize_module(
+ f"{__name__}.core",
+ dynadoc_table = core_fragments,
+ recursive = True
+ )
+
+ _ccstd.finalize_module(
+ f"{__name__}.utils",
+ dynadoc_table = utils_fragments,
+ recursive = True
+ )
+
+This approach allows you to provide different documentation fragments and
+introspection settings for different parts of your package.
+
+
+Best Practices
+===============================================================================
+
+Package-Level Application
+-------------------------------------------------------------------------------
+
+For most packages, apply ``finalize_module`` at the package level in your
+``__init__.py`` file:
+
+.. code-block:: python
+
+ # mypackage/__init__.py
+ import classcore.standard as _ccstd
+
+ # Package metadata and documentation fragments
+ __version__ = '1.0.0'
+ _fragments = {
+ 'version': __version__,
+ 'maintainer': 'Package Team',
+ 'license': 'Apache 2.0'
+ }
+
+ # Import public API
+ from .core import PublicClass, public_function
+ from .utils import helper_function
+
+ # Finalize the entire package
+ _ccstd.finalize_module(
+ __name__,
+ dynadoc_table = _fragments,
+ recursive = True
+ )
+
+This pattern ensures that:
+
+- Your package's public API is documented
+- All modules in the package are immutable and concealed
+- Documentation fragments are applied consistently
+- The entire package hierarchy is protected from accidental modification
+
+Documentation Fragments
+-------------------------------------------------------------------------------
+
+Use documentation fragments to provide consistent metadata across your package:
+
+.. code-block:: python
+
+ # mypackage/_metadata.py
+ fragments = {
+ 'version': '1.0.0',
        'author': 'Your Name <you@example.com>',
+ 'license': 'Apache License 2.0',
+ 'homepage': 'https://github.com/yourname/mypackage',
+ 'description': 'A comprehensive data processing library',
+ 'examples_url': 'https://mypackage.readthedocs.io/examples',
+ 'api_url': 'https://mypackage.readthedocs.io/api'
+ }
+
+ # mypackage/__init__.py
+ from ._metadata import fragments
+ import classcore.standard as _ccstd
+
+ _ccstd.finalize_module( __name__, dynadoc_table = fragments, recursive = True )
+
+This approach centralizes your package metadata and makes it easy to maintain
+consistency across documentation.
+
+Error Handling
+-------------------------------------------------------------------------------
+
+When using module finalization, be aware that the resulting modules will raise
+``AttributeImmutability`` exceptions if code attempts to modify them:
+
+.. code-block:: python
+
+ import classcore.exceptions
+
+ # After finalization, this will raise an exception
+ try:
+ mypackage.core.some_function = lambda: "modified"
+ except classcore.exceptions.AttributeImmutability as e:
+ print( f"Cannot modify module: {e}" )
+
+Design your package APIs to avoid dynamic modification after finalization.
+If you need dynamic behavior, consider using configuration objects or factory
+functions instead of direct module attribute modification.
+
+
+Integration with Build Systems
+===============================================================================
+
+Module finalization integrates well with modern Python build systems. The
+immutability ensures that your package's API surface is clearly defined and
+cannot be accidentally modified at runtime.
+
+For packages that use entry points or plugin systems, apply finalization after
+all dynamic setup is complete:
+
+.. code-block:: python
+
+ # mypackage/__init__.py
+ import classcore.standard as _ccstd
+
+ # Dynamic setup (plugin registration, etc.)
+ _setup_plugins()
+ _register_entry_points()
+
+ # Final API definition
+ from .api import *
+
+ # Lock down the package
+ _ccstd.finalize_module( __name__, dynadoc_table = _fragments, recursive = True )
+
+This ensures that your package initialization is complete before the
+immutability protections are applied.
\ No newline at end of file
diff --git a/documentation/prd.rst b/documentation/prd.rst
new file mode 100644
index 0000000..85bf640
--- /dev/null
+++ b/documentation/prd.rst
@@ -0,0 +1,27 @@
+.. vim: set fileencoding=utf-8:
+.. -*- coding: utf-8 -*-
+.. +--------------------------------------------------------------------------+
+ | |
+ | Licensed under the Apache License, Version 2.0 (the "License"); |
+ | you may not use this file except in compliance with the License. |
+ | You may obtain a copy of the License at |
+ | |
+ | http://www.apache.org/licenses/LICENSE-2.0 |
+ | |
+ | Unless required by applicable law or agreed to in writing, software |
+ | distributed under the License is distributed on an "AS IS" BASIS, |
+ | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
+ | See the License for the specific language governing permissions and |
+ | limitations under the License. |
+ | |
+ +--------------------------------------------------------------------------+
+
+
+*******************************************************************************
+Product Requirements Document
+*******************************************************************************
+
+.. todo:: Define product requirements, user stories, and acceptance criteria.
+
+For PRD format and guidance, see the project's requirements documentation
+guide.
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 63327f9..c04fa2c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -15,6 +15,7 @@ license = 'Apache-2.0'
readme = { 'file' = 'README.rst', 'content-type' = 'text/x-rst' }
requires-python = '>= 3.10'
dependencies = [
+ 'dynadoc',
'typing-extensions',
# --- BEGIN: Injected by Copier ---
# --- END: Injected by Copier ---
@@ -53,11 +54,12 @@ year-of-origin = 2025
gha-runners = [
'/home/runner/work/python-classcore/python-classcore/',
'/Users/runner/work/python-classcore/python-classcore/',
+ 'C:\a\python-classcore\python-classcore\',
'D:\a\python-classcore\python-classcore\',
]
[tool.coverage.run]
branch = true
-command_line = '-m pytest' # TODO? '--fail-under'
+command_line = '-m pytest'
data_file = '.auxiliary/caches/pytest/coverage.sqlite3'
parallel = true
source = [ 'sources' ]
@@ -94,9 +96,8 @@ description = ''' Development environment. '''
dependencies = [
'Jinja2',
'coverage[toml]',
- 'emcd-projects',
'furo',
- 'icecream-truck',
+ 'isort',
'packaging',
'pre-commit',
'pyright',
@@ -106,6 +107,7 @@ dependencies = [
'sphinx-copybutton',
'sphinx-inline-tabs',
'towncrier',
+ 'vulture',
# --- BEGIN: Injected by Copier ---
# --- END: Injected by Copier ---
]
@@ -119,28 +121,38 @@ PYTHONUNBUFFERED = 'TRUE' # TODO: Only for coverage/pytest.
# --- END: Injected by Copier ---
[tool.hatch.envs.develop.scripts]
docsgen = [
- '''sphinx-build -E -b linkcheck -d .auxiliary/caches/sphinx \
- documentation .auxiliary/artifacts/sphinx-linkcheck''',
- '''sphinx-build -a -d .auxiliary/caches/sphinx \
- documentation .auxiliary/artifacts/sphinx-html''',
+ """sphinx-build -E -b linkcheck -d .auxiliary/caches/sphinx --quiet \
+ documentation .auxiliary/artifacts/sphinx-linkcheck""",
+ """sphinx-build -a -d .auxiliary/caches/sphinx --quiet \
+ documentation .auxiliary/artifacts/sphinx-html""",
]
linters = [
- '''ruff check --quiet sources documentation tests''',
+ """ruff check --quiet sources documentation tests""",
# --- BEGIN: Injected by Copier ---
# --- END: Injected by Copier ---
- '''pyright sources''',
+ """isort --check-only --diff sources tests""",
+ """vulture""",
+ """pyright sources""",
]
packagers = [
- '''hatch build''',
+ """hatch build""",
# --- BEGIN: Injected by Copier ---
# --- END: Injected by Copier ---
]
+testers-documentation = [
+ """coverage run -m sphinx.cmd.build \
+ -E -b doctest -d .auxiliary/caches/sphinx --quiet \
+ documentation .auxiliary/artifacts/sphinx-doctest""",
+]
+testers-serotine = [ """coverage run -m pytest -m slow""" ]
+testers-no-reports = [
+ 'coverage run',
+ 'testers-documentation',
+ 'testers-serotine',
+]
testers = [
'coverage erase',
- 'coverage run',
- '''coverage run -m sphinx.cmd.build \
- -E -b doctest -d .auxiliary/caches/sphinx \
- documentation .auxiliary/artifacts/sphinx-doctest''',
+ 'testers-no-reports',
'coverage combine',
'coverage report --skip-covered',
'coverage html',
@@ -163,9 +175,30 @@ python = [
'3.13',
'pypy3.10',
]
+# [tool.hatch.metadata]
+# allow-direct-references = true
[tool.hatch.version]
path = 'sources/classcore/__init__.py'
+# https://pycqa.github.io/isort/docs/configuration/config_files.html
+[tool.isort]
+# profile = 'black'
+src_paths = [ 'sources', 'tests' ]
+case_sensitive = true
+# ensure_newline_before_comments = true
+# force_sort_within_sections = true
+ignore_whitespace = true
+include_trailing_comma = true
+known_first_party = [ 'classcore' ]
+lines_between_types = 1
+line_length = 79
+multi_line_output = 3
+quiet = true
+# skip_gitignore = true
+skip_glob = [ '*/__/imports.py', '*/__init__.py' ]
+split_on_trailing_comma = true
+use_parentheses = true
+
# https://mypy.readthedocs.io/en/stable/config_file.html
[tool.mypy]
# Note: Due to repeated painful experiences with Mypy, we use Pyright instead.
@@ -179,8 +212,8 @@ strict = false
# https://microsoft.github.io/pyright/#/configuration
[tool.pyright]
-ignore = [ 'tests' ] # Stronger hint for language server.
-include = [ 'sources' ]
+ignore = [ '.auxiliary', 'documentation', 'tests' ] # Ignore diagnostics.
+include = [ 'sources', 'tests' ] # Consider for operations.
reportConstantRedefinition = true
reportInvalidTypeVarUse = true
reportMatchNotExhaustive = true
@@ -212,15 +245,19 @@ stubPath = 'sources/classcore/_typedecls'
# Note: Cannot run doctests from Pytest, because Pytest tries to update '_'
# attribute on protected modules. Instead, we use Sphinx to run doctests.
minversion = '8.1'
-addopts = '--capture=no --exitfirst -rfE'
+addopts = """--capture=no --exitfirst --quiet -rfE -m 'not slow'""" # TODO? '--fail-under'
testpaths = [ 'tests' ]
python_files = [ 'test_*.py' ]
python_functions = [ 'test_[0-9][0-9][0-9]_*' ]
cache_dir = '.auxiliary/caches/pytest'
+markers = [
+ "slow: long-running tests",
+]
[tool.ruff]
#builtins = [ 'ic' ]
cache-dir = '.auxiliary/caches/ruff'
+extend-exclude = [ 'vulturefood.py' ]
indent-width = 4
line-length = 79
[tool.ruff.lint]
@@ -310,3 +347,12 @@ showcontent = true
directory = 'repair'
name = 'Repairs'
showcontent = true
+
+[tool.vulture]
+paths = [ '.auxiliary/configuration/vulturefood.py', 'sources' ]
+min_confidence = 60
+exclude = [
+ '*/imports.py',
+]
+ignore_decorators = [ '@__.abc.abstractmethod', '@__.typx.overload' ]
+ignore_names = [ 'main' ]
diff --git a/sources/classcore/__/__init__.py b/sources/classcore/__/__init__.py
index ffb55ac..f97cc07 100644
--- a/sources/classcore/__/__init__.py
+++ b/sources/classcore/__/__init__.py
@@ -21,5 +21,13 @@
''' Common constants, imports, and utilities. '''
+from .doctab import *
from .imports import *
from .nomina import *
+
+
+T = typx.TypeVar( 'T', bound = type )
+U = typx.TypeVar( 'U' )
+
+
+dictproxy_empty: cabc.Mapping[ str, str ] = types.MappingProxyType( { } )
diff --git a/sources/classcore/__/doctab.py b/sources/classcore/__/doctab.py
new file mode 100644
index 0000000..fac22bf
--- /dev/null
+++ b/sources/classcore/__/doctab.py
@@ -0,0 +1,70 @@
+# vim: set filetype=python fileencoding=utf-8:
+# -*- coding: utf-8 -*-
+
+#============================================================================#
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+# #
+#============================================================================#
+
+
+''' Docstring fragments. '''
+
+
+from . import imports as __
+
+
+_FragmentsTable: __.typx.TypeAlias = __.cabc.Mapping[ str, str ]
+fragments: _FragmentsTable = __.types.MappingProxyType( {
+
+ 'cfc class conceal':
+ ''' By default, conceals non-public class attributes. ''',
+
+ 'cfc class protect':
+ ''' By default, protects class attributes. ''',
+
+ 'cfc dynadoc': ''' Applies Dynadoc decoration to classes. ''',
+
+ 'cfc instance conceal':
+ ''' Produces classes which can conceal instance attributes. ''',
+
+ 'cfc instance protect':
+ ''' Produces classes which can protect instance attributes. ''',
+
+ 'cfc produce dataclass':
+ ''' Produces inheritable dataclasses with keyword-only instantiation. ''',
+
+ 'cfc produce protocol class':
+ ''' Produces :pep:`544` protocol classes. ''',
+
+ 'class concealment':
+ ''' By default, non-public class attributes are invisible. ''',
+
+ 'class protection':
+ ''' By default, class attributes are immutable. ''',
+
+ 'class instance conceal':
+ ''' By default, conceals non-public instance attributes. ''',
+
+ 'class instance protect':
+ ''' By default, protects instance attributes. ''',
+
+ 'dataclass':
+ ''' Inheritable dataclass with keyword-only instantiation. ''',
+
+ 'protocol class':
+ ''' Protocol class (:pep:`544`). Nominal and structural subtyping. ''',
+
+ 'class dynadoc': ''' Is decorated by Dynadoc. ''',
+
+} )
diff --git a/sources/classcore/__/imports.py b/sources/classcore/__/imports.py
index 58d3bd3..abc9e78 100644
--- a/sources/classcore/__/imports.py
+++ b/sources/classcore/__/imports.py
@@ -18,25 +18,22 @@
#============================================================================#
-''' Common imports and type aliases used throughout the package. '''
+''' Common imports used throughout the package. '''
# ruff: noqa: F401
-from __future__ import annotations
-
import collections.abc as cabc
import dataclasses as dcls
import functools as funct
+import hashlib
import inspect
import platform
import re
import sys
import types
+import dynadoc as ddoc
import typing_extensions as typx
# --- BEGIN: Injected by Copier ---
# --- END: Injected by Copier ---
-
-
-ComparisonResult: typx.TypeAlias = bool | types.NotImplementedType
diff --git a/sources/classcore/__/nomina.py b/sources/classcore/__/nomina.py
index 1343cea..fcf14ac 100644
--- a/sources/classcore/__/nomina.py
+++ b/sources/classcore/__/nomina.py
@@ -18,14 +18,13 @@
#============================================================================#
-''' Catalog of common type aliases. '''
+''' Common names and type aliases. '''
-from __future__ import annotations
-
from . import imports as __
+ComparisonResult: __.typx.TypeAlias = bool | __.types.NotImplementedType
NominativeArguments: __.typx.TypeAlias = __.cabc.Mapping[ str, __.typx.Any ]
PositionalArguments: __.typx.TypeAlias = __.cabc.Sequence[ __.typx.Any ]
diff --git a/sources/classcore/__init__.py b/sources/classcore/__init__.py
index 25701a0..2714dae 100644
--- a/sources/classcore/__init__.py
+++ b/sources/classcore/__init__.py
@@ -18,7 +18,36 @@
#============================================================================#
-''' Foundational class factories and decorators. '''
+''' Foundational class factories and decorators.
+
+ Provides ability to create class decorators and metaclasses
+ with customization hooks. The metaclasses can apply class decorators
+ inline during the class construction and initialization process, properly
+ handling cases where decorators replace classes (e.g.,
+ ``dataclasses.dataclass( slots = True )``). They also backport the repair
+ mechanism from newer versions of CPython to ensure that the class closure
+ cells are rectified on replaced classes, so that zero-argument ``super``
+ calls function correctly in them.
+
+ The ``classcore.standard`` subpackage is an example of the decorators and
+ customization hooks being used to provide a set of practical classes and
+ class decorators. Furthermore, the exception classes in the
+ :py:mod:`classcore.exceptions` module inherit from one of the standard
+ classes, making both the exception classes, themselves, and their
+ instances immutable and concealing their non-public attributes to reduce
+ API noise. I.e., this package "eats its own dog food" and provides
+ practical examples in so doing.
+
+ This package is not as magical as it might seem. It does **not** rely on
+ any ``exec`` or ``eval`` calls and it does **not** do anything with
+ ``ctypes`` or similar surgical instruments. It relies completely on the
+ documented Python data model and the machinery that it provides. While it
+ is true that metaclasses can be tricky, this package is developed with a
+ deep, highly-evolved understanding of them. We seek simplicity over
+ cleverness and maintain robust tests across multiple Python
+ implementations and versions. The package is also very clean in terms of
+ static type checking (via Pyright).
+'''
from . import __
@@ -32,7 +61,9 @@
from .factories import *
-__version__ = '1.0a0'
+__version__: __.typx.Annotated[ str, __.ddoc.Visibilities.Reveal ]
+__version__ = '1.10'
-standard.reclassify_modules( __name__, recursive = True )
+standard.finalize_module(
+ __name__, dynadoc_table = __.fragments, recursive = True )
diff --git a/sources/classcore/decorators.py b/sources/classcore/decorators.py
index ffd0fed..dd2f310 100644
--- a/sources/classcore/decorators.py
+++ b/sources/classcore/decorators.py
@@ -21,17 +21,14 @@
''' Utilities for the decoration of classes, including metaclasses. '''
-from __future__ import annotations
-
from . import __
from . import nomina as _nomina
from . import utilities as _utilities
-_T = __.typx.TypeVar( '_T', bound = type )
-
-
-def apply_decorators( cls: type, decorators: _nomina.Decorators ) -> type:
+def apply_decorators(
+ cls: type[ __.U ], decorators: _nomina.Decorators[ __.U ]
+) -> type:
''' Applies sequence of decorators to class.
If decorators replace classes (e.g., ``dataclass( slots = True )``),
@@ -49,9 +46,9 @@ def apply_decorators( cls: type, decorators: _nomina.Decorators ) -> type:
def decoration_by(
- *decorators: _nomina.Decorator,
- preparers: _nomina.DecorationPreparers = ( ),
-) -> _nomina.Decorator:
+ *decorators: _nomina.Decorator[ __.U ],
+ preparers: _nomina.DecorationPreparers[ __.U ] = ( ),
+) -> _nomina.Decorator[ __.U ]:
''' Class decorator which applies other class decorators.
Useful to apply a stack of decorators as a sequence.
@@ -61,7 +58,7 @@ def decoration_by(
decorators list itself, such as to inject decorators based on
introspection of the class.
'''
- def decorate( cls: type ) -> type:
+ def decorate( cls: type[ __.U ] ) -> type[ __.U ]:
decorators_ = list( decorators )
for preparer in preparers: preparer( cls, decorators_ )
return apply_decorators( cls, decorators_ )
@@ -71,32 +68,52 @@ def decorate( cls: type ) -> type:
def produce_class_construction_decorator(
attributes_namer: _nomina.AttributesNamer,
- constructor: _nomina.ClassConstructor,
-) -> _nomina.Decorator:
+ constructor: _nomina.ClassConstructor[ __.T ],
+) -> _nomina.Decorator[ __.T ]:
''' Produces metaclass decorator to control class construction.
Decorator overrides ``__new__`` on metaclass.
'''
- def decorate( clscls: type[ _T ] ) -> type[ _T ]:
- constructor_name = attributes_namer( 'classes', 'constructor' )
- extant = getattr( clscls, constructor_name, None )
- original = getattr( clscls, '__new__' )
- if extant is original: return clscls
-
- def construct(
- clscls_: type[ _T ],
- name: str,
- bases: tuple[ type, ... ],
- namespace: dict[ str, __.typx.Any ], *,
- decorators: _nomina.Decorators = ( ),
- **arguments: __.typx.Any,
- ) -> type[ object ]:
- return constructor(
- clscls_, original,
- name, bases, namespace, arguments, decorators )
-
- setattr( clscls, constructor_name, construct )
- setattr( clscls, '__new__', construct )
+ def decorate( clscls: type[ __.T ] ) -> type[ __.T ]:
+ original = __.typx.cast(
+ _nomina.ClassConstructorLigation | None,
+ clscls.__dict__.get( '__new__' ) ) # pyright: ignore
+
+ if original is None:
+
+ def construct_with_super(
+ clscls_: type[ __.T ],
+ name: str,
+ bases: tuple[ type, ... ],
+ namespace: dict[ str, __.typx.Any ], *,
+ decorators: _nomina.Decorators[ __.T ] = ( ),
+ **arguments: __.typx.Any,
+ ) -> type[ object ]:
+ superf = super( clscls, clscls_ ).__new__
+ # TODO? Short-circuit if not at start of MRO.
+ return constructor(
+ clscls_, superf,
+ name, bases, namespace, arguments, decorators )
+
+ setattr( clscls, '__new__', construct_with_super )
+
+ else:
+
+ def construct_with_original(
+ clscls_: type[ __.T ],
+ name: str,
+ bases: tuple[ type, ... ],
+ namespace: dict[ str, __.typx.Any ], *,
+ decorators: _nomina.Decorators[ __.T ] = ( ),
+ **arguments: __.typx.Any,
+ ) -> type[ object ]:
+ # TODO? Short-circuit if not at start of MRO.
+ return constructor(
+ clscls_, original,
+ name, bases, namespace, arguments, decorators )
+
+ setattr( clscls, '__new__', construct_with_original )
+
return clscls
return decorate
@@ -105,26 +122,39 @@ def construct(
def produce_class_initialization_decorator(
attributes_namer: _nomina.AttributesNamer,
initializer: _nomina.ClassInitializer,
-) -> _nomina.Decorator:
+) -> _nomina.Decorator[ __.T ]:
''' Produces metaclass decorator to control class initialization.
Decorator overrides ``__init__`` on metaclass.
'''
- def decorate( clscls: type[ _T ] ) -> type[ _T ]:
- initializer_name = attributes_namer( 'classes', 'initializer' )
- extant = getattr( clscls, initializer_name, None )
- original = getattr( clscls, '__init__' )
- if extant is original: return clscls
-
- @__.funct.wraps( original )
- def initialize(
- cls: type, *posargs: __.typx.Any, **nomargs: __.typx.Any
- ) -> None:
- ligation = __.funct.partial( original, cls )
- initializer( cls, ligation, posargs, nomargs )
-
- setattr( clscls, initializer_name, initialize )
- clscls.__init__ = initialize
+ def decorate( clscls: type[ __.T ] ) -> type[ __.T ]:
+ original = __.typx.cast(
+ _nomina.InitializerLigation | None,
+ clscls.__dict__.get( '__init__' ) ) # pyright: ignore
+
+ if original is None:
+
+ def initialize_with_super(
+ cls: type, *posargs: __.typx.Any, **nomargs: __.typx.Any
+ ) -> None:
+ ligation = super( clscls, cls ).__init__
+ # TODO? Short-circuit if not at start of MRO.
+ initializer( cls, ligation, posargs, nomargs )
+
+ clscls.__init__ = initialize_with_super
+
+ else:
+
+ @__.funct.wraps( original )
+ def initialize_with_original(
+ cls: type, *posargs: __.typx.Any, **nomargs: __.typx.Any
+ ) -> None:
+ ligation = __.funct.partial( original, cls )
+ # TODO? Short-circuit if not at start of MRO.
+ initializer( cls, ligation, posargs, nomargs )
+
+ clscls.__init__ = initialize_with_original
+
return clscls
return decorate
diff --git a/sources/classcore/exceptions.py b/sources/classcore/exceptions.py
index 4b3c0b2..0263686 100644
--- a/sources/classcore/exceptions.py
+++ b/sources/classcore/exceptions.py
@@ -21,38 +21,44 @@
''' Exceptions from package. '''
-from __future__ import annotations
-
from . import __
+from . import nomina as _nomina
from . import standard as _standard
-class Omniexception( # pyright: ignore[reportGeneralTypeIssues]
- BaseException, _standard.Object,
- instances_visibles = ( '__cause__', '__context__' ), # pyright: ignore[reportCallIssue]
+exception_mutables_default = (
+ '__cause__', '__context__', '__suppress_context__', '__traceback__' )
+exception_visibles_default = (
+ *exception_mutables_default, _nomina.is_public_identifier )
+
+
+class Omniexception(
+ _standard.Object, BaseException,
+ instances_mutables = exception_mutables_default,
+ instances_visibles = exception_visibles_default,
):
''' Base exception for package. '''
-class Omnierror( Exception, Omniexception ):
+class Omnierror( Omniexception, Exception ):
''' Base error for package. '''
-class AttributeImmutability( AttributeError, Omnierror ):
+class AttributeImmutability( Omnierror, AttributeError ):
def __init__( self, name: str, target: str ):
super( ).__init__(
f"Could not assign or delete attribute {name!r} on {target}." )
-class BehaviorExclusionInvalidity( TypeError, ValueError, Omnierror ):
+class BehaviorExclusionInvalidity( Omnierror, TypeError, ValueError ):
def __init__( self, verifier: __.typx.Any ):
super( ).__init__(
f"Invalid behavior exclusion verifier: {verifier!r}" )
-class ErrorProvideFailure( RuntimeError, Omnierror ):
+class ErrorProvideFailure( Omnierror, RuntimeError ):
def __init__( self, name: str, reason: str ):
super( ).__init__(
diff --git a/sources/classcore/factories.py b/sources/classcore/factories.py
index b66853c..7ebbdd6 100644
--- a/sources/classcore/factories.py
+++ b/sources/classcore/factories.py
@@ -21,49 +21,46 @@
''' Factories which produce metaclass implementations. '''
-from __future__ import annotations
-
from . import __
from . import decorators as _decorators
from . import nomina as _nomina
from . import utilities as _utilities
-_T = __.typx.TypeVar( '_T', bound = type )
-
-
def produce_class_constructor(
attributes_namer: _nomina.AttributesNamer,
- preprocessors: _nomina.ProduceConstructorPreprocsArgument = ( ),
- postprocessors: _nomina.ProduceConstructorPostprocsArgument = ( ),
-) -> _nomina.ClassConstructor:
+ preprocessors: _nomina.ClassConstructionPreprocessors[ __.T ] = ( ),
+ postprocessors: _nomina.ClassConstructionPostprocessors[ __.T ] = ( ),
+) -> _nomina.ClassConstructor[ __.T ]:
''' Produces constructors for classes. '''
def construct( # noqa: PLR0913
- clscls: type[ _T ],
+ clscls: type[ __.T ],
superf: _nomina.ClassConstructorLigation,
name: str,
bases: tuple[ type, ... ],
namespace: dict[ str, __.typx.Any ],
arguments: __.NominativeArguments,
- decorators: _nomina.Decorators,
+ decorators: _nomina.Decorators[ __.T ],
) -> type:
''' Constructs class, applying decorators and hooks. '''
bases_ = list( bases )
arguments_ = dict( arguments )
decorators_ = list( decorators )
- for prep in preprocessors:
- prep( clscls, name, bases_, namespace, arguments_, decorators_ )
+ for preprocessor in preprocessors:
+ preprocessor(
+ clscls, name, bases_, namespace, arguments_, decorators_ )
cls = superf( clscls, name, tuple( bases_ ), namespace, **arguments_ )
# Some decorators create new classes, which invokes this method again.
# Short-circuit to prevent recursive decoration and other tangles.
progress_name = attributes_namer( 'class', 'in_progress' )
- in_progress = _utilities.getattr0( cls, progress_name, False )
+ progress_name_m = _utilities.mangle_name( cls, progress_name )
+ in_progress = getattr( cls, progress_name_m, False )
if in_progress: return cls
- setattr( cls, progress_name, True )
- for postp in postprocessors: postp( cls, decorators_ )
+ setattr( cls, progress_name_m, True )
+ for postprocessor in postprocessors: postprocessor( cls, decorators_ )
cls = _decorators.apply_decorators( cls, decorators_ )
- setattr( cls, progress_name, False )
+ setattr( cls, progress_name_m, False )
return cls
return construct
@@ -71,7 +68,7 @@ def construct( # noqa: PLR0913
def produce_class_initializer(
attributes_namer: _nomina.AttributesNamer,
- completers: _nomina.ProduceInitializerCompletersArgument = ( ),
+ completers: _nomina.ClassInitializationCompleters = ( ),
) -> _nomina.ClassInitializer:
''' Produces initializers for classes. '''
@@ -84,9 +81,10 @@ def initialize(
''' Initializes class, applying hooks. '''
superf( *posargs, **nomargs )
progress_name = attributes_namer( 'class', 'in_progress' )
- in_progress = _utilities.getattr0( cls, progress_name, False )
+ progress_name_m = _utilities.mangle_name( cls, progress_name )
+ in_progress = getattr( cls, progress_name_m, False )
if in_progress: return # If non-empty, then not top-level.
- delattr( cls, progress_name )
+ delattr( cls, progress_name_m )
for completer in completers: completer( cls )
return initialize
diff --git a/sources/classcore/nomina.py b/sources/classcore/nomina.py
index d97364b..f19d467 100644
--- a/sources/classcore/nomina.py
+++ b/sources/classcore/nomina.py
@@ -21,26 +21,71 @@
''' Catalog of common type aliases. '''
-from __future__ import annotations
-
from . import __
-AttributesNamer: __.typx.TypeAlias = (
- __.cabc.Callable[ [ str, str ], str ] )
+AttributesNamer: __.typx.TypeAlias = __.typx.Annotated[
+ __.cabc.Callable[ [ str, str ], str ],
+ __.ddoc.Doc(
+ ''' Names attribute from level and core arguments.
+
+ Level will be one of 'class', 'instances', or 'instance'.
+ Core will be the core of the name as supplied to this package.
+
+ Can be used by downstream packages to determine names of
+ bookkeeping attributes assigned by this package.
+ ''' ),
+]
+
+Decorator: __.typx.TypeAlias = __.typx.Annotated[
+ __.cabc.Callable[ [ type[ __.U ] ], type[ __.U ] ],
+ __.ddoc.Doc(
+ ''' Class decorator.
+
+ Takes class argument and returns class.
+ ''' ),
+]
+Decorators: __.typx.TypeAlias = __.typx.Annotated[
+ __.cabc.Sequence[ Decorator[ __.U ] ],
+ __.ddoc.Doc(
+ ''' Sequence of class decorators.
+
+ Each element takes a class argument and returns a class.
+ ''' ),
+]
+DecoratorsMutable: __.typx.TypeAlias = __.typx.Annotated[
+ __.cabc.MutableSequence[ Decorator[ __.U ] ],
+ __.ddoc.Doc(
+ ''' Sequence of class decorators.
+
+ Each element takes a class argument and returns a class.
-Decorator: __.typx.TypeAlias = __.cabc.Callable[ [ type ], type ]
-Decorators: __.typx.TypeAlias = __.cabc.Sequence[ Decorator ]
-DecoratorsMutable: __.typx.TypeAlias = __.cabc.MutableSequence[ Decorator ]
+ Decorators may be inserted or removed from sequence.
+ ''' ),
+]
-DecorationPreparer: __.typx.TypeAlias = (
- __.cabc.Callable[ [ type, DecoratorsMutable ], None ] )
-DecorationPreparers: __.typx.TypeAlias = (
- __.cabc.Sequence[ DecorationPreparer ] )
+DecorationPreparer: __.typx.TypeAlias = __.typx.Annotated[
+ __.cabc.Callable[ [ type[ __.U ], DecoratorsMutable[ __.U ] ], None ],
+ __.ddoc.Doc(
+ ''' Class decoration preparer.
+
+ Takes class and mutable sequence of decorators as arguments.
+ Can alter the sequence.
+ ''' ),
+]
+DecorationPreparers: __.typx.TypeAlias = __.typx.Annotated[
+ __.cabc.Sequence[ DecorationPreparer[ __.U ] ],
+ __.ddoc.Doc(
+ ''' Sequence of class decoration preparers.
+
+ Each element takes class and mutable sequence of decorators as
+ arguments. And, each element can alter the sequence.
+ ''' ),
+]
ClassConstructorLigation: __.typx.TypeAlias = __.typx.Annotated[
__.cabc.Callable[ ..., type ],
- __.typx.Doc(
+ __.ddoc.Doc(
''' Bound class constructor function.
Usually from ``super( ).__new__`` or a partial function.
@@ -48,7 +93,7 @@
]
InitializerLigation: __.typx.TypeAlias = __.typx.Annotated[
__.cabc.Callable[ ..., None ],
- __.typx.Doc(
+ __.ddoc.Doc(
''' Bound initializer function.
Usually from ``super( ).__init__`` or a partial function.
@@ -56,7 +101,7 @@
]
AssignerLigation: __.typx.TypeAlias = __.typx.Annotated[
__.cabc.Callable[ [ str, __.typx.Any ], None ],
- __.typx.Doc(
+ __.ddoc.Doc(
''' Bound attributes assigner function.
Usually from ``super( ).__setattr__`` or a partial function.
@@ -64,7 +109,7 @@
]
DeleterLigation: __.typx.TypeAlias = __.typx.Annotated[
__.cabc.Callable[ [ str ], None ],
- __.typx.Doc(
+ __.ddoc.Doc(
''' Bound attributes deleter function.
Usually from ``super( ).__delattr__`` or a partial function.
@@ -72,7 +117,7 @@
]
SurveyorLigation: __.typx.TypeAlias = __.typx.Annotated[
__.cabc.Callable[ [ ], __.cabc.Iterable[ str ] ],
- __.typx.Doc(
+ __.ddoc.Doc(
''' Bound attributes surveyor function.
Usually from ``super( ).__dir__`` or a partial function.
@@ -88,34 +133,48 @@
list[ type ], # bases (mutable)
dict[ str, __.typx.Any ], # namespace (mutable)
dict[ str, __.typx.Any ], # arguments (mutable)
- DecoratorsMutable, # decorators (mutable)
+ DecoratorsMutable[ __.U ], # decorators (mutable)
],
None
],
- __.typx.Doc(
+ __.ddoc.Doc(
''' Processes class data before construction.
For use cases, such as argument conversion.
''' ),
]
+ClassConstructionPreprocessors: __.typx.TypeAlias = __.typx.Annotated[
+ __.cabc.Sequence[ ClassConstructionPreprocessor[ __.U ] ],
+ __.ddoc.Doc( ''' Processors to apply before construction of class. ''' ),
+]
ClassConstructionPostprocessor: __.typx.TypeAlias = __.typx.Annotated[
- __.cabc.Callable[ [ type, DecoratorsMutable ], None ],
- __.typx.Doc(
+ __.cabc.Callable[ [ type, DecoratorsMutable[ __.U ] ], None ],
+ __.ddoc.Doc(
''' Processes class before decoration.
For use cases, such as decorator list manipulation.
''' ),
]
+ClassConstructionPostprocessors: __.typx.TypeAlias = __.typx.Annotated[
+ __.cabc.Sequence[ ClassConstructionPostprocessor[ __.U ] ],
+ __.ddoc.Doc(
+ ''' Processors to apply before decoration of class. ''' ),
+]
# TODO: ClassInitializationPreparer (arguments mutation)
ClassInitializationCompleter: __.typx.TypeAlias = __.typx.Annotated[
__.cabc.Callable[ [ type ], None ],
- __.typx.Doc(
+ __.ddoc.Doc(
''' Completes initialization of class.
For use cases, such as enabling immutability once all other
initialization has occurred.
''' ),
]
+ClassInitializationCompleters: __.typx.TypeAlias = __.typx.Annotated[
+ __.cabc.Sequence[ ClassInitializationCompleter ],
+ __.ddoc.Doc(
+ ''' Processors to apply at final stage of class initialization. ''' ),
+]
ClassConstructor: __.typx.TypeAlias = __.typx.Annotated[
@@ -127,11 +186,11 @@
tuple[ type, ... ],
dict[ str, __.typx.Any ],
__.NominativeArguments,
- Decorators,
+ Decorators[ __.U ],
],
type
],
- __.typx.Doc( ''' Constructor to use with metaclass. ''' ),
+ __.ddoc.Doc( ''' Constructor to use with metaclass. ''' ),
]
ClassInitializer: __.typx.TypeAlias = __.typx.Annotated[
__.cabc.Callable[
@@ -143,20 +202,10 @@
],
None
],
- __.typx.Doc( ''' Initializer to use with metaclass. ''' ),
+ __.ddoc.Doc( ''' Initializer to use with metaclass. ''' ),
]
-ProduceConstructorPreprocsArgument: __.typx.TypeAlias = __.typx.Annotated[
- __.cabc.Sequence[ ClassConstructionPreprocessor ],
- __.typx.Doc( ''' Processors to apply before construction of class. ''' ),
-]
-ProduceConstructorPostprocsArgument: __.typx.TypeAlias = __.typx.Annotated[
- __.cabc.Sequence[ ClassConstructionPostprocessor ],
- __.typx.Doc( ''' Processors to apply before decoration of class. ''' ),
-]
-ProduceInitializerCompletersArgument: __.typx.TypeAlias = __.typx.Annotated[
- __.cabc.Sequence[ ClassInitializationCompleter ],
- __.typx.Doc(
- ''' Processors to apply at final stage of class initialization. ''' ),
-]
+def is_public_identifier( name: str ) -> bool:
+ ''' Is Python identifier public? '''
+ return not name.startswith( '_' )
diff --git a/sources/classcore/standard/__.py b/sources/classcore/standard/__.py
index a0b55ce..81cff04 100644
--- a/sources/classcore/standard/__.py
+++ b/sources/classcore/standard/__.py
@@ -20,15 +20,11 @@
''' Common constants, imports, and utilities. '''
+# ruff: noqa: F403
-from __future__ import annotations
-from ..__ import * # noqa: F403
-
-
-def is_public_identifier( name: str ) -> bool:
- ''' Is Python identifier public? '''
- return not name.startswith( '_' )
+from ..__ import *
+from ..nomina import is_public_identifier
def provide_error_class( name: str ) -> type[ Exception ]:
diff --git a/sources/classcore/standard/__init__.py b/sources/classcore/standard/__init__.py
index 64f32fb..30a8576 100644
--- a/sources/classcore/standard/__init__.py
+++ b/sources/classcore/standard/__init__.py
@@ -18,10 +18,34 @@
#============================================================================#
-''' Decorators and class factories providing concealment and immutability. '''
+''' Decorators and class factories providing concealment and immutability.
+ Concealment restricts the visibility of attributes on classes and their
+ instances. By default, only public attributes (ones which do not start with
+ ``_``) are revealed for :py:func:`dir` calls. This behavior can be
+ overriden by supplying visibility verifiers as a decorator factory
+ argument or metaclass argument. These can be a sequence of attribute
+ names, regular expression :py:class:`re.Pattern` objects which match
+ attribute names, or predicate functions which match attribute names. Or,
+ total visibility (per the Python default) can be achieved by supplying
+ ``visibles = '*'`` instead of a sequence of verifiers.
-from __future__ import annotations
+ Immutability prevents assignment (including reassignment) or deletion of
+ attrubtes on classes and their instances after they have been completely
+ initialized. In addition to any standard Python class, this can be applied
+ to dataclasses, allowing them to use ``__post_init__`` to set attributes,
+ which ``dataclasses.dataclass( frozen = True )`` prevents. The
+ immutability behavior can be overridden by supplying mutability verifiers
+ as a decorator factory argument or metaclass argument. These behave
+ similarly to the visibility verifiers described above.
+
+ Hooks to modify the concealment and immutability behaviors are also
+ available.
+'''
+
+
+from . import dynadoc
+from . import nomina
from .classes import *
from .decorators import *
diff --git a/sources/classcore/standard/behaviors.py b/sources/classcore/standard/behaviors.py
index a136b8c..3d9fb30 100644
--- a/sources/classcore/standard/behaviors.py
+++ b/sources/classcore/standard/behaviors.py
@@ -23,15 +23,29 @@
# Maybe enum for mutability and visibility.
-from __future__ import annotations
-
from .. import utilities as _utilities
from . import __
from . import nomina as _nomina
-concealment_label = 'concealment'
-immutability_label = 'immutability'
+def access_core_function( # noqa: PLR0913
+ cls: type, /, *,
+ attributes_namer: _nomina.AttributesNamer,
+ arguments: __.cabc.Mapping[ str, __.typx.Any ],
+ level: str,
+ name: str,
+ default: __.cabc.Callable[ ..., __.typx.Any ],
+) -> __.cabc.Callable[ ..., __.typx.Any ]:
+ ''' Accesses core behavior function.
+
+ First checks for override argument, then checks for heritable
+ attribute. Finally, falls back to provided default.
+ '''
+ argument_name = f"{level}_{name}_core"
+ attribute_name = attributes_namer( level, f"{name}_core" )
+ return (
+ arguments.get( argument_name )
+ or getattr( cls, attribute_name, default ) )
def assign_attribute_if_mutable( # noqa: PLR0913
@@ -43,10 +57,11 @@ def assign_attribute_if_mutable( # noqa: PLR0913
name: str,
value: __.typx.Any,
) -> None:
+ ''' Assigns attribute if it is mutable, else raises error. '''
leveli = 'instance' if level == 'instances' else level
behaviors_name = attributes_namer( leveli, 'behaviors' )
behaviors = _utilities.getattr0( obj, behaviors_name, frozenset( ) )
- if immutability_label not in behaviors:
+ if _nomina.immutability_label not in behaviors:
ligation( name, value )
return
names_name = attributes_namer( level, 'mutables_names' )
@@ -83,10 +98,11 @@ def delete_attribute_if_mutable( # noqa: PLR0913
level: str,
name: str,
) -> None:
+ ''' Deletes attribute if it is mutable, else raises error. '''
leveli = 'instance' if level == 'instances' else level
behaviors_name = attributes_namer( leveli, 'behaviors' )
behaviors = _utilities.getattr0( obj, behaviors_name, frozenset( ) )
- if immutability_label not in behaviors:
+ if _nomina.immutability_label not in behaviors:
ligation( name )
return
names_name = attributes_namer( level, 'mutables_names' )
@@ -121,11 +137,12 @@ def survey_visible_attributes(
attributes_namer: _nomina.AttributesNamer,
level: str,
) -> __.cabc.Iterable[ str ]:
+ ''' Returns sequence of visible attributes. '''
names_base = ligation( )
leveli = 'instance' if level == 'instances' else level
behaviors_name = attributes_namer( leveli, 'behaviors' )
behaviors = _utilities.getattr0( obj, behaviors_name, frozenset( ) )
- if concealment_label not in behaviors: return names_base
+ if _nomina.concealment_label not in behaviors: return names_base
names_name = attributes_namer( level, 'visibles_names' )
names: _nomina.BehaviorExclusionNamesOmni = (
getattr( obj, names_name, frozenset( ) ) )
@@ -154,6 +171,30 @@ def survey_visible_attributes(
return names_
+def augment_class_attributes_allocations(
+ attributes_namer: _nomina.AttributesNamer,
+ namespace: dict[ str, __.typx.Any ],
+) -> None:
+ ''' Adds necessary slots for record-keeping attributes. '''
+ behaviors_name = attributes_namer( 'instance', 'behaviors' )
+ slots: __.typx.Union[
+ __.cabc.Mapping[ str, __.typx.Any ],
+ __.cabc.Sequence[ str ],
+ None
+ ] = namespace.get( '__slots__' )
+ if slots and behaviors_name in slots: return
+ if isinstance( slots, __.cabc.Mapping ):
+ slots_ = dict( slots )
+ slots_[ behaviors_name ] = 'Active behaviors.'
+ slots_ = __.types.MappingProxyType( slots_ )
+ elif isinstance( slots, __.cabc.Sequence ):
+ slots_ = list( slots )
+ slots_.append( behaviors_name )
+ slots_ = tuple( slots_ )
+ else: return # pragma: no cover
+ namespace[ '__slots__' ] = slots_
+
+
def classify_behavior_exclusion_verifiers(
verifiers: _nomina.BehaviorExclusionVerifiers
) -> tuple[
@@ -161,6 +202,7 @@ def classify_behavior_exclusion_verifiers(
_nomina.BehaviorExclusionRegexes,
_nomina.BehaviorExclusionPredicates,
]:
+ ''' Threshes sequence of behavior exclusion verifiers into bins. '''
names: set[ str ] = set( )
regexes: list[ __.re.Pattern[ str ] ] = [ ]
predicates: list[ __.cabc.Callable[ ..., bool ] ] = [ ]
@@ -179,7 +221,8 @@ def classify_behavior_exclusion_verifiers(
def produce_class_construction_preprocessor(
attributes_namer: _nomina.AttributesNamer
-) -> _nomina.ClassConstructionPreprocessor:
+) -> _nomina.ClassConstructionPreprocessor[ __.U ]:
+ ''' Produces construction processor which handles metaclass arguments. '''
def preprocess( # noqa: PLR0913
clscls: type,
@@ -187,31 +230,50 @@ def preprocess( # noqa: PLR0913
bases: list[ type ],
namespace: dict[ str, __.typx.Any ],
arguments: dict[ str, __.typx.Any ],
- decorators: _nomina.DecoratorsMutable,
+ decorators: _nomina.DecoratorsMutable[ __.U ],
) -> None:
record_class_construction_arguments(
attributes_namer, namespace, arguments )
+ if '__slots__' in namespace:
+ augment_class_attributes_allocations( attributes_namer, namespace )
return preprocess
def produce_class_construction_postprocessor(
- attributes_namer: _nomina.AttributesNamer
-) -> _nomina.ClassConstructionPostprocessor:
+ attributes_namer: _nomina.AttributesNamer,
+ error_class_provider: _nomina.ErrorClassProvider,
+) -> _nomina.ClassConstructionPostprocessor[ __.U ]:
+ ''' Produces construction processor which determines class decorators. '''
arguments_name = attributes_namer( 'class', 'construction_arguments' )
+ cores_default = dict(
+ assigner = assign_attribute_if_mutable,
+ deleter = delete_attribute_if_mutable,
+ surveyor = survey_visible_attributes )
def postprocess(
- cls: type, decorators: _nomina.DecoratorsMutable
+ cls: type, decorators: _nomina.DecoratorsMutable[ __.U ]
) -> None:
arguments = getattr( cls, arguments_name, { } )
+ clscls = type( cls )
dcls_spec = getattr( cls, '__dataclass_transform__', None )
if not dcls_spec: # either base class or metaclass may be marked
- clscls = type( cls )
dcls_spec = getattr( clscls, '__dataclass_transform__', None )
+ cores = { }
+ for core_name in ( 'assigner', 'deleter', 'surveyor' ):
+ core_function = access_core_function(
+ cls,
+ attributes_namer = attributes_namer,
+ arguments = arguments,
+ level = 'instances', name = core_name,
+ default = cores_default[ core_name ] )
+ cores[ core_name ] = core_function
instances_mutables = arguments.get(
'instances_mutables', __.mutables_default )
instances_visibles = arguments.get(
'instances_visibles', __.visibles_default )
+ instances_ignore_init_arguments = arguments.get(
+ 'instances_ignore_init_arguments', False )
if dcls_spec and dcls_spec.get( 'kw_only_default', False ):
from .decorators import dataclass_with_standard_behaviors
decorator_factory = dataclass_with_standard_behaviors
@@ -220,9 +282,27 @@ def postprocess(
else:
from .decorators import with_standard_behaviors
decorator_factory = with_standard_behaviors
- decorator = decorator_factory(
- mutables = instances_mutables, visibles = instances_visibles )
+ decorator: _nomina.Decorator[ __.U ] = decorator_factory(
+ attributes_namer = attributes_namer,
+ error_class_provider = error_class_provider,
+ assigner_core = __.typx.cast(
+ _nomina.AssignerCore, cores[ 'assigner' ] ),
+ deleter_core = __.typx.cast(
+ _nomina.DeleterCore, cores[ 'deleter' ] ),
+ surveyor_core = __.typx.cast(
+ _nomina.SurveyorCore, cores[ 'surveyor' ] ),
+ ignore_init_arguments = instances_ignore_init_arguments,
+ mutables = instances_mutables,
+ visibles = instances_visibles )
decorators.append( decorator )
+ # Dynadoc tracks objects in weakset.
+ # Must decorate after any potential class replacements.
+ dynadoc_cfg = arguments.get( 'dynadoc_configuration', { } )
+ if not dynadoc_cfg: # either metaclass argument or attribute
+ dynadoc_cfg_name = (
+ attributes_namer( 'classes', 'dynadoc_configuration' ) )
+ dynadoc_cfg = getattr( clscls, dynadoc_cfg_name, { } )
+ decorators.append( __.ddoc.with_docstring( **dynadoc_cfg ) )
return postprocess
@@ -230,6 +310,7 @@ def postprocess(
def produce_class_initialization_completer(
attributes_namer: _nomina.AttributesNamer
) -> _nomina.ClassInitializationCompleter:
+ ''' Produces initialization completer which finalizes class behaviors. '''
arguments_name = attributes_namer( 'class', 'construction_arguments' )
def complete( cls: type ) -> None:
@@ -243,15 +324,16 @@ def complete( cls: type ) -> None:
record_behavior(
cls, attributes_namer = attributes_namer,
level = 'class', basename = 'mutables',
- label = immutability_label, behaviors = behaviors,
+ label = _nomina.immutability_label, behaviors = behaviors,
verifiers = mutables )
record_behavior(
cls, attributes_namer = attributes_namer,
level = 'class', basename = 'visibles',
- label = concealment_label, behaviors = behaviors,
+ label = _nomina.concealment_label, behaviors = behaviors,
verifiers = visibles )
# Set behaviors attribute last since it enables enforcement.
- setattr( cls, attributes_namer( 'class', 'behaviors' ), behaviors )
+ behaviors_name = attributes_namer( 'class', 'behaviors' )
+ _utilities.setattr0( cls, behaviors_name, frozenset( behaviors ) )
return complete
@@ -265,6 +347,7 @@ def record_behavior( # noqa: PLR0913
behaviors: set[ str ],
verifiers: _nomina.BehaviorExclusionVerifiersOmni,
) -> None:
+ ''' Records details of particular class behavior, such as immutability. '''
names_name = attributes_namer( level, f"{basename}_names" )
if verifiers == '*':
setattr( cls, names_name, '*' )
@@ -297,6 +380,7 @@ def record_class_construction_arguments(
namespace: dict[ str, __.typx.Any ],
arguments: dict[ str, __.typx.Any ],
) -> None:
+ ''' Captures metaclass arguments as class attribute for later use. '''
arguments_name = attributes_namer( 'class', 'construction_arguments' )
arguments_ = namespace.get( arguments_name, { } )
# Decorators, which replace classes, will cause construction of the
@@ -306,6 +390,11 @@ def record_class_construction_arguments(
arguments_ = { }
for name in (
'class_mutables', 'class_visibles',
+ 'dynadoc_configuration',
+ 'instances_assigner_core',
+ 'instances_deleter_core',
+ 'instances_surveyor_core',
+ 'instances_ignore_init_arguments',
'instances_mutables', 'instances_visibles',
):
if name not in arguments: continue
diff --git a/sources/classcore/standard/classes.py b/sources/classcore/standard/classes.py
index 76f46b0..b68285e 100644
--- a/sources/classcore/standard/classes.py
+++ b/sources/classcore/standard/classes.py
@@ -19,74 +19,260 @@
''' Standard classes and class factories. '''
-# TODO? ClassMutable and ProtocolClassMutable
-# Need inheritance of omnimutability and omnivisibility.
-from __future__ import annotations
-
from . import __
from . import decorators as _decorators
+from . import dynadoc as _dynadoc
+from . import nomina as _nomina
-@_decorators.decoration_by( *_decorators.class_factory_decorators )
-class Class( type ): pass
+_dynadoc_configuration = (
+ _dynadoc.produce_dynadoc_configuration( table = __.fragments ) )
+_class_factory = __.funct.partial(
+ _decorators.class_factory, dynadoc_configuration = _dynadoc_configuration )
-@_decorators.decoration_by( *_decorators.class_factory_decorators )
-@__.typx.dataclass_transform( frozen_default = True, kw_only_default = True )
-class Dataclass( type ): pass
+class ClassFactoryExtraArguments( __.typx.TypedDict, total = False ):
+ ''' Extra arguments accepted by standard metaclasses. '''
+ class_mutables: _nomina.BehaviorExclusionVerifiersOmni
+ class_visibles: _nomina.BehaviorExclusionVerifiersOmni
+ dynadoc_configuration: _nomina.DynadocConfiguration
+ instances_assigner_core: _nomina.AssignerCore
+ instances_deleter_core: _nomina.DeleterCore
+ instances_surveyor_core: _nomina.SurveyorCore
+ instances_ignore_init_arguments: bool
+ instances_mutables: _nomina.BehaviorExclusionVerifiersOmni
+ instances_visibles: _nomina.BehaviorExclusionVerifiersOmni
-@_decorators.decoration_by( *_decorators.class_factory_decorators )
-@__.typx.dataclass_transform( kw_only_default = True )
-class DataclassMutable( type ): pass
+@_class_factory( )
+class Class( type ):
+ ''' Metaclass for standard classes. '''
+
+ _dynadoc_fragments_ = (
+ 'cfc class conceal', 'cfc class protect', 'cfc dynadoc',
+ 'cfc instance conceal', 'cfc instance protect' )
-@_decorators.decoration_by( *_decorators.class_factory_decorators )
-class ProtocolClass( type( __.typx.Protocol ) ): pass
+ def __new__( # Typechecker stub.
+ clscls: type[ __.T ],
+ name: str,
+ bases: tuple[ type, ... ],
+ namespace: dict[ str, __.typx.Any ], *,
+ decorators: _nomina.Decorators[ __.T ] = ( ),
+ **arguments: __.typx.Unpack[ ClassFactoryExtraArguments ],
+ ) -> __.T:
+ return super( ).__new__( clscls, name, bases, namespace )
-@_decorators.decoration_by( *_decorators.class_factory_decorators )
+@_class_factory( )
@__.typx.dataclass_transform( frozen_default = True, kw_only_default = True )
-class ProtocolDataclass( type( __.typx.Protocol ) ): pass
+class Dataclass( type ):
+ ''' Metaclass for standard dataclasses. '''
+
+ _dynadoc_fragments_ = (
+ 'cfc produce dataclass',
+ 'cfc class conceal', 'cfc class protect', 'cfc dynadoc',
+ 'cfc instance conceal', 'cfc instance protect' )
+
+ def __new__( # Typechecker stub.
+ clscls: type[ __.T ],
+ name: str,
+ bases: tuple[ type, ... ],
+ namespace: dict[ str, __.typx.Any ], *,
+ decorators: _nomina.Decorators[ __.T ] = ( ),
+ **arguments: __.typx.Unpack[ ClassFactoryExtraArguments ],
+ ) -> __.T:
+ return super( ).__new__( clscls, name, bases, namespace )
+
+
+@_class_factory( )
+@__.typx.dataclass_transform( kw_only_default = True )
+class DataclassMutable( type ):
+ ''' Metaclass for dataclasses with mutable instance attributes. '''
+
+ _dynadoc_fragments_ = (
+ 'cfc produce dataclass',
+ 'cfc class conceal', 'cfc class protect', 'cfc dynadoc',
+ 'cfc instance conceal' )
+
+ def __new__( # Typechecker stub.
+ clscls: type[ __.T ],
+ name: str,
+ bases: tuple[ type, ... ],
+ namespace: dict[ str, __.typx.Any ], *,
+ decorators: _nomina.Decorators[ __.T ] = ( ),
+ **arguments: __.typx.Unpack[ ClassFactoryExtraArguments ],
+ ) -> __.T:
+ return super( ).__new__( clscls, name, bases, namespace )
+
+
+@_class_factory( )
+class ProtocolClass( type( __.typx.Protocol ) ):
+ ''' Metaclass for standard protocol classes. '''
+
+ _dynadoc_fragments_ = (
+ 'cfc produce protocol class',
+ 'cfc class conceal', 'cfc class protect', 'cfc dynadoc',
+ 'cfc instance conceal', 'cfc instance protect' )
+
+ def __new__( # Typechecker stub.
+ clscls: type[ __.T ],
+ name: str,
+ bases: tuple[ type, ... ],
+ namespace: dict[ str, __.typx.Any ], *,
+ decorators: _nomina.Decorators[ __.T ] = ( ),
+ **arguments: __.typx.Unpack[ ClassFactoryExtraArguments ],
+ ) -> __.T:
+ return super( ).__new__( clscls, name, bases, namespace )
+
+
+@_class_factory( )
+@__.typx.dataclass_transform( frozen_default = True, kw_only_default = True )
+class ProtocolDataclass( type( __.typx.Protocol ) ):
+ ''' Metaclass for standard protocol dataclasses. '''
+
+ _dynadoc_fragments_ = (
+ 'cfc produce protocol class', 'cfc produce dataclass',
+ 'cfc class conceal', 'cfc class protect', 'cfc dynadoc',
+ 'cfc instance conceal', 'cfc instance protect' )
+
+ def __new__( # Typechecker stub.
+ clscls: type[ __.T ],
+ name: str,
+ bases: tuple[ type, ... ],
+ namespace: dict[ str, __.typx.Any ], *,
+ decorators: _nomina.Decorators[ __.T ] = ( ),
+ **arguments: __.typx.Unpack[ ClassFactoryExtraArguments ],
+ ) -> __.T:
+ return super( ).__new__( clscls, name, bases, namespace )
+
+
+@_class_factory( )
+@__.typx.dataclass_transform( kw_only_default = True )
+class ProtocolDataclassMutable( type( __.typx.Protocol ) ):
+ ''' Metaclass for protocol dataclasses with mutable instance attributes.
+ '''
+ _dynadoc_fragments_ = (
+ 'cfc produce protocol class', 'cfc produce dataclass',
+ 'cfc class conceal', 'cfc class protect', 'cfc dynadoc',
+ 'cfc instance conceal' )
+
+ def __new__( # Typechecker stub.
+ clscls: type[ __.T ],
+ name: str,
+ bases: tuple[ type, ... ],
+ namespace: dict[ str, __.typx.Any ], *,
+ decorators: _nomina.Decorators[ __.T ] = ( ),
+ **arguments: __.typx.Unpack[ ClassFactoryExtraArguments ],
+ ) -> __.T:
+ return super( ).__new__( clscls, name, bases, namespace )
-@_decorators.decoration_by( *_decorators.class_factory_decorators )
-@__.typx.dataclass_transform( kw_only_default = True )
-class ProtocolDataclassMutable( type( __.typx.Protocol ) ): pass
+class Object( metaclass = Class ):
+ ''' Standard base class. '''
-class Object( metaclass = Class ): pass
+ _dynadoc_fragments_ = (
+ 'class concealment', 'class protection', 'class dynadoc',
+ 'class instance conceal', 'class instance protect' )
-class ObjectMutable( # pyright: ignore[reportGeneralTypeIssues]
- metaclass = Class,
- instances_mutables = '*', # pyright: ignore[reportCallIssue]
-): pass
+class ObjectMutable( metaclass = Class, instances_mutables = '*' ):
+ ''' Base class with mutable instance attributes. '''
+ _dynadoc_fragments_ = (
+ 'class concealment', 'class protection', 'class dynadoc',
+ 'class instance conceal' )
-class DataclassObject( metaclass = Dataclass ): pass
+class DataclassObject( metaclass = Dataclass ):
+ ''' Standard base dataclass. '''
-class DataclassObjectMutable( metaclass = DataclassMutable ): pass
+ _dynadoc_fragments_ = (
+ 'dataclass',
+ 'class concealment', 'class protection', 'class dynadoc',
+ 'class instance conceal', 'class instance protect' )
-class Protocol( __.typx.Protocol, metaclass = ProtocolClass ): pass
+class DataclassObjectMutable( metaclass = DataclassMutable ):
+ ''' Base dataclass with mutable instance attributes. '''
+ _dynadoc_fragments_ = (
+ 'dataclass',
+ 'class concealment', 'class protection', 'class dynadoc',
+ 'class instance conceal' )
-class ProtocolMutable( # pyright: ignore[reportGeneralTypeIssues]
+
+class Protocol(
__.typx.Protocol,
metaclass = ProtocolClass,
- instances_mutables = '*', # pyright: ignore[reportCallIssue]
-): pass
+ class_mutables = (
+ '_abc_cache',
+ '_abc_negative_cache',
+ '_abc_negative_cache_version',
+ '_abc_registry',
+ ),
+):
+ ''' Standard base protocol class. '''
+
+ _dynadoc_fragments_ = (
+ 'protocol class',
+ 'class concealment', 'class protection', 'class dynadoc',
+ 'class instance conceal', 'class instance protect' )
+
+
+class ProtocolMutable(
+ __.typx.Protocol,
+ metaclass = ProtocolClass,
+ class_mutables = (
+ '_abc_cache',
+ '_abc_negative_cache',
+ '_abc_negative_cache_version',
+ '_abc_registry',
+ ),
+ instances_mutables = '*',
+):
+ ''' Base protocol class with mutable instance attributes. '''
+
+ _dynadoc_fragments_ = (
+ 'protocol class',
+ 'class concealment', 'class protection', 'class dynadoc',
+ 'class instance conceal' )
class DataclassProtocol(
- __.typx.Protocol, metaclass = ProtocolDataclass,
-): pass
+ __.typx.Protocol,
+ metaclass = ProtocolDataclass,
+ class_mutables = (
+ '_abc_cache',
+ '_abc_negative_cache',
+ '_abc_negative_cache_version',
+ '_abc_registry',
+ ),
+):
+ ''' Standard base protocol dataclass. '''
+
+ _dynadoc_fragments_ = (
+ 'dataclass', 'protocol class',
+ 'class concealment', 'class protection', 'class dynadoc',
+ 'class instance conceal', 'class instance protect' )
class DataclassProtocolMutable(
- __.typx.Protocol, metaclass = ProtocolDataclassMutable,
-): pass
+ __.typx.Protocol,
+ metaclass = ProtocolDataclassMutable,
+ class_mutables = (
+ '_abc_cache',
+ '_abc_negative_cache',
+ '_abc_negative_cache_version',
+ '_abc_registry',
+ ),
+):
+ ''' Base protocol dataclass with mutable instance attributes. '''
+
+ _dynadoc_fragments_ = (
+ 'dataclass', 'protocol class',
+ 'class concealment', 'class protection', 'class dynadoc',
+ 'class instance conceal' )
diff --git a/sources/classcore/standard/decorators.py b/sources/classcore/standard/decorators.py
index 9b6ed62..41b05e4 100644
--- a/sources/classcore/standard/decorators.py
+++ b/sources/classcore/standard/decorators.py
@@ -21,10 +21,6 @@
''' Standard decorators. '''
# TODO? Add attribute value transformer as standard decorator argument.
-# ruff: noqa: F401
-
-
-from __future__ import annotations
from .. import factories as _factories
from .. import utilities as _utilities
@@ -35,316 +31,631 @@
)
from . import __
from . import behaviors as _behaviors
+from . import dynadoc as _dynadoc
from . import nomina as _nomina
-_U = __.typx.TypeVar( '_U' )
+_dataclass_core = __.dcls.dataclass( kw_only = True, slots = True )
+_dynadoc_configuration = _dynadoc.produce_dynadoc_configuration( )
-_dataclass_core = __.dcls.dataclass( kw_only = True, slots = True )
+def prepare_dataclass_for_instances(
+ cls: type,
+ decorators: _nomina.DecoratorsMutable[ __.U ], /, *,
+ attributes_namer: _nomina.AttributesNamer,
+) -> None:
+ ''' Annotates dataclass in support of instantiation machinery. '''
+ annotations = __.inspect.get_annotations( cls )
+ behaviors_name = attributes_namer( 'instance', 'behaviors' )
+ # TODO: Only use mangling if not slotted.
+ # behaviors_name_ = _utilities.mangle_name( cls, behaviors_name )
+ behaviors_name_ = behaviors_name
+ annotations[ behaviors_name_ ] = set[ str ]
+ setattr( cls, '__annotations__', annotations ) # in case of absence
+ setattr( cls, behaviors_name_, __.dcls.field(
+ compare = False, hash = False, init = False, repr = False ) )
+
+
+def apply_cfc_core_functions(
+ clscls: type[ __.T ], /,
+ attributes_namer: _nomina.AttributesNamer,
+ assigner_core: __.typx.Optional[ _nomina.AssignerCore ] = None,
+ deleter_core: __.typx.Optional[ _nomina.DeleterCore ] = None,
+ surveyor_core: __.typx.Optional[ _nomina.SurveyorCore ] = None,
+) -> None:
+ ''' Stores core functions on metaclass. '''
+ cores = dict(
+ classes_assigner_core = assigner_core,
+ classes_deleter_core = deleter_core,
+ classes_surveyor_core = surveyor_core )
+ cores_default = dict(
+ assigner = _behaviors.assign_attribute_if_mutable,
+ deleter = _behaviors.delete_attribute_if_mutable,
+ surveyor = _behaviors.survey_visible_attributes )
+ for core_name in ( 'assigner', 'deleter', 'surveyor' ):
+ core_function = _behaviors.access_core_function(
+ clscls,
+ attributes_namer = attributes_namer,
+ arguments = cores,
+ level = 'classes', name = core_name,
+ default = cores_default[ core_name ] )
+ core_aname = attributes_namer( 'classes', f"{core_name}_core" )
+ setattr( clscls, core_aname, core_function )
+
+
+def apply_cfc_dynadoc_configuration(
+ clscls: type[ __.T ], /,
+ attributes_namer: _nomina.AttributesNamer,
+ configuration: _nomina.DynadocConfiguration,
+) -> None:
+ ''' Stores Dynadoc configuration on metaclass. '''
+ configuration_name = attributes_namer( 'classes', 'dynadoc_configuration' )
+ setattr( clscls, configuration_name, configuration )
-def _produce_class_factory_core(
+def apply_cfc_constructor(
+ clscls: type[ __.T ], /,
attributes_namer: _nomina.AttributesNamer,
error_class_provider: _nomina.ErrorClassProvider,
-) -> tuple[ _nomina.ClassConstructor, _nomina.ClassInitializer ]:
+) -> None:
+ ''' Injects '__new__' method into metaclass. '''
preprocessors = (
_behaviors.produce_class_construction_preprocessor(
attributes_namer = attributes_namer ), )
postprocessors = (
_behaviors.produce_class_construction_postprocessor(
- attributes_namer = attributes_namer ), )
- completers = (
- _behaviors.produce_class_initialization_completer(
- attributes_namer = attributes_namer ), )
- constructor = (
+ attributes_namer = attributes_namer,
+ error_class_provider = error_class_provider ), )
+ constructor: _nomina.ClassConstructor[ __.T ] = (
_factories.produce_class_constructor(
attributes_namer = attributes_namer,
preprocessors = preprocessors,
postprocessors = postprocessors ) )
+ decorator = produce_class_construction_decorator(
+ attributes_namer = attributes_namer, constructor = constructor )
+ decorator( clscls )
+
+
+def apply_cfc_initializer(
+ clscls: type[ __.T ], /, attributes_namer: _nomina.AttributesNamer
+) -> None:
+ ''' Injects '__init__' method into metaclass. '''
+ completers = (
+ _behaviors.produce_class_initialization_completer(
+ attributes_namer = attributes_namer ), )
initializer = (
_factories.produce_class_initializer(
attributes_namer = attributes_namer,
completers = completers ) )
- return constructor, initializer
+ decorator = produce_class_initialization_decorator(
+ attributes_namer = attributes_namer, initializer = initializer )
+ decorator( clscls )
-def prepare_dataclass_for_instances(
- cls: type,
- decorators: _nomina.DecoratorsMutable, /, *,
+def apply_cfc_attributes_assigner(
+ clscls: type[ __.T ], /,
attributes_namer: _nomina.AttributesNamer,
+ error_class_provider: _nomina.ErrorClassProvider,
+ implementation_core: __.typx.Optional[ _nomina.AssignerCore ],
) -> None:
- ''' Annotates dataclass in support of instantiation machinery. '''
- annotations = __.inspect.get_annotations( cls )
- behaviors_name = attributes_namer( 'instance', 'behaviors' )
- annotations[ behaviors_name ] = set[ str ]
- setattr( cls, '__annotations__', annotations ) # in case of absence
- setattr( cls, behaviors_name, __.dcls.field( init = False ) )
+ ''' Injects '__setattr__' method into metaclass. '''
+ decorator = produce_attributes_assignment_decorator(
+ level = 'classes',
+ attributes_namer = attributes_namer,
+ error_class_provider = error_class_provider,
+ implementation_core = implementation_core )
+ decorator( clscls )
+
+def apply_cfc_attributes_deleter(
+ clscls: type[ __.T ], /,
+ attributes_namer: _nomina.AttributesNamer,
+ error_class_provider: _nomina.ErrorClassProvider,
+ implementation_core: __.typx.Optional[ _nomina.DeleterCore ],
+) -> None:
+ ''' Injects '__delattr__' method into metaclass. '''
+ decorator = produce_attributes_deletion_decorator(
+ level = 'classes',
+ attributes_namer = attributes_namer,
+ error_class_provider = error_class_provider,
+ implementation_core = implementation_core )
+ decorator( clscls )
-def produce_class_factory_decorators(
+
+def apply_cfc_attributes_surveyor(
+ clscls: type[ __.T ],
+ attributes_namer: _nomina.AttributesNamer,
+ implementation_core: __.typx.Optional[ _nomina.SurveyorCore ],
+) -> None:
+ ''' Injects '__dir__' method into metaclass. '''
+ decorator = produce_attributes_surveillance_decorator(
+ level = 'classes',
+ attributes_namer = attributes_namer,
+ implementation_core = implementation_core )
+ decorator( clscls )
+
+
+def class_factory( # noqa: PLR0913
attributes_namer: _nomina.AttributesNamer = __.calculate_attrname,
error_class_provider: _nomina.ErrorClassProvider = __.provide_error_class,
- assigner_core: _nomina.AssignerCore = (
- _behaviors.assign_attribute_if_mutable ),
- deleter_core: _nomina.DeleterCore = (
- _behaviors.delete_attribute_if_mutable ),
- surveyor_core: _nomina.SurveyorCore = (
- _behaviors.survey_visible_attributes ),
-) -> _nomina.Decorators:
- decorators: list[ _nomina.Decorator ] = [ ]
- constructor, initializer = (
- _produce_class_factory_core(
+ assigner_core: __.typx.Optional[ _nomina.AssignerCore ] = None,
+ deleter_core: __.typx.Optional[ _nomina.DeleterCore ] = None,
+ surveyor_core: __.typx.Optional[ _nomina.SurveyorCore ] = None,
+ dynadoc_configuration: __.cabc.Mapping[ str, __.typx.Any ] = (
+ _dynadoc_configuration ),
+) -> _nomina.Decorator[ __.T ]:
+ ''' Produces decorator to apply standard behaviors to metaclass. '''
+ def decorate( clscls: type[ __.T ] ) -> type[ __.T ]:
+ apply_cfc_core_functions(
+ clscls,
attributes_namer = attributes_namer,
- error_class_provider = error_class_provider ) )
- decorators.append(
- produce_class_construction_decorator(
+ assigner_core = assigner_core,
+ deleter_core = deleter_core,
+ surveyor_core = surveyor_core )
+ apply_cfc_dynadoc_configuration(
+ clscls,
attributes_namer = attributes_namer,
- constructor = constructor ) )
- decorators.append(
- produce_class_initialization_decorator(
+ configuration = dynadoc_configuration )
+ apply_cfc_constructor(
+ clscls,
attributes_namer = attributes_namer,
- initializer = initializer ) )
- decorators.append(
- produce_attributes_assignment_decorator(
- level = 'class',
+ error_class_provider = error_class_provider )
+ apply_cfc_initializer( clscls, attributes_namer = attributes_namer )
+ apply_cfc_attributes_assigner(
+ clscls,
attributes_namer = attributes_namer,
error_class_provider = error_class_provider,
- implementation_core = assigner_core ) )
- decorators.append(
- produce_attributes_deletion_decorator(
- level = 'class',
+ implementation_core = assigner_core )
+ apply_cfc_attributes_deleter(
+ clscls,
attributes_namer = attributes_namer,
error_class_provider = error_class_provider,
- implementation_core = deleter_core ) )
- decorators.append(
- produce_attributes_surveillance_decorator(
- level = 'class',
+ implementation_core = deleter_core )
+ apply_cfc_attributes_surveyor(
+ clscls,
attributes_namer = attributes_namer,
- implementation_core = surveyor_core ) )
- return decorators
+ implementation_core = surveyor_core )
+ return clscls
+ return decorate
-def produce_instances_initialization_decorator(
+
+def produce_instances_inception_decorator( # noqa: PLR0913
attributes_namer: _nomina.AttributesNamer,
+ assigner_core: __.typx.Optional[ _nomina.AssignerCore ],
+ deleter_core: __.typx.Optional[ _nomina.DeleterCore ],
+ surveyor_core: __.typx.Optional[ _nomina.SurveyorCore ],
+ ignore_init_arguments: bool,
mutables: _nomina.BehaviorExclusionVerifiersOmni,
visibles: _nomina.BehaviorExclusionVerifiersOmni,
-) -> _nomina.Decorator:
- def decorate( cls: type[ _U ] ) -> type[ _U ]:
- initializer_name = attributes_namer( 'instances', 'initializer' )
- extant = getattr( cls, initializer_name, None )
- original = getattr( cls, '__init__' )
- if extant is original: return cls
+) -> _nomina.Decorator[ __.U ]:
+ ''' Produces decorator to inject '__new__' or '__init__' method.
+
+ Also handles common bookkeeping tasks.
+ '''
+ cores = dict(
+ instances_assigner_core = assigner_core,
+ instances_deleter_core = deleter_core,
+ instances_surveyor_core = surveyor_core )
+ cores_default = dict(
+ assigner = _behaviors.assign_attribute_if_mutable,
+ deleter = _behaviors.delete_attribute_if_mutable,
+ surveyor = _behaviors.survey_visible_attributes )
+
+ def decorate( cls: type[ __.U ] ) -> type[ __.U ]:
+ for core_name in ( 'assigner', 'deleter', 'surveyor' ):
+ core_function = _behaviors.access_core_function(
+ cls,
+ attributes_namer = attributes_namer,
+ arguments = cores,
+ level = 'instances', name = core_name,
+ default = cores_default[ core_name ] )
+ core_aname = attributes_namer( 'instances', f"{core_name}_core" )
+ setattr( cls, core_aname, core_function )
behaviors: set[ str ] = set( )
- behaviors_name = attributes_namer( 'instance', 'behaviors' )
_behaviors.record_behavior(
cls, attributes_namer = attributes_namer,
level = 'instances', basename = 'mutables',
- label = _behaviors.immutability_label, behaviors = behaviors,
+ label = _nomina.immutability_label, behaviors = behaviors,
verifiers = mutables )
_behaviors.record_behavior(
cls, attributes_namer = attributes_namer,
level = 'instances', basename = 'visibles',
- label = _behaviors.concealment_label, behaviors = behaviors,
+ label = _nomina.concealment_label, behaviors = behaviors,
verifiers = visibles )
-
- @__.funct.wraps( original )
- def initialize(
- self: object, *posargs: __.typx.Any, **nomargs: __.typx.Any
- ) -> None:
- original( self, *posargs, **nomargs )
- behaviors_ = _utilities.getattr0( self, behaviors_name, set( ) )
- if not behaviors_: setattr( self, behaviors_name, behaviors_ )
- behaviors_.update( behaviors )
-
- setattr( cls, initializer_name, initialize )
- cls.__init__ = initialize
- return cls
+ decorator = produce_instances_initialization_decorator(
+ attributes_namer = attributes_namer,
+ behaviors = behaviors,
+ ignore_init_arguments = ignore_init_arguments )
+ return decorator( cls )
return decorate
-def produce_attributes_assignment_decorator(
- level: str,
+# def produce_instances_construction_decorator(
+# attributes_namer: _nomina.AttributesNamer,
+# behaviors: __.cabc.MutableSet[ str ],
+# ) -> _nomina.Decorator[ __.U ]:
+# ''' Produces decorator to inject '__new__' method. '''
+# def decorate( cls_: type[ __.U ] ) -> type[ __.U ]:
+# behaviors_name = attributes_namer( 'instance', 'behaviors' )
+# original = cls_.__dict__.get( '__new__' )
+#
+# if original is None:
+#
+# def initialize_with_super(
+# cls: type[ __.U ],
+# *posargs: __.typx.Any,
+# **nomargs: __.typx.Any,
+# ) -> __.U:
+# self = super( cls_, cls ).__new__( cls, *posargs, **nomargs )
+# _activate_instance_behaviors(
+# cls_, self, behaviors_name, behaviors )
+# return self
+#
+# cls_.__new__ = initialize_with_super
+#
+# else:
+#
+# @__.funct.wraps( original )
+# def initialize_with_original(
+# cls: type[ __.U ],
+# *posargs: __.typx.Any,
+# **nomargs: __.typx.Any,
+# ) -> __.U:
+# self = original( cls, *posargs, **nomargs )
+# _activate_instance_behaviors(
+# cls_, self, behaviors_name, behaviors )
+# return self
+#
+# cls_.__new__ = initialize_with_original
+#
+# return cls_
+#
+# return decorate
+
+
+def produce_instances_initialization_decorator(
attributes_namer: _nomina.AttributesNamer,
- error_class_provider: _nomina.ErrorClassProvider,
- implementation_core: _nomina.AssignerCore,
-) -> _nomina.Decorator:
- def decorate( cls: type[ _U ] ) -> type[ _U ]:
- assigner_name = attributes_namer( level, 'assigner' )
- extant = getattr( cls, assigner_name, None )
- original = getattr( cls, '__setattr__' )
- if extant is original: return cls
-
- @__.funct.wraps( original )
- def assign( self: object, name: str, value: __.typx.Any ) -> None:
- implementation_core(
- self,
- ligation = __.funct.partial( original, self ),
- attributes_namer = attributes_namer,
- error_class_provider = error_class_provider,
- level = level,
- name = name, value = value )
+ behaviors: __.cabc.MutableSet[ str ],
+ ignore_init_arguments: bool,
+) -> _nomina.Decorator[ __.U ]:
+ ''' Produces decorator to inject '__init__' method into class. '''
+ def decorate( cls: type[ __.U ] ) -> type[ __.U ]:
+ behaviors_name = attributes_namer( 'instance', 'behaviors' )
+ original = cls.__dict__.get( '__init__' )
- setattr( cls, assigner_name, assign )
- cls.__setattr__ = assign
- return cls
+ if original is None:
- return decorate
+ def initialize_with_super(
+ self: object, *posargs: __.typx.Any, **nomargs: __.typx.Any
+ ) -> None:
+ if ignore_init_arguments: super( cls, self ).__init__( )
+ else: super( cls, self ).__init__( *posargs, **nomargs )
+ _activate_instance_behaviors(
+ cls, self, behaviors_name, behaviors )
+ cls.__init__ = initialize_with_super
-def produce_attributes_deletion_decorator(
- level: str,
- attributes_namer: _nomina.AttributesNamer,
- error_class_provider: _nomina.ErrorClassProvider,
- implementation_core: _nomina.DeleterCore,
-) -> _nomina.Decorator:
- def decorate( cls: type[ _U ] ) -> type[ _U ]:
- deleter_name = attributes_namer( level, 'deleter' )
- extant = getattr( cls, deleter_name, None )
- original = getattr( cls, '__delattr__' )
- if extant is original: return cls
-
- @__.funct.wraps( original )
- def delete( self: object, name: str ) -> None:
- implementation_core(
- self,
- ligation = __.funct.partial( original, self ),
- attributes_namer = attributes_namer,
- error_class_provider = error_class_provider,
- level = level,
- name = name )
+ else:
+
+ @__.funct.wraps( original )
+ def initialize_with_original(
+ self: object, *posargs: __.typx.Any, **nomargs: __.typx.Any
+ ) -> None:
+ if ignore_init_arguments: original( self )
+ else: original( self, *posargs, **nomargs )
+ _activate_instance_behaviors(
+ cls, self, behaviors_name, behaviors )
+
+ cls.__init__ = initialize_with_original
- setattr( cls, deleter_name, delete )
- cls.__delattr__ = delete
return cls
return decorate
-def produce_attributes_surveillance_decorator(
+def produce_attributes_assignment_decorator(
level: str,
attributes_namer: _nomina.AttributesNamer,
- implementation_core: _nomina.SurveyorCore,
-) -> _nomina.Decorator:
- def decorate( cls: type[ _U ] ) -> type[ _U ]:
- surveyor_name = attributes_namer( level, 'surveyor' )
- extant = getattr( cls, surveyor_name, None )
- original = getattr( cls, '__dir__' )
- if extant is original: return cls
-
- @__.funct.wraps( original )
- def survey( self: object ) -> __.cabc.Iterable[ str ]:
- return implementation_core(
- self,
- ligation = __.funct.partial( original, self ),
- attributes_namer = attributes_namer,
- level = level )
+ error_class_provider: _nomina.ErrorClassProvider,
+ implementation_core: __.typx.Optional[ _nomina.AssignerCore ],
+) -> _nomina.Decorator[ __.U ]:
+ ''' Produces decorator to inject '__setattr__' method into class. '''
+ def decorate( cls: type[ __.U ] ) -> type[ __.U ]:
+ leveli = 'class' if level == 'classes' else level
+ original = cls.__dict__.get( '__setattr__' )
+ core = _behaviors.access_core_function(
+ cls,
+ attributes_namer = attributes_namer,
+ arguments = { f"{level}_assigner": implementation_core },
+ level = level, name = 'assigner',
+ default = _behaviors.assign_attribute_if_mutable )
+
+ if original is None:
+
+ def assign_with_super(
+ self: object, name: str, value: __.typx.Any
+ ) -> None:
+ ligation = super( cls, self ).__setattr__
+ # Only enforce behaviors at start of MRO.
+ if cls is not type( self ):
+ ligation( name, value )
+ return
+ core(
+ self,
+ ligation = ligation,
+ attributes_namer = attributes_namer,
+ error_class_provider = error_class_provider,
+ level = leveli,
+ name = name, value = value )
+
+ cls.__setattr__ = assign_with_super
+
+ else:
+
+ @__.funct.wraps( original )
+ def assign_with_original(
+ self: object, name: str, value: __.typx.Any
+ ) -> None:
+ ligation = __.funct.partial( original, self )
+ # Only enforce behaviors at start of MRO.
+ if cls is not type( self ):
+ ligation( name, value )
+ return
+ core(
+ self,
+ ligation = ligation,
+ attributes_namer = attributes_namer,
+ error_class_provider = error_class_provider,
+ level = leveli,
+ name = name, value = value )
+
+ cls.__setattr__ = assign_with_original
- setattr( cls, surveyor_name, survey )
- cls.__dir__ = survey
return cls
return decorate
-def produce_decorators_factory( # noqa: PLR0913
+def produce_attributes_deletion_decorator(
level: str,
- attributes_namer: _nomina.AttributesNamer = __.calculate_attrname,
- error_class_provider: _nomina.ErrorClassProvider = __.provide_error_class,
- assigner_core: _nomina.AssignerCore = (
- _behaviors.assign_attribute_if_mutable ),
- deleter_core: _nomina.DeleterCore = (
- _behaviors.delete_attribute_if_mutable ),
- surveyor_core: _nomina.SurveyorCore = (
- _behaviors.survey_visible_attributes ),
-) -> __.cabc.Callable[
- [
- _nomina.BehaviorExclusionVerifiersOmni,
- _nomina.BehaviorExclusionVerifiersOmni
- ],
- _nomina.Decorators
-]:
- def produce(
- mutables: _nomina.BehaviorExclusionVerifiersOmni,
- visibles: _nomina.BehaviorExclusionVerifiersOmni,
- ) -> _nomina.Decorators:
- ''' Produces standard decorators. '''
- decorators: list[ _nomina.Decorator ] = [ ]
- decorators.append(
- produce_instances_initialization_decorator(
- attributes_namer = attributes_namer,
- mutables = mutables, visibles = visibles ) )
- if mutables != '*':
- decorators.append(
- produce_attributes_assignment_decorator(
- level = level,
+ attributes_namer: _nomina.AttributesNamer,
+ error_class_provider: _nomina.ErrorClassProvider,
+ implementation_core: __.typx.Optional[ _nomina.DeleterCore ],
+) -> _nomina.Decorator[ __.U ]:
+ ''' Produces decorator to inject '__delattr__' method into class. '''
+ def decorate( cls: type[ __.U ] ) -> type[ __.U ]:
+ leveli = 'class' if level == 'classes' else level
+ original = cls.__dict__.get( '__delattr__' )
+ core = _behaviors.access_core_function(
+ cls,
+ attributes_namer = attributes_namer,
+ arguments = { f"{level}_deleter": implementation_core },
+ level = level, name = 'deleter',
+ default = _behaviors.delete_attribute_if_mutable )
+
+ if original is None:
+
+ def delete_with_super( self: object, name: str ) -> None:
+ ligation = super( cls, self ).__delattr__
+ # Only enforce behaviors at start of MRO.
+ if cls is not type( self ):
+ ligation( name )
+ return
+ core(
+ self,
+ ligation = ligation,
attributes_namer = attributes_namer,
error_class_provider = error_class_provider,
- implementation_core = assigner_core ) )
- decorators.append(
- produce_attributes_deletion_decorator(
- level = level,
+ level = leveli,
+ name = name )
+
+ cls.__delattr__ = delete_with_super
+
+ else:
+
+ @__.funct.wraps( original )
+ def delete_with_original( self: object, name: str ) -> None:
+ ligation = __.funct.partial( original, self )
+ # Only enforce behaviors at start of MRO.
+ if cls is not type( self ):
+ ligation( name )
+ return
+ core(
+ self,
+ ligation = ligation,
attributes_namer = attributes_namer,
error_class_provider = error_class_provider,
- implementation_core = deleter_core ) )
- if visibles != '*':
- decorators.append(
- produce_attributes_surveillance_decorator(
- level = level,
- attributes_namer = attributes_namer,
- implementation_core = surveyor_core ) )
- return decorators
+ level = leveli,
+ name = name )
- return produce
+ cls.__delattr__ = delete_with_original
+ return cls
-def produce_decoration_preparers_factory(
- attributes_namer: _nomina.AttributesNamer = __.calculate_attrname,
- error_class_provider: _nomina.ErrorClassProvider = __.provide_error_class,
- class_preparer: __.typx.Optional[ _nomina.ClassPreparer ] = None,
-) -> __.cabc.Callable[ [ ], _nomina.DecorationPreparers ]:
- def produce( ) -> _nomina.DecorationPreparers:
- ''' Produces processors for standard decorators. '''
- preprocessors: list[ _nomina.DecorationPreparer ] = [ ]
- if class_preparer is not None:
- preprocessors.append(
- __.funct.partial(
- class_preparer,
- attributes_namer = attributes_namer ) )
- return tuple( preprocessors )
+ return decorate
+
+
+def produce_attributes_surveillance_decorator(
+ level: str,
+ attributes_namer: _nomina.AttributesNamer,
+ implementation_core: __.typx.Optional[ _nomina.SurveyorCore ],
+) -> _nomina.Decorator[ __.U ]:
+ ''' Produces decorator to inject '__dir__' method into class. '''
+ def decorate( cls: type[ __.U ] ) -> type[ __.U ]:
+ leveli = 'class' if level == 'classes' else level
+ original = cls.__dict__.get( '__dir__' )
+ core = _behaviors.access_core_function(
+ cls,
+ attributes_namer = attributes_namer,
+ arguments = { f"{level}_surveyor": implementation_core },
+ level = level, name = 'surveyor',
+ default = _behaviors.survey_visible_attributes )
+
+ if original is None:
+
+ def survey_with_super(
+ self: object
+ ) -> __.cabc.Iterable[ str ]:
+ ligation = super( cls, self ).__dir__
+ # Only enforce behaviors at start of MRO.
+ if cls is not type( self ): return ligation( )
+ return core(
+ self,
+ ligation = ligation,
+ attributes_namer = attributes_namer,
+ level = leveli )
+
+ cls.__dir__ = survey_with_super
+
+ else:
+
+ @__.funct.wraps( original )
+ def survey_with_original(
+ self: object
+ ) -> __.cabc.Iterable[ str ]:
+ ligation = __.funct.partial( original, self )
+ # Only enforce behaviors at start of MRO.
+ if cls is not type( self ): return ligation( )
+ return core(
+ self,
+ ligation = ligation,
+ attributes_namer = attributes_namer,
+ level = leveli )
- return produce
+ cls.__dir__ = survey_with_original
+ return cls
-class_factory_decorators = produce_class_factory_decorators( )
+ return decorate
@__.typx.dataclass_transform( frozen_default = True, kw_only_default = True )
-def dataclass_with_standard_behaviors(
- decorators: _nomina.Decorators = ( ),
+def dataclass_with_standard_behaviors( # noqa: PLR0913
+ attributes_namer: _nomina.AttributesNamer = __.calculate_attrname,
+ error_class_provider: _nomina.ErrorClassProvider = __.provide_error_class,
+ decorators: _nomina.Decorators[ __.U ] = ( ),
+ assigner_core: __.typx.Optional[ _nomina.AssignerCore ] = None,
+ deleter_core: __.typx.Optional[ _nomina.DeleterCore ] = None,
+ surveyor_core: __.typx.Optional[ _nomina.SurveyorCore ] = None,
+ ignore_init_arguments: bool = False,
mutables: _nomina.BehaviorExclusionVerifiersOmni = __.mutables_default,
visibles: _nomina.BehaviorExclusionVerifiersOmni = __.visibles_default,
-) -> _nomina.Decorator:
+) -> _nomina.Decorator[ __.U ]:
# https://github.com/microsoft/pyright/discussions/10344
''' Dataclass decorator factory. '''
- decorators_factory = produce_decorators_factory( level = 'instances' )
- decorators_ = decorators_factory( mutables, visibles )
- preparers_factory = produce_decoration_preparers_factory(
- class_preparer = prepare_dataclass_for_instances )
- preparers = preparers_factory( )
+ decorators_: _nomina.Decorators[ __.U ] = (
+ _produce_instances_decorators(
+ attributes_namer = attributes_namer,
+ error_class_provider = error_class_provider,
+ assigner_core = assigner_core,
+ deleter_core = deleter_core,
+ surveyor_core = surveyor_core,
+ ignore_init_arguments = ignore_init_arguments,
+ mutables = mutables,
+ visibles = visibles ) )
+ preparers: _nomina.DecorationPreparers[ __.U ] = (
+ _produce_instances_decoration_preparers(
+ attributes_namer = attributes_namer,
+ error_class_provider = error_class_provider,
+ class_preparer = prepare_dataclass_for_instances ) )
return decoration_by(
*decorators, _dataclass_core, *decorators_, preparers = preparers )
-def with_standard_behaviors(
- decorators: _nomina.Decorators = ( ),
+def with_standard_behaviors( # noqa: PLR0913
+ attributes_namer: _nomina.AttributesNamer = __.calculate_attrname,
+ error_class_provider: _nomina.ErrorClassProvider = __.provide_error_class,
+ decorators: _nomina.Decorators[ __.U ] = ( ),
+ assigner_core: __.typx.Optional[ _nomina.AssignerCore ] = None,
+ deleter_core: __.typx.Optional[ _nomina.DeleterCore ] = None,
+ surveyor_core: __.typx.Optional[ _nomina.SurveyorCore ] = None,
+ ignore_init_arguments: bool = False,
mutables: _nomina.BehaviorExclusionVerifiersOmni = __.mutables_default,
visibles: _nomina.BehaviorExclusionVerifiersOmni = __.visibles_default,
-) -> _nomina.Decorator:
+) -> _nomina.Decorator[ __.U ]:
''' Class decorator factory. '''
- decorators_factory = produce_decorators_factory( level = 'instances' )
- decorators_ = decorators_factory( mutables, visibles )
- preparers_factory = produce_decoration_preparers_factory( )
- preparers = preparers_factory( )
+ decorators_: _nomina.Decorators[ __.U ] = (
+ _produce_instances_decorators(
+ attributes_namer = attributes_namer,
+ error_class_provider = error_class_provider,
+ assigner_core = assigner_core,
+ deleter_core = deleter_core,
+ surveyor_core = surveyor_core,
+ ignore_init_arguments = ignore_init_arguments,
+ mutables = mutables,
+ visibles = visibles ) )
+ preparers: _nomina.DecorationPreparers[ __.U ] = (
+ _produce_instances_decoration_preparers(
+ attributes_namer = attributes_namer,
+ error_class_provider = error_class_provider ) )
return decoration_by( *decorators, *decorators_, preparers = preparers )
+
+
+def _activate_instance_behaviors(
+ cls: type[ __.U ],
+ self: object,
+ behaviors_name: str,
+ behaviors: __.cabc.MutableSet[ str ],
+) -> None:
+ # Only record behaviors at start of MRO.
+ if cls is not type( self ): return
+ behaviors_: set[ str ] = (
+ _utilities.getattr0( self, behaviors_name, set( ) ) )
+ behaviors_.update( behaviors )
+ _utilities.setattr0( self, behaviors_name, frozenset( behaviors_ ) )
+
+
+def _produce_instances_decoration_preparers(
+ attributes_namer: _nomina.AttributesNamer,
+ error_class_provider: _nomina.ErrorClassProvider,
+ class_preparer: __.typx.Optional[ _nomina.ClassPreparer ] = None,
+) -> _nomina.DecorationPreparers[ __.U ]:
+ ''' Produces processors for standard decorators. '''
+ preprocessors: list[ _nomina.DecorationPreparer[ __.U ] ] = [ ]
+ if class_preparer is not None:
+ preprocessors.append(
+ __.funct.partial(
+ class_preparer, attributes_namer = attributes_namer ) )
+ return tuple( preprocessors )
+
+
+def _produce_instances_decorators( # noqa: PLR0913
+ attributes_namer: _nomina.AttributesNamer,
+ error_class_provider: _nomina.ErrorClassProvider,
+ assigner_core: __.typx.Optional[ _nomina.AssignerCore ],
+ deleter_core: __.typx.Optional[ _nomina.DeleterCore ],
+ surveyor_core: __.typx.Optional[ _nomina.SurveyorCore ],
+ ignore_init_arguments: bool,
+ mutables: _nomina.BehaviorExclusionVerifiersOmni,
+ visibles: _nomina.BehaviorExclusionVerifiersOmni,
+) -> _nomina.Decorators[ __.U ]:
+ ''' Produces standard decorators. '''
+ decorators: list[ _nomina.Decorator[ __.U ] ] = [ ]
+ decorators.append(
+ produce_instances_inception_decorator(
+ attributes_namer = attributes_namer,
+ assigner_core = assigner_core,
+ deleter_core = deleter_core,
+ surveyor_core = surveyor_core,
+ ignore_init_arguments = ignore_init_arguments,
+ mutables = mutables, visibles = visibles ) )
+ decorators.append(
+ produce_attributes_assignment_decorator(
+ level = 'instances',
+ attributes_namer = attributes_namer,
+ error_class_provider = error_class_provider,
+ implementation_core = assigner_core ) )
+ decorators.append(
+ produce_attributes_deletion_decorator(
+ level = 'instances',
+ attributes_namer = attributes_namer,
+ error_class_provider = error_class_provider,
+ implementation_core = deleter_core ) )
+ decorators.append(
+ produce_attributes_surveillance_decorator(
+ level = 'instances',
+ attributes_namer = attributes_namer,
+ implementation_core = surveyor_core ) )
+ return decorators
diff --git a/sources/classcore/standard/dynadoc.py b/sources/classcore/standard/dynadoc.py
new file mode 100644
index 0000000..a9eace7
--- /dev/null
+++ b/sources/classcore/standard/dynadoc.py
@@ -0,0 +1,129 @@
+# vim: set filetype=python fileencoding=utf-8:
+# -*- coding: utf-8 -*-
+
+#============================================================================#
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+# #
+#============================================================================#
+
+
+''' Dynadoc integration. '''
+
+
+from .. import utilities as _utilities
+from . import __
+from . import nomina as _nomina
+
+
+dynadoc_context = __.ddoc.produce_context( )
+dynadoc_class_introspection_control = (
+ __.ddoc.ClassIntrospectionControl(
+ inheritance = True,
+ introspectors = (
+ __.ddoc.introspection.introspect_special_classes, ) ) )
+dynadoc_module_introspection_control = (
+ __.ddoc.ModuleIntrospectionControl( ) )
+
+
+def dynadoc_avoid_immutables(
+ objct: object,
+ introspection: __.ddoc.IntrospectionControl,
+ attributes_namer: _nomina.AttributesNamer,
+) -> __.ddoc.IntrospectionControl:
+ ''' Disables introspection of immutable objects. '''
+ if __.inspect.isclass( objct ):
+ behaviors_name = attributes_namer( 'class', 'behaviors' )
+ behaviors = _utilities.getattr0( objct, behaviors_name, frozenset( ) )
+ if _nomina.immutability_label in behaviors:
+ return introspection.with_limit(
+ __.ddoc.IntrospectionLimit( disable = True ) )
+ return introspection
+
+
+def produce_dynadoc_introspection_limiter(
+ attributes_namer: _nomina.AttributesNamer = __.calculate_attrname,
+) -> __.ddoc.IntrospectionLimiter:
+ ''' Produces introspection limiter which avoids immutable objects. '''
+ return __.funct.partial(
+ dynadoc_avoid_immutables, attributes_namer = attributes_namer )
+
+dynadoc_introspection_limiter = produce_dynadoc_introspection_limiter( )
+
+
+def produce_dynadoc_introspection_control(
+ enable: bool = True,
+ class_control: __.ddoc.ClassIntrospectionControl = (
+ dynadoc_class_introspection_control ),
+ module_control: __.ddoc.ModuleIntrospectionControl = (
+ dynadoc_module_introspection_control ),
+ limiters: __.ddoc.IntrospectionLimiters = (
+ dynadoc_introspection_limiter, ),
+ targets: __.ddoc.IntrospectionTargets = (
+ __.ddoc.IntrospectionTargetsSansModule ),
+) -> __.ddoc.IntrospectionControl:
+ ''' Produces compatible Dynadoc introspection control. '''
+ return __.ddoc.IntrospectionControl(
+ enable = enable,
+ class_control = class_control,
+ module_control = module_control,
+ limiters = limiters,
+ targets = targets )
+
+dynadoc_introspection_on_class = produce_dynadoc_introspection_control( )
+dynadoc_introspection_on_package = (
+ produce_dynadoc_introspection_control(
+ targets = __.ddoc.IntrospectionTargetsOmni ) )
+
+
+def assign_module_docstring( # noqa: PLR0913
+ module: str | __.types.ModuleType, /,
+ *fragments: __.ddoc.interfaces.Fragment,
+ context: _nomina.DynadocContextArgument = dynadoc_context,
+ introspection: _nomina.DynadocIntrospectionArgument = (
+ dynadoc_introspection_on_package ),
+ preserve: _nomina.DynadocPreserveArgument = True,
+ renderer: __.ddoc.xtnsapi.Renderer = (
+ __.ddoc.assembly.renderer_default ),
+ table: _nomina.DynadocTableArgument = __.dictproxy_empty,
+) -> None:
+ ''' Updates module docstring based on introspection.
+
+ By default, recursively updates docstrings of all module members
+ which have docstrings.
+
+ By default, ignores previously-decorated immutable classes.
+ '''
+ __.ddoc.assign_module_docstring(
+ module,
+ *fragments,
+ context = context,
+ introspection = introspection,
+ preserve = preserve,
+ renderer = renderer,
+ table = table )
+
+
+def produce_dynadoc_configuration(
+ context: _nomina.DynadocContextArgument = dynadoc_context,
+ introspection: _nomina.DynadocIntrospectionArgument = (
+ dynadoc_introspection_on_class ),
+ preserve: _nomina.DynadocPreserveArgument = True,
+ table: _nomina.DynadocTableArgument = __.dictproxy_empty,
+) -> _nomina.ProduceDynadocConfigurationReturn:
+ ''' Produces compatible Dynadoc configuration. '''
+ return __.types.MappingProxyType( dict(
+ context = context,
+ introspection = introspection,
+ preserve = preserve,
+ table = table ) )
diff --git a/sources/classcore/standard/modules.py b/sources/classcore/standard/modules.py
index e0f6b5e..c33ebc4 100644
--- a/sources/classcore/standard/modules.py
+++ b/sources/classcore/standard/modules.py
@@ -21,69 +21,189 @@
''' Standard module classes and reclassifers. '''
-from __future__ import annotations
-
+from .. import utilities as _utilities
from . import __
-from . import behaviors as _behaviors
from . import classes as _classes
+from . import dynadoc as _dynadoc
from . import nomina as _nomina
-class Module( __.types.ModuleType, _classes.Object ):
+class Module( _classes.Object, __.types.ModuleType ):
''' Modules with attributes immutability and concealment. '''
+def finalize_module( # noqa: PLR0913
+ module: __.typx.Annotated[
+ str | __.types.ModuleType,
+ __.ddoc.Doc( ''' Module or module name to finalize. ''' ),
+ ], /,
+ *fragments: __.ddoc.interfaces.Fragment,
+ attributes_namer: _nomina.AttributesNamer = __.calculate_attrname,
+ dynadoc_introspection: _nomina.DynadocIntrospectionArgument = (
+ _dynadoc.dynadoc_introspection_on_package ),
+ dynadoc_table: _nomina.DynadocTableArgument = __.dictproxy_empty,
+ excludes: __.typx.Annotated[
+ __.typx.Optional[ __.cabc.MutableSet[ __.types.ModuleType ] ],
+ __.ddoc.Doc( ''' Modules to exclude from reclassification. ''' ),
+ ] = None,
+ recursive: __.typx.Annotated[
+ bool, __.ddoc.Doc( ''' Recursively reclassify package modules? ''' )
+ ] = False,
+ replacement_class: __.typx.Annotated[
+ type[ __.types.ModuleType ],
+ __.ddoc.Doc( ''' New class for module. ''' ),
+ ] = Module,
+) -> None:
+ ''' Combines Dynadoc docstring assignment and module reclassification.
+
+ Applies module docstring generation via Dynadoc introspection,
+ then reclassifies modules for immutability and concealment.
+
+ When recursive is False, automatically excludes module targets from
+ dynadoc introspection to document only the provided module. When
+ recursive is True, automatically includes module targets so Dynadoc
+ can recursively document all modules.
+ '''
+ module_target = __.ddoc.IntrospectionTargets.Module
+ if recursive:
+ if not ( dynadoc_introspection.targets & module_target ):
+ targets = dynadoc_introspection.targets | module_target
+ introspection = __.ddoc.IntrospectionControl(
+ enable = dynadoc_introspection.enable,
+ class_control = dynadoc_introspection.class_control,
+ module_control = dynadoc_introspection.module_control,
+ limiters = dynadoc_introspection.limiters,
+ targets = targets )
+ else: introspection = dynadoc_introspection
+ elif dynadoc_introspection.targets & module_target:
+ limit = __.ddoc.IntrospectionLimit(
+ targets_exclusions = module_target )
+ introspection = dynadoc_introspection.with_limit( limit )
+ else: introspection = dynadoc_introspection
+ _dynadoc.assign_module_docstring(
+ module,
+ *fragments,
+ introspection = introspection,
+ table = dynadoc_table )
+ _reclassify_module(
+ module,
+ attributes_namer = attributes_namer,
+ excludes = excludes, recursive = recursive,
+ replacement_class = replacement_class )
+
+
+@__.typx.deprecated( "Use 'finalize_module' instead." )
def reclassify_modules(
attributes: __.typx.Annotated[
__.cabc.Mapping[ str, __.typx.Any ] | __.types.ModuleType | str,
- __.typx.Doc(
- 'Module, module name, or dictionary of object attributes.' ),
+ __.ddoc.Doc(
+ ''' Module, module name, or dictionary of object attributes. ''' ),
], /, *,
attributes_namer: __.typx.Annotated[
_nomina.AttributesNamer,
- __.typx.Doc(
+ __.ddoc.Doc(
''' Attributes namer function with which to seal class. ''' ),
] = __.calculate_attrname,
+ excludes: __.typx.Annotated[
+ __.typx.Optional[ __.cabc.MutableSet[ __.types.ModuleType ] ],
+ __.ddoc.Doc( ''' Modules to exclude from reclassification. ''' ),
+ ] = None,
recursive: __.typx.Annotated[
- bool, __.typx.Doc( 'Recursively reclassify package modules?' )
+ bool, __.ddoc.Doc( ''' Recursively reclassify package modules? ''' )
] = False,
+ replacement_class: __.typx.Annotated[
+ type[ __.types.ModuleType ],
+ __.ddoc.Doc( ''' New class for module. ''' ),
+ ] = Module,
) -> None:
- # TODO? Ensure correct operation with namespace packages.
''' Reclassifies modules to have attributes concealment and immutability.
Can operate on individual modules or entire package hierarchies.
- Notes
- -----
- * Only converts modules within the same package to prevent unintended
- modifications to external modules.
- * When used with a dictionary, converts any module objects found as
- values if they belong to the same package.
- * Has no effect on already-reclassified modules.
+ Only converts modules within the same package to prevent unintended
+ modifications to external modules.
+
+ When used with a dictionary, converts any module objects found as
+ values if they belong to the same package.
+
+ Has no effect on already-reclassified modules.
+ '''
+ _reclassify_module(
+ attributes,
+ attributes_namer = attributes_namer,
+ excludes = excludes, recursive = recursive,
+ replacement_class = replacement_class )
+
+
+def _reclassify_module( # noqa: C901,PLR0912
+ attributes: __.typx.Annotated[
+ __.cabc.Mapping[ str, __.typx.Any ] | __.types.ModuleType | str,
+ __.ddoc.Doc(
+ ''' Module, module name, or dictionary of object attributes. ''' ),
+ ], /, *,
+ attributes_namer: __.typx.Annotated[
+ _nomina.AttributesNamer,
+ __.ddoc.Doc(
+ ''' Attributes namer function with which to seal class. ''' ),
+ ] = __.calculate_attrname,
+ excludes: __.typx.Annotated[
+ __.typx.Optional[ __.cabc.MutableSet[ __.types.ModuleType ] ],
+ __.ddoc.Doc( ''' Modules to exclude from reclassification. ''' ),
+ ] = None,
+ recursive: __.typx.Annotated[
+ bool, __.ddoc.Doc( ''' Recursively reclassify package modules? ''' )
+ ] = False,
+ replacement_class: __.typx.Annotated[
+ type[ __.types.ModuleType ],
+ __.ddoc.Doc( ''' New class for module. ''' ),
+ ] = Module,
+) -> None:
+ # TODO? Ensure correct operation with namespace packages.
+ ''' Core implementation for module reclassification.
+
+ Reclassifies modules to have attributes concealment and immutability.
+ Can operate on individual modules or entire package hierarchies.
+
+ Only converts modules within the same package to prevent unintended
+ modifications to external modules.
+
+ When used with a dictionary, converts any module objects found as
+ values if they belong to the same package.
+
+ Has no effect on already-reclassified modules.
'''
if isinstance( attributes, str ):
attributes = __.sys.modules[ attributes ]
if isinstance( attributes, __.types.ModuleType ):
module = attributes
- attributes = attributes.__dict__
+ if excludes and module in excludes: return
+ attributes = module.__dict__
else: module = None
+ if excludes is None: excludes = set( )
+ if module: excludes.add( module )
package_name = (
attributes.get( '__package__' ) or attributes.get( '__name__' ) )
if not package_name: return
for value in attributes.values( ):
if not __.inspect.ismodule( value ): continue
if not value.__name__.startswith( f"{package_name}." ): continue
- if recursive: reclassify_modules( value, recursive = True )
- if isinstance( value, Module ): continue
- _seal_module( value, attributes_namer )
- if module and not isinstance( module, Module ):
- _seal_module( module, attributes_namer )
+ if isinstance( value, replacement_class ): continue
+ if recursive:
+ _reclassify_module(
+ value,
+ attributes_namer = attributes_namer,
+ excludes = excludes, recursive = True,
+ replacement_class = replacement_class )
+ if module and not isinstance( module, replacement_class ):
+ _seal_module( module, attributes_namer, replacement_class )
def _seal_module(
- module: __.types.ModuleType, attributes_namer: _nomina.AttributesNamer
+ module: __.types.ModuleType,
+ attributes_namer: _nomina.AttributesNamer,
+ replacement_class: type[ __.types.ModuleType ],
) -> None:
- behaviors = { _behaviors.concealment_label, _behaviors.immutability_label }
+ behaviors = { _nomina.concealment_label, _nomina.immutability_label }
behaviors_name = attributes_namer( 'instance', 'behaviors' )
- setattr( module, behaviors_name, behaviors )
- module.__class__ = Module
+ module.__class__ = replacement_class
+ _utilities.setattr0( module, behaviors_name, behaviors )
diff --git a/sources/classcore/standard/nomina.py b/sources/classcore/standard/nomina.py
index 6fd0545..1681937 100644
--- a/sources/classcore/standard/nomina.py
+++ b/sources/classcore/standard/nomina.py
@@ -18,14 +18,15 @@
#============================================================================#
-''' Catalog of common type aliases. '''
+''' Catalog of common names and type aliases. '''
# ruff: noqa: F403,F405
-from __future__ import annotations
-
-from . import __
from ..nomina import *
+from . import __
+
+concealment_label = 'concealment'
+immutability_label = 'immutability'
BehaviorExclusionNames: __.typx.TypeAlias = __.cabc.Set[ str ]
@@ -44,8 +45,15 @@
__.cabc.Sequence[ BehaviorExclusionVerifier ] )
BehaviorExclusionVerifiersOmni: __.typx.TypeAlias = (
BehaviorExclusionVerifiers | __.typx.Literal[ '*' ] )
-ErrorClassProvider: __.typx.TypeAlias = (
- __.cabc.Callable[ [ str ], type[ Exception ] ] )
+ErrorClassProvider: __.typx.TypeAlias = __.typx.Annotated[
+ __.cabc.Callable[ [ str ], type[ Exception ] ],
+ __.ddoc.Doc(
+ ''' Takes name of exception class and returns corresponding class.
+
+ Can be used by downstream packages to provide exceptions from their
+ own hierarchies rather than the hierarchy from this package.
+ ''' ),
+]
class AssignerCore( __.typx.Protocol ):
@@ -95,6 +103,46 @@ class ClassPreparer( __.typx.Protocol ):
@staticmethod
def __call__( # pragma: no branch
class_: type,
- decorators: DecoratorsMutable, /, *,
+ decorators: DecoratorsMutable[ __.U ], /, *,
attributes_namer: AttributesNamer,
) -> None: raise NotImplementedError
+
+
+DynadocConfiguration: __.typx.TypeAlias = __.cabc.Mapping[ str, __.typx.Any ]
+# TODO: Use argument type aliases from 'dynadoc' package.
+DynadocContextArgument: __.typx.TypeAlias = __.typx.Annotated[
+ __.ddoc.Context,
+ __.ddoc.Doc(
+ ''' Dynadoc context.
+
+ Renderer, dictionaries for resolution of stringified annotations,
+ etc....
+ ''' ),
+]
+DynadocIntrospectionArgument: __.typx.TypeAlias = __.typx.Annotated[
+ __.ddoc.IntrospectionControl,
+ __.ddoc.Doc(
+ ''' Dynadoc introspection control.
+
+ Which kinds of object to recursively introspect?
+ Scan unannotated attributes?
+ Consider base classes?
+ Etc.
+ ''' ),
+]
+DynadocPreserveArgument: __.typx.TypeAlias = __.typx.Annotated[
+ bool, __.ddoc.Doc( ''' Preserve existing docstring? ''' )
+]
+DynadocTableArgument: __.typx.TypeAlias = __.typx.Annotated[
+ __.cabc.Mapping[ str, str ],
+ __.ddoc.Doc( ''' Table of documentation fragments. ''' ),
+]
+ProduceDynadocConfigurationReturn: __.typx.TypeAlias = __.typx.Annotated[
+ DynadocConfiguration,
+ __.ddoc.Doc(
+ ''' Dynadoc configuration dictionary.
+
+ Suitable as a keyword expansion (``**``) argument to
+ ``assign_module_docstring`` or ``with_docstring``.
+ ''' ),
+]
diff --git a/sources/classcore/utilities.py b/sources/classcore/utilities.py
index 40c91b6..b8ec664 100644
--- a/sources/classcore/utilities.py
+++ b/sources/classcore/utilities.py
@@ -21,37 +21,100 @@
''' Various utilities for class manipulation. '''
-from __future__ import annotations
-
from . import __
-def describe_object( obj: object ) -> str:
- if __.inspect.isclass( obj ):
- return "class '{}'".format( qualify_class_name( obj ) )
+def describe_object( objct: object, / ) -> str:
+ ''' Returns object type with fully-qualified name. '''
+ if __.inspect.isclass( objct ):
+ return "class '{}'".format( qualify_class_name( objct ) )
# TODO? functions, methods, etc...
- return "instance of {}".format( describe_object( type( obj ) ) )
-
-
-def getattr0( obj: object, name: str, default: __.typx.Any ) -> __.typx.Any:
- ''' Returns attribute from object without inheritance. '''
- # Inspect object dictionary directly to suppress getattr inheritance.
- attrsdict = getattr( obj, '__dict__', { } )
- if name in attrsdict: return attrsdict[ name ]
- slots = getattr( obj, '__slots__', ( ) )
- # Name may be in slots but not yet assigned.
- if name in slots: return getattr( obj, name, default )
- return default
+ return "instance of {}".format( describe_object( type( objct ) ) )
+
+
+def getattr0(
+ objct: object, /, name: str, default: __.typx.Any
+) -> __.typx.Any:
+ ''' Returns special private attribute from object.
+
+ This avoids inheritance-related collisions.
+
+ Uses mangled attribute name which is unique to the class,
+ except when attribute is slotted. Slotted attributes are effectively
+ isolated from inheritance.
+ '''
+ if not __.inspect.isclass( objct ):
+ for base in type( objct ).mro( ):
+ slots = getattr( base, '__slots__', ( ) )
+ if name in slots: return getattr( objct, name, default )
+ name_m = mangle_name( objct, name )
+ return getattr( objct, name_m, default )
+
+
+def delattr0( objct: object, /, name: str ) -> None:
+ ''' Deletes special private attribute on object.
+
+ This avoids inheritance-related collisions.
+
+ Uses mangled attribute name which is unique to the class,
+ except when attribute is slotted. Slotted attributes are effectively
+ isolated from inheritance.
+ '''
+ if not __.inspect.isclass( objct ):
+ for base in type( objct ).mro( ):
+ slots = getattr( base, '__slots__', ( ) )
+ if name in slots:
+ delattr( objct, name )
+ return
+ name_m = mangle_name( objct, name )
+ delattr( objct, name_m )
+
+
+def setattr0( objct: object, /, name: str, value: __.typx.Any ) -> None:
+ ''' Assigns special private attribute to object.
+
+ This avoids inheritance-related collisions.
+
+ Uses mangled attribute name which is unique to the class,
+ except when attribute is slotted. Slotted attributes are effectively
+ isolated from inheritance.
+ '''
+ if not __.inspect.isclass( objct ):
+ for base in type( objct ).mro( ):
+ slots = getattr( base, '__slots__', ( ) )
+ if name in slots:
+ setattr( objct, name, value )
+ return
+ name_m = mangle_name( objct, name )
+ setattr( objct, name_m, value )
+
+
+def mangle_name( objct: object, /, name: str ) -> str:
+ ''' Mangles attribute name so that it is unique.
+
+ Effectively provides name of private member attribute,
+ which is unique across class inheritance.
+ '''
+ # TODO: Replace expensive SHA-256 hash with simple 'id'.
+ # Need to debug weird issue with using 'id' early on dataclasses.
+ if not __.inspect.isclass( objct ):
+ return mangle_name( type( objct ), name )
+ # return "{name}{uid}".format( name = name, uid = id( objct ) )
+ namehash = __.hashlib.sha256( )
+ namehash.update( qualify_class_name( objct ).encode( ) )
+ namehash_hex = namehash.hexdigest( )
+ return f"{name}{namehash_hex}"
def qualify_class_name( cls: type ) -> str:
+ ''' Returns fully-qualified class name. '''
return f"{cls.__module__}.{cls.__qualname__}"
def repair_class_reproduction( original: type, reproduction: type ) -> None:
''' Repairs a class reproduction, if necessary. '''
match __.platform.python_implementation( ):
- case 'CPython': # pragma: no branch
+ case 'CPython' | 'PyPy': # pragma: no branch
_repair_cpython_class_closures( original, reproduction )
case _: pass # pragma: no cover
diff --git a/tests/test_000_classcore/__.py b/tests/test_000_classcore/__.py
new file mode 100644
index 0000000..bc13919
--- /dev/null
+++ b/tests/test_000_classcore/__.py
@@ -0,0 +1,64 @@
+# vim: set filetype=python fileencoding=utf-8:
+# -*- coding: utf-8 -*-
+
+#============================================================================#
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+# #
+#============================================================================#
+
+
+''' Common test utilities and helpers. '''
+
+
+import types
+
+from pathlib import Path
+
+
+PACKAGE_NAME = 'classcore'
+PACKAGES_NAMES = ( PACKAGE_NAME, )
+
+
+_modules_cache: dict[ str, types.ModuleType ] = { }
+def cache_import_module( qname: str ) -> types.ModuleType:
+ ''' Imports module from package by name and caches it. '''
+ from importlib import import_module
+ package_name, *maybe_module_name = qname.rsplit( '.', maxsplit = 1 )
+ if not maybe_module_name: arguments = ( qname, )
+ else: arguments = ( f".{maybe_module_name[0]}", package_name, )
+ if qname not in _modules_cache:
+ _modules_cache[ qname ] = import_module( *arguments )
+ return _modules_cache[ qname ]
+
+
+def _discover_module_names( package_name: str ) -> tuple[ str, ... ]:
+ package = cache_import_module( package_name )
+ if not package.__file__: return ( )
+ return tuple(
+ path.stem
+ for path in Path( package.__file__ ).parent.glob( '*.py' )
+ if path.name not in ( '__init__.py', '__main__.py' )
+ and path.is_file( ) )
+
+
+MODULES_NAMES_BY_PACKAGE_NAME = types.MappingProxyType( {
+ name: _discover_module_names( name ) for name in PACKAGES_NAMES } )
+PACKAGES_NAMES_BY_MODULE_QNAME = types.MappingProxyType( {
+ f"{subpackage_name}.{module_name}": subpackage_name
+ for subpackage_name in PACKAGES_NAMES
+ for module_name in MODULES_NAMES_BY_PACKAGE_NAME[ subpackage_name ] } )
+MODULES_QNAMES = tuple( PACKAGES_NAMES_BY_MODULE_QNAME.keys( ) )
+MODULES_NAMES_BY_MODULE_QNAME = types.MappingProxyType( {
+ name: name.rsplit( '.', maxsplit = 1 )[ -1 ]
+ for name in PACKAGES_NAMES_BY_MODULE_QNAME } )
\ No newline at end of file
diff --git a/tests/test_000_classcore/__init__.py b/tests/test_000_classcore/__init__.py
index 6c22331..a1dcd7f 100644
--- a/tests/test_000_classcore/__init__.py
+++ b/tests/test_000_classcore/__init__.py
@@ -18,50 +18,4 @@
#============================================================================#
-''' Package of tests.
-
- Common imports, constants, and utilities for tests.
-'''
-
-
-import types
-
-from pathlib import Path
-
-
-PACKAGE_NAME = 'classcore'
-PACKAGES_NAMES = ( PACKAGE_NAME, f"{PACKAGE_NAME}.standard" )
-
-
-_modules_cache: dict[ str, types.ModuleType ] = { }
-def cache_import_module( qname: str ) -> types.ModuleType:
- ''' Imports module from package by name and caches it. '''
- from importlib import import_module
- package_name, *maybe_module_name = qname.rsplit( '.', maxsplit = 1 )
- if not maybe_module_name: arguments = ( qname, )
- else: arguments = ( f".{maybe_module_name[0]}", package_name, )
- if qname not in _modules_cache:
- _modules_cache[ qname ] = import_module( *arguments )
- return _modules_cache[ qname ]
-
-
-def _discover_module_names( package_name: str ) -> tuple[ str, ... ]:
- package = cache_import_module( package_name )
- if not package.__file__: return ( )
- return tuple(
- path.stem
- for path in Path( package.__file__ ).parent.glob( '*.py' )
- if path.name not in ( '__init__.py', '__main__.py' )
- and path.is_file( ) )
-
-
-MODULES_NAMES_BY_PACKAGE_NAME = types.MappingProxyType( {
- name: _discover_module_names( name ) for name in PACKAGES_NAMES } )
-PACKAGES_NAMES_BY_MODULE_QNAME = types.MappingProxyType( {
- f"{subpackage_name}.{module_name}": subpackage_name
- for subpackage_name in PACKAGES_NAMES
- for module_name in MODULES_NAMES_BY_PACKAGE_NAME[ subpackage_name ] } )
-MODULES_QNAMES = tuple( PACKAGES_NAMES_BY_MODULE_QNAME.keys( ) )
-MODULES_NAMES_BY_MODULE_QNAME = types.MappingProxyType( {
- name: name.rsplit( '.', maxsplit = 1 )[ -1 ]
- for name in PACKAGES_NAMES_BY_MODULE_QNAME } )
+''' Package of tests. '''
diff --git a/tests/test_000_classcore/conftest.py b/tests/test_000_classcore/conftest.py
new file mode 100644
index 0000000..939d81d
--- /dev/null
+++ b/tests/test_000_classcore/conftest.py
@@ -0,0 +1,3 @@
+def pytest_sessionfinish( session, exitstatus ):
+ if exitstatus == 5: # pytest exit code for "no tests collected"
+ session.exitstatus = 0
diff --git a/tests/test_000_classcore/test_000_package.py b/tests/test_000_classcore/test_000_package.py
index f3d5e3e..b29c3d0 100644
--- a/tests/test_000_classcore/test_000_package.py
+++ b/tests/test_000_classcore/test_000_package.py
@@ -23,46 +23,21 @@
import pytest
-from . import (
- # MODULES_NAMES_BY_MODULE_QNAME,
- MODULES_QNAMES,
- PACKAGES_NAMES,
- PACKAGES_NAMES_BY_MODULE_QNAME,
- cache_import_module,
-)
+from . import __
-@pytest.mark.parametrize( 'package_name', PACKAGES_NAMES )
+@pytest.mark.parametrize( 'package_name', __.PACKAGES_NAMES )
def test_000_sanity( package_name ):
''' Package is sane. '''
- package = cache_import_module( package_name )
+ package = __.cache_import_module( package_name )
assert package.__package__ == package_name
assert package.__name__ == package_name
-# @pytest.mark.parametrize( 'module_qname', MODULES_QNAMES )
-# def test_010_attribute_module_existence( module_qname ):
-# ''' Package module is attribute of package. '''
-# package_name = PACKAGES_NAMES_BY_MODULE_QNAME[ module_qname ]
-# package = cache_import_module( package_name )
-# module_name = MODULES_NAMES_BY_MODULE_QNAME[ module_qname ]
-# assert module_name in package.__dict__
-
-
-# @pytest.mark.parametrize( 'module_qname', MODULES_QNAMES )
-# def test_011_attribute_module_classification( module_qname ):
-# ''' Package attribute is module. '''
-# from inspect import ismodule
-# package_name = PACKAGES_NAMES_BY_MODULE_QNAME[ module_qname ]
-# package = cache_import_module( package_name )
-# module_name = MODULES_NAMES_BY_MODULE_QNAME[ module_qname ]
-# assert ismodule( getattr( package, module_name ) )
-
-
-@pytest.mark.parametrize( 'module_qname', MODULES_QNAMES )
+@pytest.mark.parametrize( 'module_qname', __.MODULES_QNAMES )
def test_100_sanity( module_qname ):
''' Package module is sane. '''
- package_name = PACKAGES_NAMES_BY_MODULE_QNAME[ module_qname ]
- module = cache_import_module( module_qname )
+ package_name = __.PACKAGES_NAMES_BY_MODULE_QNAME[ module_qname ]
+ module = __.cache_import_module( module_qname )
assert module.__package__ == package_name
assert module.__name__ == module_qname
diff --git a/tests/test_000_classcore/test_010_base.py b/tests/test_000_classcore/test_010_base.py
index e61dcee..343798d 100644
--- a/tests/test_000_classcore/test_010_base.py
+++ b/tests/test_000_classcore/test_010_base.py
@@ -23,7 +23,7 @@
import pytest
-from . import PACKAGE_NAME, cache_import_module
+from . import __
@pytest.mark.parametrize(
@@ -31,5 +31,5 @@
)
def test_100_exports( module_name ):
''' Module exports expected names. '''
- module = cache_import_module( f"{PACKAGE_NAME}.__.imports" )
+ module = __.cache_import_module( f"{__.PACKAGE_NAME}.__.imports" )
assert hasattr( module, module_name )
diff --git a/tests/test_000_classcore/test_100_exceptions.py b/tests/test_000_classcore/test_100_exceptions.py
index 2ae3c03..4fc84ee 100644
--- a/tests/test_000_classcore/test_100_exceptions.py
+++ b/tests/test_000_classcore/test_100_exceptions.py
@@ -20,7 +20,7 @@
import pytest
-from . import PACKAGE_NAME, cache_import_module
+from .__ import PACKAGE_NAME, cache_import_module
CLASS_NAMES = (
diff --git a/tests/test_000_classcore/test_110_utilities.py b/tests/test_000_classcore/test_110_utilities.py
index 81c136a..a8951dc 100644
--- a/tests/test_000_classcore/test_110_utilities.py
+++ b/tests/test_000_classcore/test_110_utilities.py
@@ -19,37 +19,17 @@
from dataclasses import dataclass
-from platform import python_implementation
import pytest
-from . import PACKAGE_NAME, cache_import_module
+from .__ import PACKAGE_NAME, cache_import_module
MODULE_QNAME = f"{PACKAGE_NAME}.utilities"
-pyimpl = python_implementation( )
-
class Foo: x = 1
-class Bar( Foo ): pass
-
-
-class Baz:
- def __init__( self, value ):
- self.x = value
-
-
-class FooSlotsBase: __slots__ = ( 'x', )
-
-
-class FooSlots( FooSlotsBase ):
- def __init__( self, value ):
- self.x = value
-
-
foo = Foo( )
-bar = Bar( )
def test_100_qualify_class_name( ):
@@ -65,33 +45,32 @@ def test_110_describe_object( ):
assert 'instance of class' in module.describe_object( foo )
-def test_200_getattr0_dict( ):
- ''' Attribute from object dictionary without inheritance. '''
+def test_200_attr0( ):
+ ''' Can access and mutate special private attribute. '''
module = cache_import_module( MODULE_QNAME )
- function = module.getattr0
sentinel = object( )
- assert 1 == function( Foo, 'x', sentinel )
- assert sentinel == function( Bar, 'x', sentinel )
- assert sentinel == function( foo, 'x', sentinel )
- assert sentinel == function( bar, 'x', sentinel )
- baz = Baz( 42 )
- assert 42 == function( baz, 'x', sentinel )
-
-
-def test_205_getattr0_slots( ):
- ''' Attribute from object slots (empty and filled). '''
- module = cache_import_module( MODULE_QNAME )
- function = module.getattr0
- sentinel = object( )
- foono = FooSlotsBase( )
- assert sentinel == function( foono, 'x', sentinel )
- foo = FooSlots( 42 )
- assert 42 == function( foo, 'x', sentinel )
-
-
-@pytest.mark.skipif(
- 'CPython' != pyimpl, reason = 'Only relevant to CPython.' )
-def test_300_cpython_class_repair_function_closure( ):
+ class C: pass
+ module.setattr0( C, 'x', 1 )
+ assert 1 == module.getattr0( C, 'x', sentinel )
+ assert sentinel == module.getattr0( C, 'y', sentinel )
+ class D( C ): pass
+ module.setattr0( D, 'y', 2 )
+ assert sentinel == module.getattr0( D, 'x', sentinel )
+ assert 2 == module.getattr0( D, 'y', sentinel )
+ module.delattr0( C, 'x' )
+ assert sentinel == module.getattr0( C, 'x', sentinel )
+ class CS: __slots__ = ( 'z', )
+ cs = CS( )
+ assert sentinel == module.getattr0( cs, 'z', sentinel )
+ module.setattr0( cs, 'z', 3 )
+ assert 3 == module.getattr0( cs, 'z', sentinel )
+ module.delattr0( cs, 'z' )
+ assert sentinel == module.getattr0( cs, 'z', sentinel )
+ with pytest.raises( AttributeError ):
+ module.delattr0( cs, 'missing' )
+
+
+def test_300_class_repair_function_closure( ):
''' Reproduction has class cell repaired in function closure. '''
class Wut:
def __dir__( self ): return super( ).__dir__( )
@@ -104,9 +83,7 @@ def __dir__( self ): return super( ).__dir__( )
assert closure.cell_contents is DataclassWut
-@pytest.mark.skipif(
- 'CPython' != pyimpl, reason = 'Only relevant to CPython.' )
-def test_301_cpython_class_repair_property_closure( ):
+def test_301_class_repair_property_closure( ):
''' Reproduction has class cell repaired in property closure. '''
class Wut:
@property
@@ -119,9 +96,7 @@ def name( self ): return super( ).__str__( )
assert wut.name == str( wut )
-@pytest.mark.skipif(
- 'CPython' != pyimpl, reason = 'Only relevant to CPython.' )
-def test_302_cpython_class_repair_nothing( ):
+def test_302_class_repair_nothing( ):
''' Reproduction has no class cell to repair in anything. '''
class Wut:
@property
diff --git a/tests/test_000_classcore/test_210_decorators.py b/tests/test_000_classcore/test_210_decorators.py
new file mode 100644
index 0000000..6101e30
--- /dev/null
+++ b/tests/test_000_classcore/test_210_decorators.py
@@ -0,0 +1,49 @@
+# vim: set filetype=python fileencoding=utf-8:
+# -*- coding: utf-8 -*-
+
+#============================================================================#
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+# #
+#============================================================================#
+
+
+# import pytest
+
+from .__ import PACKAGE_NAME, cache_import_module
+
+
+MODULE_QNAME = f"{PACKAGE_NAME}.decorators"
+
+
+def test_111_produce_class_initialization_decorator_original( ):
+ module = cache_import_module( MODULE_QNAME )
+ base_module = cache_import_module( f"{PACKAGE_NAME}.__" )
+ factories_module = cache_import_module( f"{PACKAGE_NAME}.factories" )
+ constructor = factories_module.produce_class_constructor(
+ attributes_namer = base_module.calculate_attrname )
+ cdecorator = module.produce_class_construction_decorator(
+ attributes_namer = base_module.calculate_attrname,
+ constructor = constructor )
+ initializer = factories_module.produce_class_initializer(
+ attributes_namer = base_module.calculate_attrname )
+ idecorator = module.produce_class_initialization_decorator(
+ attributes_namer = base_module.calculate_attrname,
+ initializer = initializer )
+ @idecorator
+ @cdecorator
+ class Class( type ):
+ def __init__( self, *posargs, **nomargs ):
+ self._hello = 'Hi'
+ class Object( metaclass = Class ): pass
+ assert Object._hello == 'Hi'
diff --git a/tests/test_000_classcore/test_310_standard_internals.py b/tests/test_000_classcore/test_310_standard_internals.py
index 85427e6..b874098 100644
--- a/tests/test_000_classcore/test_310_standard_internals.py
+++ b/tests/test_000_classcore/test_310_standard_internals.py
@@ -20,7 +20,7 @@
import pytest
-from . import PACKAGE_NAME, cache_import_module
+from .__ import PACKAGE_NAME, cache_import_module
MODULE_QNAME = f"{PACKAGE_NAME}.standard.__"
diff --git a/tests/test_000_classcore/test_330_standard_decorators.py b/tests/test_000_classcore/test_330_standard_decorators.py
new file mode 100644
index 0000000..d4edecb
--- /dev/null
+++ b/tests/test_000_classcore/test_330_standard_decorators.py
@@ -0,0 +1,186 @@
+# vim: set filetype=python fileencoding=utf-8:
+# -*- coding: utf-8 -*-
+
+#============================================================================#
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+# #
+#============================================================================#
+
+
+import pytest
+
+from .__ import PACKAGE_NAME, cache_import_module
+
+
+MODULE_QNAME = f"{PACKAGE_NAME}.standard.decorators"
+
+
+def simple_assigner_core(
+ objct,
+ ligation,
+ attributes_namer,
+ error_class_provider,
+ level, name, value
+):
+ object.__setattr__( objct, name, value )
+
+
+def simple_deleter_core(
+ objct,
+ ligation,
+ attributes_namer,
+ error_class_provider,
+ level, name
+):
+ object.__delattr__( objct, name )
+
+
+def simple_surveyor_core( objct, ligation, attributes_namer, level ):
+ return object.__dir__( objct )
+
+
+def class_simple_assigner_core(
+ cls,
+ ligation,
+ attributes_namer,
+ error_class_provider,
+ level, name, value
+):
+ type.__setattr__( cls, name, value )
+
+
+def class_simple_deleter_core(
+ cls,
+ ligation,
+ attributes_namer,
+ error_class_provider,
+ level, name
+):
+ type.__delattr__( cls, name )
+
+
+def class_simple_surveyor_core( cls, ligation, attributes_namer, level ):
+ return type.__dir__( cls )
+
+
+def test_120_decorator_core_function_inheritance( ):
+ ''' Core functions (via decorator) inherited. '''
+ module = cache_import_module( MODULE_QNAME )
+ exceptions = cache_import_module( f"{PACKAGE_NAME}.exceptions" )
+
+ @module.with_standard_behaviors( )
+ class Base: pass
+
+ b = Base( )
+ with pytest.raises( exceptions.AttributeImmutability ):
+ b.bar = 6
+ assert not hasattr( b, 'bar' )
+ assert 'bar' not in dir( b )
+ with pytest.raises( exceptions.AttributeImmutability ):
+ del b.bar
+
+ @module.with_standard_behaviors( )
+ class Derivation( Base ): pass
+
+ d = Derivation( )
+ with pytest.raises( exceptions.AttributeImmutability ):
+ d.foo = 2
+ assert not hasattr( d, 'foo' )
+ assert 'foo' not in dir( d )
+ with pytest.raises( exceptions.AttributeImmutability ):
+ del d.foo
+
+
+def test_121_decorator_core_function_replacements_inheritance( ):
+ ''' Replacement core functions (via decorator) inherited. '''
+ module = cache_import_module( MODULE_QNAME )
+
+ @module.with_standard_behaviors(
+ assigner_core = simple_assigner_core,
+ deleter_core = simple_deleter_core,
+ surveyor_core = simple_surveyor_core,
+ )
+ class Base: pass
+
+ b = Base( )
+ b.bar = 6
+ assert b.bar == 6
+ assert 'bar' in dir( b )
+ del b.bar
+ assert not hasattr( b, 'bar' )
+
+ @module.with_standard_behaviors( )
+ class Derivation( Base ): pass
+
+ d = Derivation( )
+ d.foo = 2
+ assert d.foo == 2
+ assert 'foo' in dir( d )
+ del d.foo
+ assert not hasattr( d, 'foo' )
+
+
+def test_220_cfc_core_function_inheritance( ):
+ ''' Core functions (via metaclass) inherited. '''
+ module = cache_import_module( MODULE_QNAME )
+ exceptions = cache_import_module( f"{PACKAGE_NAME}.exceptions" )
+
+ @module.class_factory( )
+ class Class( type ): pass
+
+ class Base( metaclass = Class ): pass
+
+ with pytest.raises( exceptions.AttributeImmutability ):
+ Base.bar = 6
+ assert not hasattr( Base, 'bar' )
+ assert 'bar' not in dir( Base )
+ with pytest.raises( exceptions.AttributeImmutability ):
+ del Base.bar
+
+ class Derivation( Base ): pass
+
+ with pytest.raises( exceptions.AttributeImmutability ):
+ Derivation.foo = 2
+ assert not hasattr( Derivation, 'foo' )
+ assert 'foo' not in dir( Derivation )
+ with pytest.raises( exceptions.AttributeImmutability ):
+ del Derivation.foo
+
+
+def test_221_cfc_core_function_replacements_inheritance( ):
+ ''' Replacement core functions (via metaclass) inherited. '''
+ module = cache_import_module( MODULE_QNAME )
+
+ @module.class_factory(
+ assigner_core = class_simple_assigner_core,
+ deleter_core = class_simple_deleter_core,
+ surveyor_core = class_simple_surveyor_core,
+ )
+ class Class( type ): pass
+
+ class Base( metaclass = Class ): pass
+
+ Base.bar = 6
+ assert Base.bar == 6
+ assert 'bar' in dir( Base )
+ del Base.bar
+ assert not hasattr( Base, 'bar' )
+
+ class Derivation( Base ): pass
+
+ Derivation.foo = 2
+ assert Derivation.foo == 2
+ assert 'foo' in dir( Derivation )
+ del Derivation.foo
+ assert not hasattr( Derivation, 'foo' )
diff --git a/tests/test_000_classcore/test_320_standard_decorators.py b/tests/test_000_classcore/test_340_standard_classes.py
similarity index 62%
rename from tests/test_000_classcore/test_320_standard_decorators.py
rename to tests/test_000_classcore/test_340_standard_classes.py
index ad5c7aa..dd44178 100644
--- a/tests/test_000_classcore/test_320_standard_decorators.py
+++ b/tests/test_000_classcore/test_340_standard_classes.py
@@ -18,21 +18,31 @@
#============================================================================#
-from . import PACKAGE_NAME, cache_import_module
+# import pytest
+from .__ import PACKAGE_NAME, cache_import_module
-MODULE_QNAME = f"{PACKAGE_NAME}.standard.decorators"
+MODULE_QNAME = f"{PACKAGE_NAME}.standard.classes"
-def test_210_class_factory_decorator_idempotence( ):
- ''' Class factory decorators are idempotent. '''
+def test_124_cfc_instances_ignore_init_arguments( ):
+ ''' Metaclass respects 'instances_ignore_init_arguments'. '''
+ from urllib.parse import ParseResult as UrlParts
+ from urllib.parse import urlparse
module = cache_import_module( MODULE_QNAME )
- @module.decoration_by( *module.class_factory_decorators )
- class Class: pass
- @module.decoration_by( *module.class_factory_decorators )
- class BetterClass( Class ): pass
- assert Class.__new__ is BetterClass.__new__
- assert Class.__init__ is BetterClass.__init__
- assert Class.__setattr__ is BetterClass.__setattr__
- assert Class.__delattr__ is BetterClass.__delattr__
- assert Class.__dir__ is BetterClass.__dir__
+
+ class Url(
+ module.Object, UrlParts, instances_ignore_init_arguments = True
+ ): pass
+
+ u = Url( *urlparse( 'https://python.org' ) )
+ assert u.scheme == 'https'
+
+ class UrlWithInit(
+ module.Object, UrlParts, instances_ignore_init_arguments = True
+ ):
+ def __init__( self, *posargs, **nomargs ):
+ super( ).__init__( *posargs, **nomargs )
+
+ u = UrlWithInit( *urlparse( 'https://python.org' ) )
+ assert u.scheme == 'https'
diff --git a/tests/test_000_classcore/test_350_standard_modules.py b/tests/test_000_classcore/test_350_standard_modules.py
index 140ca88..15cc060 100644
--- a/tests/test_000_classcore/test_350_standard_modules.py
+++ b/tests/test_000_classcore/test_350_standard_modules.py
@@ -19,40 +19,114 @@
import types
+import warnings
-from . import PACKAGE_NAME, cache_import_module
+import pytest
+
+from .__ import PACKAGE_NAME, cache_import_module
MODULE_QNAME = f"{PACKAGE_NAME}.standard.modules"
-def test_200_reclassification_of_package_module( ):
- ''' Reclassifies package module directly. '''
+def test_200_reclassification_of_independent_module( ):
+ ''' Reclassifies independent module directly. '''
module = cache_import_module( MODULE_QNAME )
+ exceptions_module = cache_import_module( f"{PACKAGE_NAME}.exceptions" )
module_class = module.Module
module_ = types.ModuleType( 'foobarnotreal' )
module_.__package__ = None
assert module_.__class__ is not module_class
- module.reclassify_modules( module_ )
- assert module_.__class__ is module_class
- module.reclassify_modules( module_ ) # idempotence
- assert module_.__class__ is module_class
+ with warnings.catch_warnings( ):
+ warnings.simplefilter( 'ignore', DeprecationWarning )
+ module.reclassify_modules( module_ )
+ assert module_.__class__ is module_class
+ module.reclassify_modules( module_ ) # idempotence
+ assert module_.__class__ is module_class
+ with pytest.raises( exceptions_module.AttributeImmutability ):
+ module_.foo = 1
def test_201_reclassification_of_normal_module( ):
''' Reclassifies normal module directly. '''
module = cache_import_module( MODULE_QNAME )
+ exceptions_module = cache_import_module( f"{PACKAGE_NAME}.exceptions" )
module_class = module.Module
module_ = types.ModuleType( 'fakepackage.foobarnotreal' )
module_.__package__ = 'fakepackage'
assert module_.__class__ is not module_class
- module.reclassify_modules( module_ )
- assert module_.__class__ is module_class
- module.reclassify_modules( module_ ) # idempotence
- assert module_.__class__ is module_class
+ with warnings.catch_warnings( ):
+ warnings.simplefilter( 'ignore', DeprecationWarning )
+ module.reclassify_modules( module_ )
+ assert module_.__class__ is module_class
+ module.reclassify_modules( module_ ) # idempotence
+ assert module_.__class__ is module_class
+ with pytest.raises( exceptions_module.AttributeImmutability ):
+ module_.foo = 1
+
+def test_202_reclassification_of_package( ):
+ ''' Reclassifies package directly. '''
+ module = cache_import_module( MODULE_QNAME )
+ exceptions_module = cache_import_module( f"{PACKAGE_NAME}.exceptions" )
+ module_class = module.Module
+ package_module = types.ModuleType( 'foobarnotreal' )
+ package_module.__package__ = 'foobarnotreal'
+ member_module = types.ModuleType( 'foobarnotreal.member' )
+ member_module.__package__ = 'foobarnotreal'
+ package_module.member = member_module
+ assert package_module.__class__ is not module_class
+ assert member_module.__class__ is not module_class
+ with warnings.catch_warnings( ):
+ warnings.simplefilter( 'ignore', DeprecationWarning )
+ module.reclassify_modules( package_module )
+ assert package_module.__class__ is module_class
+ assert member_module.__class__ is not module_class
+ with pytest.raises( exceptions_module.AttributeImmutability ):
+ package_module.foo = 1
+ member_module.foo = 1
+ assert member_module.foo == 1
-def test_202_reclassification_of_incomplete_module( ):
+
+def test_203_reclassification_of_package_recursive( ):
+ ''' Reclassifies package recursively. '''
+ module = cache_import_module( MODULE_QNAME )
+ exceptions_module = cache_import_module( f"{PACKAGE_NAME}.exceptions" )
+ module_class = module.Module
+ package_module = types.ModuleType( 'foobarnotreal' )
+ package_module.__package__ = 'foobarnotreal'
+ member_module = types.ModuleType( 'foobarnotreal.member' )
+ member_module.__package__ = 'foobarnotreal'
+ package_module.member = member_module
+ assert package_module.__class__ is not module_class
+ assert member_module.__class__ is not module_class
+ with warnings.catch_warnings( ):
+ warnings.simplefilter( 'ignore', DeprecationWarning )
+ module.reclassify_modules( package_module, recursive = True )
+ assert package_module.__class__ is module_class
+ assert member_module.__class__ is module_class
+ with pytest.raises( exceptions_module.AttributeImmutability ):
+ package_module.foo = 1
+ with pytest.raises( exceptions_module.AttributeImmutability ):
+ member_module.foo = 1
+
+
+def test_204_reclassification_of_module_exclude( ):
+ ''' Reclassification ignores excluded module. '''
+ module = cache_import_module( MODULE_QNAME )
+ module_class = module.Module
+ module_ = types.ModuleType( 'fakepackage.foobarnotreal' )
+ module_.__package__ = 'fakepackage'
+ assert module_.__class__ is not module_class
+ with warnings.catch_warnings( ):
+ warnings.simplefilter( 'ignore', DeprecationWarning )
+ module.reclassify_modules( module_, excludes = { module_ } )
+ assert module_.__class__ is not module_class
+ module_.foo = 1
+ assert module_.foo == 1
+
+
+def test_205_reclassification_of_incomplete_module( ):
''' Reclassification ignores incomplete module. '''
module = cache_import_module( MODULE_QNAME )
module_class = module.Module
@@ -60,18 +134,117 @@ def test_202_reclassification_of_incomplete_module( ):
module_.__package__ = None
del module_.__name__
assert module_.__class__ is not module_class
- module.reclassify_modules( module_ )
+ with warnings.catch_warnings( ):
+ warnings.simplefilter( 'ignore', DeprecationWarning )
+ module.reclassify_modules( module_ )
assert module_.__class__ is not module_class
-def test_205_reclassification_via_module_globals( ):
+def test_206_reclassification_via_module_globals( ):
''' Reclassifies via module globals dictionary. '''
module = cache_import_module( MODULE_QNAME )
+ exceptions_module = cache_import_module( f"{PACKAGE_NAME}.exceptions" )
module_class = module.Module
module_ = types.ModuleType( 'fakepackage.foobarnotreal' )
module_dict = { 'mod': module_, '__package__': 'fakepackage' }
assert module_.__class__ is not module_class
- module.reclassify_modules( module_dict )
+ with warnings.catch_warnings( ):
+ warnings.simplefilter( 'ignore', DeprecationWarning )
+ module.reclassify_modules( module_dict, recursive = True )
+ assert module_.__class__ is module_class
+    module.reclassify_modules( module_dict, recursive = True ) # idempotence
+ assert module_.__class__ is module_class
+ with pytest.raises( exceptions_module.AttributeImmutability ):
+        module_.foo = 1
+
+
+def test_210_finalize_module_basic( ):
+ ''' Finalizes module with default parameters. '''
+ module = cache_import_module( MODULE_QNAME )
+ exceptions_module = cache_import_module( f"{PACKAGE_NAME}.exceptions" )
+ module_class = module.Module
+ module_ = types.ModuleType( 'fakepackage.foobarnotreal' )
+ module_.__package__ = 'fakepackage'
+ assert module_.__class__ is not module_class
+ module.finalize_module( module_ )
+ assert module_.__class__ is module_class
+ with pytest.raises( exceptions_module.AttributeImmutability ):
+ module_.foo = 1
+
+
+def test_211_finalize_module_recursive_with_module_targets( ):
+ ''' Finalizes module recursively when introspection has Module targets. '''
+ module = cache_import_module( MODULE_QNAME )
+ dynadoc_module = cache_import_module( f"{PACKAGE_NAME}.standard.dynadoc" )
+ exceptions_module = cache_import_module( f"{PACKAGE_NAME}.exceptions" )
+ module_class = module.Module
+ module_ = types.ModuleType( 'fakepackage.foobarnotreal' )
+ module_.__package__ = 'fakepackage'
+ introspection_with_module = dynadoc_module.dynadoc_introspection_on_package
+ assert module_.__class__ is not module_class
+ module.finalize_module(
+ module_,
+ dynadoc_introspection = introspection_with_module,
+ recursive = True )
assert module_.__class__ is module_class
- module.reclassify_modules( module_dict ) # idempotence
+ with pytest.raises( exceptions_module.AttributeImmutability ):
+ module_.foo = 1
+
+
+def test_212_finalize_module_recursive_without_module_targets( ):
+ ''' Finalizes module recursively when introspection lacks targets. '''
+ module = cache_import_module( MODULE_QNAME )
+ dynadoc_module = cache_import_module( f"{PACKAGE_NAME}.standard.dynadoc" )
+ exceptions_module = cache_import_module( f"{PACKAGE_NAME}.exceptions" )
+ module_class = module.Module
+ module_ = types.ModuleType( 'fakepackage.foobarnotreal' )
+ module_.__package__ = 'fakepackage'
+ introspection_without_module = (
+ dynadoc_module.dynadoc_introspection_on_class )
+ assert module_.__class__ is not module_class
+ module.finalize_module(
+ module_,
+ dynadoc_introspection = introspection_without_module,
+ recursive = True )
+ assert module_.__class__ is module_class
+ with pytest.raises( exceptions_module.AttributeImmutability ):
+ module_.foo = 1
+
+
+def test_213_finalize_module_nonrecursive_with_module_targets( ):
+ ''' Finalizes module non-recursively when introspection has targets. '''
+ module = cache_import_module( MODULE_QNAME )
+ dynadoc_module = cache_import_module( f"{PACKAGE_NAME}.standard.dynadoc" )
+ exceptions_module = cache_import_module( f"{PACKAGE_NAME}.exceptions" )
+ module_class = module.Module
+ module_ = types.ModuleType( 'fakepackage.foobarnotreal' )
+ module_.__package__ = 'fakepackage'
+ introspection_with_module = dynadoc_module.dynadoc_introspection_on_package
+ assert module_.__class__ is not module_class
+ module.finalize_module(
+ module_,
+ dynadoc_introspection = introspection_with_module,
+ recursive = False )
+ assert module_.__class__ is module_class
+ with pytest.raises( exceptions_module.AttributeImmutability ):
+ module_.foo = 1
+
+
+def test_214_finalize_module_nonrecursive_without_module_targets( ):
+ ''' Finalizes module non-recursively when introspection lacks targets. '''
+ module = cache_import_module( MODULE_QNAME )
+ dynadoc_module = cache_import_module( f"{PACKAGE_NAME}.standard.dynadoc" )
+ exceptions_module = cache_import_module( f"{PACKAGE_NAME}.exceptions" )
+ module_class = module.Module
+ module_ = types.ModuleType( 'fakepackage.foobarnotreal' )
+ module_.__package__ = 'fakepackage'
+ introspection_without_module = (
+ dynadoc_module.dynadoc_introspection_on_class )
+ assert module_.__class__ is not module_class
+ module.finalize_module(
+ module_,
+ dynadoc_introspection = introspection_without_module,
+ recursive = False )
assert module_.__class__ is module_class
+ with pytest.raises( exceptions_module.AttributeImmutability ):
+ module_.foo = 1